--- trove-12.1.0.dev92/.coveragerc ---

# .coveragerc to control coverage.py

[run]
branch = True
source=trove
omit=*trove/tests*

[report]
# Regexes for lines to exclude from consideration
exclude_lines =
    # Have to re-enable the standard pragma
    pragma: no cover

    # Don't complain about missing debug-only code:
    def __repr__
    if self\.debug

    # Don't complain if tests don't hit defensive assertion code:
    raise AssertionError
    raise NotImplementedError

    # Don't complain if non-runnable code isn't run:
    if 0:
    if __name__ == .__main__.:

ignore_errors = False

[html]
directory=cover

--- trove-12.1.0.dev92/.stestr.conf ---

[DEFAULT]
test_path=${OS_TEST_PATH:-./trove/tests/unittests}
top_dir=./

--- trove-12.1.0.dev92/.zuul.yaml ---

- project:
    templates:
      - check-requirements
      - openstack-cover-jobs
      - openstack-lower-constraints-jobs
      - openstack-python3-ussuri-jobs
      - periodic-stable-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
    check:
      jobs:
        - openstack-tox-cover:
            voting: false
        - openstack-tox-pylint
        - trove-tox-bandit-baseline:
            voting: false
        - trove-tempest
        - trove-functional-mysql
        - trove-scenario-mysql-single
        - trove-scenario-mysql-multi
        - trove-scenario-mariadb-single
        - trove-scenario-postgresql-single:
            voting: false
        - trove-scenario-postgresql-multi:
            voting: false
        - trove-scenario-mariadb-multi:
            voting: false
        - trove-tempest-ipv6-only:
            voting: false
        - trove-functional-mysql-nondev:
            voting: false
    gate:
      queue: trove
      jobs:
        - openstack-tox-pylint
        - trove-functional-mysql
        - trove-scenario-mysql-single
        - trove-scenario-mysql-multi
    experimental:
      jobs:
        - trove-grenade
        - trove-scenario-cassandra-single
        - trove-scenario-cassandra-multi
        - trove-scenario-couchbase-single
        - trove-scenario-couchdb-single
        - trove-scenario-percona-single
        - trove-scenario-percona-multi
        - trove-scenario-pxc-single
        - trove-scenario-pxc-multi
        - trove-scenario-redis-single
        - trove-scenario-redis-multi
    periodic:
      jobs:
        - publish-trove-guest-image-mysql-ubuntu-xenial:
            branches:
              - master
        - publish-trove-guest-image-mysql-ubuntu-xenial-dev:
            branches:
              - master

- job:
    name: trove-legacy-dsvm-base
    parent: legacy-dsvm-base
    abstract: true
    timeout: 10800
    required-projects:
      - openstack/devstack-gate
      - openstack/diskimage-builder
      - openstack/tripleo-image-elements
      - openstack/trove
      - openstack/trove-dashboard
    irrelevant-files:
      - ^.*\.rst$
      - ^api-ref/.*$
      - ^doc/.*$
      - ^releasenotes/.*$
      - ^tools/.*$
      - ^trove/hacking/.*$
      - ^trove/tests/unittests/.*$

- job:
    name: trove-devstack-base
    parent: devstack
    abstract: true
    timeout: 10800
    required-projects:
      - openstack/tempest
      - openstack/diskimage-builder
      - openstack/tripleo-image-elements
      - openstack/trove
      - openstack/trove-dashboard
    vars:
      devstack_services:
        tempest: true
        tls-proxy: false
      devstack_plugins:
        trove: https://opendev.org/openstack/trove
      # NOTE(zhaochao): we have to override the default settings from the
      # base devstack job about "SERVICE_HOST" and "HOST_IP", as the trove
      # guestagent should use public addresses to access the underlying
      # components (MQ and Swift), and the previous devstack-gate jobs didn't
      # set these variables. However, we cannot unset variables in a job
      # definition, so we just set SERVICE_HOST and HOST_IP to empty strings,
      # and VNCSERVER_PROXYCLIENT_ADDRESS to 127.0.0.1 instead.
      devstack_localrc:
        SERVICE_HOST: ''
        HOST_IP: ''
        VNCSERVER_PROXYCLIENT_ADDRESS: 127.0.0.1
        USE_PYTHON3: True
    run: playbooks/trove-devstack-base.yaml
    irrelevant-files:
      - ^.*\.rst$
      - ^api-ref/.*$
      - ^doc/.*$
      - ^releasenotes/.*$
      - ^tools/.*$
      - ^trove/hacking/.*$
      - ^trove/tests/unittests/.*$

- job:
    name: trove-fakemodetests-base
    parent: openstack-tox-py36
    abstract: true
    irrelevant-files:
      - ^.*\.rst$
      - ^api-ref/.*$
      - ^contrib/.*$
      - ^doc/.*$
      - ^releasenotes/.*$
      - ^setup.cfg$
      - ^tools/.*$
      - ^trove/hacking/.*$
      - ^trove/tests/api/.*$
      - ^trove/tests/db/.*$
      - ^trove/tests/scenario/.*$
      - ^trove/tests/unittests/.*$

- job:
    name: trove-functional-mysql
    parent: trove-devstack-base
    vars:
      devstack_localrc:
        TROVE_RESIZE_TIME_OUT: 1800
      trove_resize_time_out: 1800
      trove_test_datastore: 'mysql'
      trove_test_group: 'mysql'
      trove_test_datastore_version: '5.7'

- job:
    name: trove-functional-mysql-nondev
    parent: trove-devstack-base
    vars:
      devstack_localrc:
        TROVE_RESIZE_TIME_OUT: 1800
        TROVE_NON_DEV_IMAGE_URL_MYSQL: https://tarballs.opendev.org/openstack/trove/images/trove-master-mysql-ubuntu-xenial.qcow2
      trove_resize_time_out: 1800
      trove_test_datastore: 'mysql'
      trove_test_group: 'mysql'
      trove_test_datastore_version: '5.7'

- job:
    name: trove-grenade
    parent: trove-legacy-dsvm-base
    run: playbooks/legacy/grenade-dsvm-trove/run.yaml
    post-run: playbooks/legacy/grenade-dsvm-trove/post.yaml
    required-projects:
      - openstack/grenade
      - openstack/devstack-gate
      - openstack/trove
      - openstack/trove-dashboard

- job:
    name: trove-scenario-cassandra-single
    parent: trove-devstack-base
    vars:
      trove_test_datastore: cassandra
      trove_test_group: cassandra-supported-single
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-cassandra-multi
    parent: trove-devstack-base
    vars:
      trove_test_datastore: cassandra
      trove_test_group: cassandra-supported-multi
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-couchdb-single
    parent: trove-devstack-base
    vars:
      trove_test_datastore: couchdb
      trove_test_group: couchdb-supported-single
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-couchbase-single
    parent: trove-devstack-base
    vars:
      trove_test_datastore: couchbase
      trove_test_group: couchbase-supported-single
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-mariadb-single
    parent: trove-devstack-base
    vars:
      trove_test_datastore: mariadb
      trove_test_group: mariadb-supported-single
      trove_test_datastore_version: 10.4
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-mariadb-multi
    parent: trove-devstack-base
    vars:
      trove_test_datastore: mariadb
      trove_test_group: mariadb-supported-multi
      trove_test_datastore_version: 10.4
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-mysql-single
    parent: trove-devstack-base
    vars:
      trove_test_datastore: mysql
      trove_test_group: mysql-supported-single
      trove_test_datastore_version: 5.7

- job:
    name: trove-scenario-mysql-multi
    parent: trove-devstack-base
    vars:
      trove_test_datastore: mysql
      trove_test_group: mysql-supported-multi
      trove_test_datastore_version: 5.7

- job:
    name: trove-scenario-percona-multi
    parent: trove-devstack-base
    vars:
      trove_test_datastore: percona
      trove_test_group: percona-supported-multi
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-percona-single
    parent: trove-devstack-base
    vars:
      trove_test_datastore: percona
      trove_test_group: percona-supported-single
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-postgresql-single
    parent: trove-devstack-base
    vars:
      trove_test_datastore: postgresql
      trove_test_group: postgresql-supported-single
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-postgresql-multi
    parent: trove-devstack-base
    vars:
      trove_test_datastore: postgresql
      trove_test_group: postgresql-supported-multi
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-pxc-single
    parent: trove-devstack-base
    vars:
      trove_test_datastore: pxc
      trove_test_group: pxc-supported-single
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-pxc-multi
    parent: trove-devstack-base
    vars:
      trove_test_datastore: pxc
      trove_test_group: pxc-supported-multi
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-redis-single
    parent: trove-devstack-base
    vars:
      trove_test_datastore: redis
      trove_test_group: redis-supported-single
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-scenario-redis-multi
    parent: trove-devstack-base
    vars:
      trove_test_datastore: redis
      trove_test_group: redis-supported-multi
      devstack_localrc:
        TROVE_ENABLE_IMAGE_BUILD: false

- job:
    name: trove-tox-bandit-baseline
    parent: openstack-tox
    timeout: 2400
    vars:
      tox_envlist: bandit-baseline
    required-projects:
      - openstack/requirements
    irrelevant-files:
      - ^.*\.rst$
      - ^.*\.txt$
      - ^api-ref/.*$
      - ^contrib/.*$
      - ^doc/.*$
      - ^etc/.*$
      - ^releasenotes/.*$
      - ^setup.cfg$
      - ^tools/.*$
      - ^trove/hacking/.*$
      - ^trove/tests/scenario/.*$
      - ^trove/tests/unittests/.*$

- job:
    name: trove-tempest
    parent: devstack-tempest
    timeout: 7800
    required-projects: &base_required_projects
      - openstack/trove
      - openstack/trove-tempest-plugin
      - openstack/tempest
    irrelevant-files:
      - ^.*\.rst$
      - ^api-ref/.*$
      - ^doc/.*$
      - ^etc/.*$
      - ^releasenotes/.*$
    vars: &base_vars
      tox_envlist: all
      tempest_concurrency: 2
      devstack_localrc:
        TEMPEST_PLUGINS: /opt/stack/trove-tempest-plugin
        USE_PYTHON3: true
      devstack_local_conf:
        post-config:
          $TROVE_CONF:
            DEFAULT:
              usage_timeout: 1800
      devstack_plugins:
        trove: https://opendev.org/openstack/trove.git
      devstack_services:
        etcd3: false
        tls-proxy: false
        ceilometer-acentral: false
        ceilometer-acompute: false
        ceilometer-alarm-evaluator: false
        ceilometer-alarm-notifier: false
        ceilometer-anotification: false
        ceilometer-api: false
        ceilometer-collector: false
        cinder: true
        c-sch: true
        c-api: true
        c-vol: true
        c-bak: false
        swift: true
        s-account: true
        s-container: true
        s-object: true
        s-proxy: true
        tempest: true
      tempest_test_regex: ^trove_tempest_plugin\.tests

- job:
    name: publish-trove-guest-image
    parent: publish-openstack-artifacts
    run: playbooks/image-build/run.yaml
    post-run: playbooks/image-build/post.yaml
    required-projects:
      - openstack/diskimage-builder
      - openstack/trove
      - openstack/tripleo-image-elements

- job:
    name: publish-trove-guest-image-mysql-ubuntu-xenial
    parent: publish-trove-guest-image
    description: |
      Build and publish Ubuntu Xenial based Trove guest image to
      tarballs.openstack.org.
    vars:
      datastore_type: mysql
      guest_os: ubuntu
      guest_os_release: xenial
      guest_username: ubuntu
      branch: master
      dev_mode: false
      image_suffix: ""

- job:
    name: publish-trove-guest-image-mysql-ubuntu-xenial-dev
    parent: publish-trove-guest-image
    description: |
      Build and publish Ubuntu Xenial based Trove guest image to
      tarballs.openstack.org.
    vars:
      datastore_type: mysql
      guest_os: ubuntu
      guest_os_release: xenial
      guest_username: ubuntu
      branch: master
      dev_mode: true
      image_suffix: "-dev"

--- trove-12.1.0.dev92/AUTHORS ---

Aaron Crickenberger Abitha Palaniappan Adam Gandelman Alex Tomic
Alexander Ignatov Ali Adil Amrith Kumar Amrith Kumar Amrith Kumar
Andreas Jaeger Andreas Jaeger Andrew Bramley Andrey Shestakov Andy Botting
Anh Tran Anna Philips Anna Shen Ashleigh Farnham Attila Fazekas
Auston McReynolds Bartosz Zurkowski Bertrand Lallau Bertrand Lallau
Bin Zhou Bo Wang Boden R Brandon Irizarry Brian Hunter Cao Xuan Hoang
Carl Perry Chandan Kumar Chang Bo Guo ChangBo Guo(gcb) Chaozhe.Chen
Christian Berendt Clark Boylan Colleen Murphy Conrad Weidenkeller
Corey Bryant Craig Craig Vyvial Cyril Roelandt DJ Johnstone Dai Dang Van
Dan Nguyen Daniel Krysiak Daniel Mellado Daniel Salinas Dao Cong Tien
Dariusz Krol David Fecker David Rabel David Sariel Debasish Chowdhury
DeepaJon Denis M Denis Makogon Denys Makogon Dina Belova Dirk Mueller
Dmitriy Ukhlov Dong Ma Doug Hellmann Doug Shelley Dror Kagan Duk Loi
Ed Cranford Edmond Kotowski Edu Alcaniz Emilien Macchi
Erik Olof Gunnar Andersson Erik Redding Fan Zhang Felipe Reyes
Felipe Reyes Flavio Percoco Gaetan Trellu Gauvain Pocentek Gaëtan Trellu
George Peristerakis Ghanshyam Mann Graham Hayes Greg Hill Greg Lucas
Greg Retkowski Gábor Antal Ha Van Tu Haomai Wang He Yongli
Hirotaka Wakabayashi Huan Zhang Ian Wienand Iccha Sethi Ihar Hrachyshka
Illia Khudoshyn Ilya Sviridov Ionuț Arțăriși Ionuț Arțăriși Ishita Mandhan
Iswarya_Vakati J Daniel Ritchie Jacek Kaniuk James E. Blair
James E. Blair James Page Jamie Lennox Jared Rohe Javier Castillo Alcíbar
Jeremy Stanley Jesse Pretorius Jian Xu Joe Cruz Joe Cruz Joe Cruz
Joe Gordon Josh Dorothy Joshua Harlow JuPing Julien Danjou Julien Vey
KATO Tomoyuki KIYOHIRO ADACHI Kaleb Pomeroy Kamil Rykowski Kapil Saxena
Kasper Hasior Kenneth Wilke Kevin Conway Khyati Sheth Kim Jensen
Krzysztof Opasiak Lance Bragstad Laurel Michaels Li Ma Lingxian Kong
LiuNanke LiuYang Luigi Toscano Luke Browning Luong Anh Tuan Manoj Kumar
Marcin Piwowarczyk Mariam John Mark Biciunas Mark Kirkwood Mark McLoughlin
Martin Kletzander Masaki Matsushita Mat Lowery Matt Fischer Matt Riedemann
Matt Riedemann Matt Van Dijk Matthew Treinish Mayuri Ganguly
Michael Basnight Michael Krotscheck Michael Still Michael Yu Minmin Ren
Monty Taylor Morgan Jones Morgan Jones Nguyen Hai Truong
Nguyen Hung Phuong Nguyen Van Trung Nguyen Van Trung Nikhil
Nikhil Manchanda Nilakhya Chatterjee Nirav Shah Nirmal Ranganathan
OTSUKA, Yuanying OctopusZhang Oleksandr Kyrylchuk Olga Kopylova
Ondřej Nový OpenStack Release Bot Paul Lodronio Paul Marshall
Paul Marshall Peter MacKinnon Peter Stachowski Petr Malik Petra Sargent
Pierre Blanc Pierre RAMBAUD Pradeep Chandani Przemysław Godek
Qian Min Chen Ramashri Umale Riccardo Pittau Riddhi Shah Riley Bastrop
Robert Myers Rocky Ronald Bradford Russell Bryant Sam Morrison
Samuel Matzek Satoshi Mitani Saurabh Surana Sean McCully Sean McGinnis
Sebastien Badia Sergey Gotliv Sergey Lukjanov Sergey Vilgelm Shaik Apsar
Shuichiro MAKIGAKI Shuquan Huang Simon Chang Sonali Goyal
Sreedhar Chidambaram Stephen Ma Steve Leon Steve Leon Sudarshan Acharya
Sushil Kumar Sushil Kumar Sushil Kumar SushilKM
Swapnil Kulkarni (coolsvap) Swapnil Kulkarni Takashi NATSUME
Takeaki Matsumoto Tanis De Luna Telles Nobrega Theron Voran
Thierry Carrez Thomas Bechtold Tim Simpson Timothy He Tomasz Nowak
Tony Xu Tovin Seven Trevor McCasland Trevor McCasland Tristan Cacqueray
Valencia Serrao Victor Stinner Victoria Martinez de la Cruz
Victoria Martinez de la Cruz Vincent Untz Vipul Sabhaya Viswa Vutharkar
Vu Cong Tuan XiaBing Yao XiaojueGuan Yang Youseok YuYang Zhangfei Gao
Zhao Chao Zhenguo Niu Zhenguo Niu Zhi Yan Liu ZhiQiang Fan ZhongShengping
Zhongyue Luo abhishekkekane alex amcrn baz bhagyashris boden brandonzhao
caoyuan caoyue chenaidong1 chengyang <374519141@qq.com> chengyang
chenshujuan chenxiangui chenxing dagnello dangming daniel-a-nguyen
daniel-a-nguyen debasish deepakmourya dineshbhor ekotowski ericxiett
ewilson-tesora gecong1973 geng chc <578043796@qq.com> gengchc2 ghanshyam
ghanshyam ghanshyam guang-yee hardy.jung himani jcannava jeremy.zhang
jiansong justin-hopper lijunjie liuqing <970717493@qq.com> liuqing
lvdongbing mariam john mariamj mariamj@us.ibm.com mbasnight melissaml
ming dang <743759846@qq.com> nbziouech npraveen35 pangliye pawnesh.kumar
pradeep rajat29 rameshsahu rico.lin ricolin ridhi.j.shah@gmail.com
ruiyuan-shen rumale rumale rvemula sandrely26 saradpatel sarvesh-ranjan
shalini khandelwal shangxiaobj sharika shashank-gl shayne-burgess
stewie925 svenkataramanaia svenkataramanaia tanlin taoguo tbbrave
tianqing ting.wang venkatamahesh venkatamahesh wangjun wangjunqing wangqi
wangyao weiyj whoami-rajat xhzhf yangyapeng yangyong yatin yfzhao
yuhui_inspur zhangdebo zhanggang zhangyanxian zhufl

--- trove-12.1.0.dev92/CONTRIBUTING.rst ---

============
Contributing
============
Our community welcomes all people interested in open source cloud
computing, and encourages you to join the OpenStack Foundation.

If you would like to contribute to the development of OpenStack, you
must follow the steps documented at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

Once those steps have been completed, changes to OpenStack should be
submitted for review via the Gerrit tool, following the workflow
documented at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

(Pull requests submitted through GitHub will be ignored.)

Bugs should be filed on Storyboard now, not GitHub:

   https://storyboard.openstack.org/#!/project/openstack/trove

We welcome all types of contributions, from blueprint designs to
documentation to testing to deployment scripts. The best way to get
involved with the community is to talk with others online or at a
meetup and offer contributions through our processes, the OpenStack
wiki, blogs, or on IRC at ``#openstack-trove`` on ``irc.freenode.net``.

House Rules
===========

Code Reviews
------------

We value your contribution in reviewing code changes submitted by
others, as this helps increase the quality of the product as well. The
Trove project encourages reviewers to follow the guidelines below.

- A rating of +1 on a code review is indicated if:

  * It is your opinion that the change, as proposed, should be
    considered for merging.

- A rating of 0 on a code review is indicated if:

  * The reason why you believe that the proposed change needs
    improvement is merely an opinion,
  * You have a question, or need a clarification from the author,
  * The proposed change is functional but you believe that there is a
    different, better, or more appropriate way in which to achieve the
    end result being sought by the proposed change,
  * There is an issue of some kind with the Commit Message, including
    violations of the Commit Message guidelines,
  * There is a typographical or formatting error in the commit message
    or the body of the change itself,
  * There could be improvements in the test cases provided as part of
    the proposed change.

- A rating of -1 on a code review is indicated if:

  * The reason why you believe that the proposed change needs
    improvement is irrefutable, or it is a widely shared opinion as
    indicated by a number of +0 comments,
  * The subject matter of the change (not the commit message) violates
    some well understood OpenStack procedure(s),
  * The change contains content that is demonstrably inappropriate,
  * The test cases do not exercise the change(s) being proposed,
  * The change causes a failure in the pylint job (see the pylint
    section below),
  * A user visible change does not provide a release note.

Some other reviewing guidelines:

- In general, when in doubt, a rating of 0 is advised,
- The code style guidelines accepted by the project are part of
  tox.ini; a violation of some other hacking rule(s) or pep8 is not a
  reason to -1 a change.

Other references:

- https://wiki.openstack.org/wiki/CodeReviewGuidelines
- https://docs.openstack.org/infra/manual/developers.html
- https://wiki.openstack.org/wiki/ReviewChecklist
- https://wiki.openstack.org/wiki/GitCommitMessages
- https://docs.openstack.org/hacking/latest/
- https://review.opendev.org/#/c/116176/
- the trove-pylint readme file in tools/trove-pylint.README

Code Review Priority
--------------------

At the design summit in Barcelona (October 2016) we discussed code
review priority. We have a significant number of priorities for what we
want to get merged in each release. As we get closer to the release
date, the time crunch becomes even more acute. Therefore, we
consciously focus on taking steps to merge changes in a manner
consistent with these priorities.

All contributors to the project can help with this by reviewing the
code submitted by others, and getting it merged in a timely manner.

Reviewing code is an important community activity, and if you would
like others to prioritize the review of your changes, it is strongly
advised that you take the time to review other contributors' code and
provide useful feedback. You will notice that as you review others'
code, you will not only learn more about the project and the many
supported databases, but also that others take a more proactive view to
reviewing the changes that you submit.

Merely submitting code and expecting others to review it will (most
likely) not work. If you've submitted code and you find that it isn't
getting reviewed, consider whether you've done your fair share for the
project by reviewing others' code, or testing, or documenting, or
submitting significant improvements, or helping in one of the many
other ways in which you can advance the project.

Approving Changes
-----------------

The Trove project follows the conventions below in approving changes.

1. In general, two core reviewers must +2 a change before it can be
   approved. In practice this means that coreA can +2 the change, then
   coreB can +2/+A the change and it can be merged.

2. coreA and coreB should belong to different organizations.

3. For requirements changes proposed by the Proposal Bot or
   translations proposed by Zanata, a single core reviewer can review
   and approve the change.

NOTE: For the remainder of the Newton release cycle, we will relax the
above conventions. These relaxations apply to the master branch only.
We will adopt a practice of lazy consensus for approving all changes,
and a single core reviewer can review and approve a change. This could
be done, for example, by letting all reviewers know that he or she
intends to approve some change or set of changes if there are no
additional negative comments by a certain definite time. We will,
however, still require that at least one other person review (and +1 or
+2) the change before it can be +A'ed.

Abandoning changes
------------------

At the Trove mid-cycle held in July 2016 we discussed our process for
abandoning changes and concluded that we would adopt the following
process.

1. We will take a more proactive policy towards abandoning changes that
   have not been merged for a long time.

2. A list of changes proposed for abandonment will be presented at a
   weekly meeting and if there is no objection, those changes will be
   abandoned. If the patch sets are associated with bugs, the bugs will
   be unassigned.

3. In general, changes will be proposed for abandonment if the change
   being proposed has either been addressed in some other patch set, or
   if the patch is not being actively maintained by the author and
   there is no available volunteer who will step up to take over the
   patch set.

Storyboard Bugs
---------------

Bugs should be filed on Storyboard at:

   https://storyboard.openstack.org/#!/project/openstack/trove

All changes that address a Storyboard bug should include the bug in the
Commit Message using the Story and Task numbers. It is not required
that a Storyboard bug be filed for every change.
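For example, a commit message that references a Storyboard story and
task might look like the following sketch (the story and task numbers
here are illustrative placeholders, not real Storyboard entries):

.. code-block:: bash

    # The Story/Task numbers below are placeholders for this example.
    $ git commit -m "Fix example failure in the guestagent

    Story: 2001234
    Task: 12345"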
Release Notes
-------------

All user visible changes should include a release note. Trove uses reno
to generate release notes, and therefore only those release notes that
are submitted as part of a change will be included in the release
notes. The failure to add a release note for a user visible change
should be identified in review, and corrected. If a Storyboard bug is
being fixed, the release note should list the story and task number.

For help using reno, the release notes tool, see:

   https://wiki.openstack.org/wiki/Trove/create-release-notes-with-reno
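A rough sketch of that workflow, assuming the repository's standard
``venv`` tox environment is available (the note slug below is
illustrative):

.. code-block:: bash

    # Generate a new release note skeleton under releasenotes/notes/
    $ tox -e venv -- reno new fix-example-bug

    # Then edit the generated YAML file to describe the change
    $ ${EDITOR} releasenotes/notes/fix-example-bug-*.yaml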
Trove Documentation
===================

This repository also contains the Database Services API Reference. To
build the API reference, run::

    $ tox -e api-ref

The generated documentation can be found at::

    api-ref/html/index.html

Trove PyLint Failures
=====================

The Trove project uses trove-pylint (tools/trove-pylint) in the gate,
and this job is intended to help catch coding errors that sometimes may
not get caught in a code review, or by the automated tests.

The gate-trove-tox-pylint jobs are run by the CI, and these invoke the
command in tools/trove-pylint. The tool can produce false-positive
notifications and therefore supports a mechanism to provide a list of
errors that are to be ignored.

Before submitting a change, please run

.. code-block:: bash

    $ tox -e pylint

on your development environment. If this fails, you will have to
resolve all the errors before you can commit the code. This means you
either must fix the problem being identified, or regenerate the list of
ignored errors and submit that as part of your review.

To regenerate the list of ignored errors, run the command:

.. code-block:: bash

    $ tox -e pylint rebuild

Warning: trove-pylint is very sensitive to the version(s) of pylint and
astroid that are installed on your system, and for this reason a tox
environment is provided that will mimic the environment that pylint
will encounter in the gate.

Pre-commit checklist
====================

Before committing code to Gerrit for review, please at least do the
following on your development system and ensure that they pass.

.. code-block:: bash

    $ tox -e pep8
    $ tox -e py27
    $ tox -e py34
    $ tox -e pylint

If you are unable to get these to pass locally, it is a waste of the CI
resources to push up a change for review.

Testing
=======

Usage for integration testing
-----------------------------

If you'd like to start up a fake Trove API daemon for integration
testing with your own tool, run:

.. code-block:: bash

    $ ./tools/start-fake-mode.sh

Stop the server with:

.. code-block:: bash

    $ ./tools/stop-fake-mode.sh

Tests
-----

To run all tests and PEP8, run tox, like so:

.. code-block:: bash

    $ tox

To run just the tests for Python 2.7, run:

.. code-block:: bash

    $ tox -epy27

To run just PEP8, run:

.. code-block:: bash

    $ tox -epep8

To generate a coverage report, run:

.. code-block:: bash

    $ tox -ecover

(Note: on some boxes, the results may not be accurate unless you run it
twice.)

If you want to run only the tests in one file, you can use testtools,
e.g.

.. code-block:: bash

    $ python -m testtools.run trove.tests.unittests.python.module.path

Note that some unit tests can use an existing database. The script
``tools/test-setup.sh`` sets up the database for CI jobs and can be
used for local setup.

Is there something missing?
---------------------------

Do not hesitate to chat and clear your doubts about Trove on IRC, in
``#openstack-trove`` on freenode.net. Also, we meet every week in
``#openstack-meeting-alt`` to discuss ongoing issues.
--- trove-12.1.0.dev92/ChangeLog ---

CHANGES
=======

* Change @property usage to function
* Update hacking for Python3
* Add innodb configuration parameters log\_file\_size and flush\_method
* Devstack should install the trove-dashboad module by default
* Release note for XFS disk format support
* Support XFS disk format
* Fixes "a2ensite" command arg and adds mod\_wsgi package installation
* Fix devstack installation guide
* Improve the doc
* Fixes the following syntax error of etc/apache2/trove apache conf
* Delete error volumes when deleting instance
* Support to test non-dev guest image in CI
* Add python-troveclient in requirements.txt
* Small cleanups
* [Community goal] Add contributor and PTL guide
* Cleanup docs building
* Improve the function tests
* Add 'Quota Management' in production guide
* Config admin clients as default
* Add running trove in production guide
* Fix missing parameter in log message
* Check network conflict
* Delete datastore
* Fix duplicated words issume like "object of of the instance"
* Fix unmount path for instance upgrade
* Fix an invalid assertIsNotNone statement
* Improve API doc
* Fix trovestack tox job
* Show service\_status\_updated in instance API
* Support PostgreSQL 12
* Fix the log related tests
* About Trove datastore image
* Remove some unrelated code
* Support HEALTHY status for db instance
* Add quotas resource operation in API doc
* Fix delete instance
* Add trove-tempest CI job
* Fix Trove periodic CI jobs
* Fix some issues with replicate with mysql
* Support incremental backup for MariaDB
* Move the iptable change from trovestack to devstack
* Mark CI job trove-scenario-mariadb-single voting
* [ussuri][goal] Drop python 2.7 support and testing
* Support pip3 and run on guest-agent service for redis
* Support pip3-virtualenv during image creation
* Fix mariadb CI - trove-scenario-mariadb-single
* Add CI job to build Ubuntu Xenial based Trove guest image for dev
* Build reusable Trove guest image for dev
* Rename devstack variable TROVE\_DISABLE\_IMAGE\_SETUP
* Release note for service\_credentials config
* Improve image building doc
* Support to specify branch when building the image
* Use dedicated service credential config
* Remove all the resources when the Nova VM creation failed
* Use correct Swift credential for instance backup
* Remove the annoying debug logs
* Add branch param for image building
* Doc: Add public trove guest images info
* Update master for stable/train
* Add release note for public images
* Fix Trove periodic job
* Fix an error when generate root password during DB initialization

12.0.0.0rc1
-----------

* Change imag build job to the periodic pipeline
* Some clean up
* Add image build experimental CI job
* Fix Trove CI failure
* Fix issue with pip2 command and update pip3 for mongodb
* [doc] Create instance in user guide
* Release note for public instance
* API doc: 'access' support for creating trove instance
* Support to create public trove instance
* Improve guest image creation in devstack
* Add periodic logs during database mechanism
* Fix MariaDB image build
* fix bug report url
* Modify CreateInstanceTest setUp to initially be OK
* Improve image build
* Support management security group
* Minor change to image build guide
* Support keypair in devstack
* Mark the instance ERROR when Nova VM creation fails
* Support python3 in guest agent
* Make volume type optional
* Release note for backup filtering
* Filtering description for backup API
* Improve devmode=flase when building the image
* Fix all\_projects filtering for backups
* Fix backup tests
* Support backup filtering
* Fix python3 failure inside guest when doing restore
* [train][goal] Run 'trove-tempest-ipv6-only' job
* Support new mysql8.0 utf8mb4 character and collation sets
* Fix 31-fix-init-script for Postgresql
* Refactor variables in plugin.sh to simplify setting a datastore
* Add a designate V2 API dns driver
* Remove invalid assert state
* Update api-ref location
* Add Python 3 Train unit tests
* Fix Trove CI jobs
* Re-define the 'nics' parameter for creating database
* Support renamed postgresql log functions
* Use newer style mysql syntax for users/passwords
* Fix incorrect use of raise in template test
* Ignore new 'sys' mysql database by default
* Update the outdated content
* trovestack guide
* Enable service tenant deployment model by default in DevStack
* Remove flavor operations from API doc
* Fix the structure in releasenotes folder
* Remove the trove-tox-apiexamples CI job
* docs: fix build failure on html\_last\_updated\_fmt
* Add releasenotes for Redis upgrade
* Add Redis datastore upgrade
* Fix tox debug mode
* Extend cluster events
* Update Python 3 test runtimes for Train
* Fix syntax error
* Instead of deprecated keystone cli in docs
* Fix error URL
* Update min tox version to 2.0
* Dropping the py35 testing
* Add Cassandra datastore upgrade
* Skip image building in DevStack for functional tests
* Changing file owner when upgrading mariadb
* Move to opendev
* Use opendev.org instead of git.openstack.org
* OpenDev Migration Patch
* Fix cloudinit mariadb scenario test error
* Pass kwargs to exception to get better format of error message
* Nova keypair support
* Improve trove guest agent image building
* Add error handling when Swift is not installed
* Disable devstack image building for trove-scenario-mariadb-single CI job
* Fix mariadb status after upgrade
* Remove SecurityGroup API extension
* Add new Galera Cluster bootstraping method
* Migrate legacy jobs to Ubuntu Bionic
* Fix tests for Ubuntu Bionic migration of CI jobs
* Fix poll\_until exception type
* Fix redis expected parameter type
* User guide update to use Openstack client syntax
* add python 3.7 unit test job
* Deprecate the config option default\_neutron\_networks
* Fix the way to get localhost IP in devstack
* Replace openstack.org git:// URLs with https://
* Update master for stable/stein
* Share networks created by Trove Devstack plugin
* Fix Mariadb replication config
* Add documentation for managing databases and users
* Additional logs for Mariadb restore strategy

11.0.0
------

* Fix modules import order
* Add log retrieval to Redis
* Skip IP addresses from management networks
* Fix SUBNETPOOL\_V4\_ID check in devstack
* Fix key generation for devstack
* Fix the misspelling of "directory"
* Add upgrade status check - instances with assigned tasks
* Do not use self in classmethod
* update spelling error
* Fix the misspelling of "configuration"
* Use existing CNF\_INCLUDE\_DIR to create mysql-flavor directory
* Add image setup to trove devstack plugin
* Execute functional test jobs running under python3
* Use Ubuntu Xenial distro for integration tests
* Change openstack-dev to openstack-discuss
* Set Tempest's service\_availability setting for Trove
* Add missing ws separator between words
* [fix\_typos] fix wrongly spell word "configration "
* Update http to https
* Fix the wrong url
* Extend contribution doc by IRC contact details
* Add python 3.6 unit test job
* Fix incorrect test\_group in zull job definition
* Fix home direcroty path in trovestack script
* Fix the conflict of urlparse between python2 and python3
* Add trove-status upgrade check command framework
* Fix build ubuntu-geust issue on arm64
* Increment versioning with pbr instruction
* Be compilance with latest oslo.messaging
* Add detailed list for instances
* Fix the mysql start up issue after restore
* Fix cover job
* Add blueprints and bugs link in documents
* endpoint\_type option not used with single tenant
* Cleanup zuul.yaml
* add python 3.6 unit test job
* switch documentation job to new PTI
* make tox -e pylint only run pylint
* fix tox python3 overrides
* update pylint to 1.9.2
* Use latests version of python-troveclient in tests
* import zuul job settings from project-config
* Replace 14.04 trusty with 16.04 xenial
* Enable mutable config in trove
* Remove unused imports from the integration tests
* Use print function rather than statement in tests
* Update reno for stable/rocky

10.0.0
------

* Fix replication failure when Swift isn't available

10.0.0.0b3
----------

* Sync the data fields of DB\* class and table fields
* Migrate to Zuul v3 native job definitions
* More reliable gpg keys importing in DIB elements
* Format service apache2 reload section for Debian/Ubuntu
* [doc] Use openstack client command to replace others
* Raise timeout for instance resizing checking job
* Fix invalid escape sequence warnings
* Add release note link in README
* py3.x: Fix usage of gettext.install
* Remove pycrypto from requirements
* change pylint wrapper to let messages be prefixes
* Update Trove's README
* Add volume\_type to apischema
* Add a hook for restore process to check if successful
* Switch to cryptography from pycrypto
* Update character set and coallaction for mysql5.7
* Trivial: Update pypi url to new url
* Cleanup testrepository and os-testr requirements
* Run unittests under the low-constraints job
* Fix dict iteration in PropertiesCodec

10.0.0.0b1
----------

* Fix lower-constraints and uncap eventlet
* Update auth\_uri option to www\_authenticate\_uri
* Add check\_process() for mysqldump
* fix a typo
* Updated from global requirements
* fix a typo in documentation
* add lower-constraints job
* Mox removal for instances\_resize API tests
* Revert "Fix false-negative failure report for mysqldump backup"
* Updated from global requirements
* Fix os.path.join() for unittests under python 3.6
* Updated from global requirements
* Mox removal for MgmtInstance actions API tests
* Updated from global requirements
* Avoid diverged slave when migrating MariaDB master
* Skip root state inherting scenario tests for Redis
* Fix client recreation in Redis root-disable test
* Register all replicas in replication scenario test
* Fix create mongodb cluster error in multi-network env
* Fix annotation info error in guestagent-api
* Fix false-negative failure report for mysqldump backup
* Use neutronclient for floatingip operations
* Return 204 instead of 200 for root-disable API
* Fix guestagent.test\_operating\_system for Python3
* Remove entry of policy.json from setup.cfg
* Use RootHistory to check if root is ever enabled
* [api-ref] Add sections for backups
* Fix client ping in redis scenario tests
* Fix PostgreSQL non-dynamic configration tests
* Fix incorrect usage of assertTrue
* drop extra word to fix typo
* Only launch in-tree zuul jobs when necessary
* Remove install-guide tox env
* Generate policy sample file automatically
* [api-ref]: update instance creating parameters
* Remove support of creating volume from Nova
* Remove security.authorization option from mongos
* Update reno for stable/queens
* Add bandit-baseline check job
* report\_root should always use context.user

9.0.0
-----

* Remove unused optparse code in trove.db
* Allow host URL for versions to be configurable
* Zuul: Remove project name
* Unable to grow/shrink Vertica 9.x cluster
* Accept the IBM DB2 license during the DIB process
* Remove hardcoded version in DB2 install path
* Improve Vertica 9.x support
* Update the validation template for postgresql
* Unable to perform backup on DB2 instance
* Fix gate error
* [api-ref] Add sections for instance logs
* Add innodb rules for mysql validation template
* Revert Cassandra version to 2 on ubuntu element
* Fix Cassandra element
* Change file permissions on element script
* [api-ref] Update style and instances api

9.0.0.0b3
---------

* Use neutronclient to get networks
* [api-ref] Add fault names for the error response codes
* Remove log translations
* Updated from global requirements
* Missing element-deps files for xenial
* Remove checkpoint\_segments validation rules
* Adding missing dependencay
* Remove use of unsupported TEMPEST\_SERVICES variable
* Fix api exception with unicode tenant name
* Fix a error exception code
* Add missing permission on 10-fix-mycnf for Percona
* Unable to build cassandra images
* Upgrade Postgresql support to v9.6
* Use DocumentedRuleDefault instead of RuleDefault
* Guest agent won't start on Xenial Percona 5.7
* Updated from global requirements
* Remove the heat related documents
* Fix systemd service mongodb on xenial element
* Updated from global requirements
* Always kill all child processes when backup runner exits
* Import experimental Zuul jobs
* Initialize BadRequest exception with correct message
* Add validate\_instances\_network for cluster create
* Add support for MySQL 5.7 on Ubuntu Xenial
* Move legacy trove zuul jobs to trove project
* Replace outdated image info in guest\_cloud\_init doc
* Fix integration cgit url in image building doc
* Add functionality to define different Message and Notification destination
* Remove bundled intree trove tempest plugin
* Updated from global requirements
* Fix wrong error message for secgroup\_rule method
* TrivialFix: remove redundant import alias
* Remove the deprecated Nova-network
* Fix Increase guest agent cmd process timeout
* Implementation of root-enable, root-disable in redis
* Remove policy.json file
* Replace assertRaisesRegexp with assertRaisesRegex
* Add validation for galera\_common grow
* Use keystone session for single tenant remote clients
* Set right status when grow/shrink failed
* Fix status message inside validate\_can\_perform\_action()
* Improve .gitignore file in the project
* Add doc8 to pep8 check for trove project
* Updated from global requirements
* Apply pep8 check to app.wsgi
* Add overrides related unittests for redis manager
* Fix create redis instance with new requirepass
* Unuse an undefined local variable 'name'
* cluster-create support volume\_type
* fix typos in cluster/test\_models.py
* Add Database service user guide
* Fix trove-guestagent startup for redis mangled config cmd
* Don't refresh trove code in guestagent once installed
* Add #!/bin/bash to /etc/rc.local
* Improve code to reduce traverse times
* Fix mongodb database create
* Fix typo in trovestack cleanup
* Remove setting of version/release from releasenotes
* Utilize Ubuntu's hardware enablement stack for trovestack
* Increase guest agent cmd process timeout
* Allow tunable for guest agent process timeout
* Updated from global requirements
* Initialize RedisAdmin with correct config command
* Fix nova proxy admin login
* Allow the user to disable tmpfs when building guest images
* Updated from global requirements
* Enable other Ubuntu architectures to utilize diskimage-builder
* Add volume size verify for replica based on master
* Fix variable user's definition in unittest code
* Let cluster action\_\*\*\* load the right schema
* For Python 3 Common patterns-six string
* Update DIB doc
* Fix qemu image compatibility mode
* Stop polling if nova instances goto error
* Lazy load all configuration options
* Add timestamp to cluster instance name
* Support -1 as unlimited quota restraint in Trove
* Configure guestagent on Ubuntu guest images to use CA certificates
* Missing import of 'assert\_equal' in tests/util/\_\_init\_\_.py
* MongoDB create raise index out of range error
* Allow py27 test selection
* Fix mysql instance create failed when enable skip-name-resolve
* Fix duplicate instancetask code
* Support insecure SSL when talking to services
* Avoid load deleted instances that belong to a cluster
* Update URLs in documents according to document migration
* Open the volume\_support of redis
* Change RPC dispatcher access\_policy to DefaultRPCAccessPolicy
* Force delete any instance or cluster
* Fix some typos in trove/instance/models.py
* Fix indent in docs
* Do not configure kvm virt\_type in devstack
* Fix requirepass problem with redis
* Remove Mitaka reference in install/dashboard.rst
* Optimize import inside trove.common.remote.neutron\_client
* Fix python2/unicode/string issue in mongodb/cluster
* Enhance test case fail to build message
* Enable longer Keystone token life
* Remove tempest from the test requirements
* Updated from global requirements
* When creating a replica do not allow to create users or databases in the same call
* Add test for flavor
* Open test\_create\_too\_many\_instances
* Avoid deleting parent backup failed caused by 404

9.0.0.0b1
---------

* Enable integration tests
* TrivialFix: Redundant alias in import statement
* Fix integration test exception handling
* Imported Translations from Zanata
* Move Pylint ignore
* Fix gate issues
* Replace deprecated alias 'os' with 'os\_primary'
* Adding mongodb support to xenial
* Add default configuration files to data\_files
* Fix inaccurate message while creating replica
* [Trivialfix]Fix typos in trove
* Fix to use "." to source script files
* Remove the use of deprecated attributes in novaclient calls
* Comment out the option oslo\_messaging\_rabbit.rabbit\_password
* Remove unneeded msgfmt test
* Update ubuntu cassandra to supported repo and version
* Update reno for stable/pike
* Remove inexistent option in install guide

8.0.0
-----

* Fix AttributeError in api example snippets tests
* Remove exists\_notification\_ticks from sample conf

8.0.0.0b3
---------

* move from oslosphinx to openstackdocstheme
* import content from cli-reference guide in openstack-manuals
* import admin-guide content from openstack-manuals
* rearrange existing docs to fit the new standard layout
* Updated from global requirements
* Fix a exception error
* Remoe obsolete apidocs
* When running in gate environment, don't start services
* Wrong load removed node of galera cluster
* TrivialFix: Update api-ref link
* Redis 'repl-backlog-size' conf parameter using wrong MIN value
* Updated from global requirements
* Handle isotime deprecation in oslo\_utils.timeutils
* Updated from global requirements
* Updated from global requirements
* Use get\_rpc\_transport instead of get\_transport

8.0.0.0b2
---------

* Handle log message interpolation by the logger part 10
* Log the right attached configuration id
* Improve list-of-ports validation
* Updated from global requirements
* Mask passwords in configuration-show
* Handle log message interpolation by the logger in common/
* Fix user-list failed if host uses host\_ip/netmask
* Remove usage of parameter enforce\_type
* Remove Trove's support for Heat
* trovestack doesn't understand the new systemd based system
* How to create a trove instance
* fix issue with Nova passthrough deprecation
* Trove user-delete API can use periods in user names in fact
* fix build: identity\_admin is no longer available in CI
* Trove:trove guide link
* Handle log message interpolation by the logger part 8
* Redis backup continues if auto one already running
* Fixed MySQL 5.7 not starting after restore
* Add Couchbase helper client methods
* Add port 16379 to conf.sample
* handle impending oslo.messaging deprecation
* enable trove-api behind mod-wsgi
* Fixing PROXY\_AUTH\_URL not being populated properly
* Updated from global requirements
* fix-gate: change trove auth URL's to reflect new URL settings
* Remove the check about related\_to
* Call wrong father class's method with super()
* Fix Cassandra cluster restart
* Update SUSE distro information in install guide
* fix the gate: heat-cfntools was yanked out from under us

8.0.0.0b1
---------

* Use os\_cache instead no\_cache
* Revert "Remove the tempest plugin"
* add debug information to scenario test stdout
* update setup.cfg for classifier python 3.5
* Updated from global requirements
* Add jinja2 autoescape=True
* Fix a few typos
* Fix some reST field lists in docstrings
* Remove LIBS\_FROM\_GIT\_\*
* Improve Gate: address multiple issues relative to timing
* Fix the gate; py27 job failure with failure to import designate tests
* Handle log message interpolation by the logger part 6
* Remove redstack reference
* we now need to have dib installed explicitly
* Handle log message interpolation by the logger part 9
* Sphinx: Treat warnings as errors
* Handle log message interpolation by the logger in common/strategies/
* Handle log message interpolation by the logger part 11
* [Fix gate]Update test requirement
* Updated from global requirements
* Handle readfp deprecation
* Stabilize gate: Fix CONTROLLER\_IP is .1 issue
* Handle log message interpolation by the logger part 7
* Handle log message interpolation by the logger part 5
* Add support for module-reapply command
* Fix module-instances command
* Wrong comment symbol in configuration file
* Remove the tempest plugin
* Install Redis 3.2.6 by compilation
* DeprecationWarning: passlib.utils.generate\_password
* Handle log message interpolation by the logger in cluster/
* Handle log message interpolation by the logger in backup/
* Removed reimport in models.py
* Fix glance cli option
* fix requirements file branching for trove guests
* Update reno for stable/ocata

7.0.0.0rc1
----------

* insulate TroveContext from possible changes in oslo.context
* Prepare for using standard python tests
* unwedge the gate
* Add translation\_checks for i18n
* Add i18n tranlastion to guestagent datastore 2/5
* Add i18n translation to guestagent datastore 1/5
* Add i18n translation to others 3/3
* Add i18n translation to extensions 3/3
* Typo fix: encouter => encounter, lastest => latest

7.0.0.0b3
---------

* Updated from global requirements
* Add configuration support for clusters
* [gate fix] Handle case where hostname ends in .1
* [gate fix] Run resize-\* for MySQL only
* flake8 to ignore releasenotes directory
* backoff in poll\_until
* [fix gate] Change size of MySQL flavors
* Test to figure out why CONTROLLER\_IP is blank
* Disable wait for force-delete
* Display boolean module values
* Add support for cluster restart
* secure oslo\_messaging.rpc
* Add i18n translation to guestagent datastore 3/5
* Add i18n translation to guestagent 3/5
* Add i18n translation to guestagent 4/5
* Add i18n translation to extensions 2/3
* Enable Redis configuration tests
* Fix trovestack clean to remove nova instances
* Remove negative backup tests
* Have scenario tests retrive guest log on error
* Updated from global requirements
* Move try-again loop around all data methods
* Remove netifaces useless requirement
* Skip redis clustering tests
* Update method cfg.set\_defaults
* Add support for module ordering on apply
* Updated from global requirements
* Fix Redis cluster grow scenario tests
* Replace pexpect with processutils in volume.py
* Restrict negative backup tests to Redis only
* Updated from global requirements
* get rid of a couple of old oslo-incubator object references
* Add missing LOG mocks to unittests
* Remove negative backup tests from api run
* Removed instance reset\_password from trove
* Trivial: Remove vim header from source files
* Print error on resize failure
* formalizing configure\_nova\_kvm
* Add i18n translation to others 2/3
* Add i18n translation to guestagent 2/5
* Add i18n translation common 5/5
* Fix i18n translation in common 1/5
* Add i18n tranlastion to common 3/5
* Add i18n translation to common 2/5
* Add i18n translation to guestagent 1/5
* Add i18n translation to others 1/3
* Add i18n translation to guestagent 5/5
* Add i18n tranlation to common 4/5
* Add i18n translation to guestagent datastore 4/5
* Add i18n translation to extenstions 1/3
* Add i18n translation to guestagent datastore 5/5
* Fix Trove tempest plugin
* allow unauthenticated packages to be installed
* Associate datastore, version with volume-type
* Avoid double-check when waiting for test states
* trove pylint cleanup(s)
* Tweak trove devstack plugin
* Have inst-error delete before inst-create-wait
* (fix gate) Remove liberty from the releasenotes
* Debug code to dump env
* Fix Gate: Force pip2 instead of pip
* Fix backup of mysql variants on Centos
* Add support for nic and az in cluster grow

7.0.0.0b2
---------

* Clean up trovestack utility (fix kick-start)
* Stop caching client to fix long-running test fails
* Add Xenial support for MariaDB
* Add DeprecationWarning in test environments
* Removing reference to trove-integration in doc index
* Add support for Oslo Policies to Trove
* Tests skip adding data if instance not exists
* Fix a typo
* (fix troveclient gate) Use alt-demo network
* Show team and repo badges on README
* Updated from global requirements
* Fix slow nodes causing guest\_log to fail
* (fix gate) Don't source openrc in gate run
* Initial work to support Xenial
* Various post-upgrade fixes
* Fix mountpoint detection
* Create net/subnet for alt\_demo tenant
* Add compute instance ID and volume ID to trove show output
* Scenario tests wait on helper user creation
* Have api tests use IPv4
* Updated from global requirements
* Allow splitting test groups into sub groups
* Removed check for KEYSTONE\_CATALOG\_BACKEND from trove plugin
* Fix Galera\_common.grow/shrink to propogate exceptions
* trovestack: Better error message for missing arg
* Updated from global requirements
* Fix log-generator-user-by-row test
* Fix race condition in cluster-show
* clean up references to trove-integration and redstack
* Have cluster-show list all ips
* Fix module apply
* Have scenario tests use ipv4
* Enable hacking extensions framework
* Fix error case where server.fault is missing
* Delete description of the disk
* Fail on deleting non-existing database

7.0.0.0b1
---------

* Install Couchbase SDK for scenario test client
* when pylint has an error loading the config, it should fail
* Files with no code must be left completely empty
* Updated from global requirements
* Add pycrypto to the requirements file
* Multi-Region Support
* trovestack clean doesn't delete secgroups
* Updated from global requirements
* Change the way trovestack figures the default network
* Add reference to OpenStack documentation
* add section on code review priority
* Updated from global requirements
* Remove blacklist test volume
* Update the Signing Key for Percona Debian and Ubuntu Packages
* Update some information
* Fix a typo in previous commit; missed $
* Remove nic handling in galera cluster grow
* Fix incorrect version and release details
* Make trovestack able to run in the gate context
* Fix scenario tests
* Updated from global requirements
* allow PATH\_TROVE to be specified by gate or defaulted by trovestack
* Issue deprecation warning for the use of heat templates in Trove
* PostgreSQL guest\_log test fails intermittently
* Fix Postgresql pg\_rewind call
* Updated from global requirements
* Drop MANIFEST.in - it's not needed by pbr
* Improve guestagent datastore models
* Updated from global requirements
* beginning to change trove-integration
* Fix RPC Versioning
* Remove downgrade
* Enable release notes translation
* Extract RPMPackagerMixin
* Removed H237 from tox.ini
* Delete unnessary i18n
* Fix some typos in doc and comments
* Cluster Upgrade
* Fix typos in description
* Fix SafeConfigParser DeprecationWarning in Python 3.2
* Updated from global requirements
* Fix Old style classes are deprecated and no longer available in Python 3
* Fix a typo in service.py and trove\_testtools.py
* Use more specific asserts in tests
* Add Apache 2.0 license to source file
* Modify parameters of spelling mistakes
* Updated from global requirements
* [api-ref] configure LogABug feature
* Delete openstack in flake8
* Taskmgr & guestagent use inconsistent topics
* Make parameter consistent in devstack plugin
* Fixing trove config group request
* Remove cfg and logging import unused
* Concentrated test\_validate\_xx\_configuration to a new method
* Add max\_prepared\_stmt\_count to validation rules
* update contributing.rst to reflect release notes and bugs
* Use common methods for cluster validation
* Remove unused variables
* Fixup indentation errors
* Updated from global requirements
* Skip 'eject valid master' replication test
* Fix the Manager module path in documentation
* Fix Postgresql configuration test and guest-log
* Perfect the mysql test
* Repeat definition blacklist
* Fix typos in validation-rules.json & service.py
* Delete unused method verify\_errors
* improve pylint; generate errors and config in sorted order
* Fix pylint Error:PGSQL\_RECOVERY\_CONFIG
* Fix call to to\_mb
* Missing logging import
* Display flavor-ephemeral in trove flavor-list command
* Updated from global requirements
* Fix error: option --no-parallel not recognized
* Update reno for stable/newton

6.0.0.0rc1
----------

* Handle the exceptions and errors correctly for DB2
* initial chkin of pylint
* Cluster code looks for wrong field in request for volume type
* Allow for invalid packet sequence in keepalive
* Implement full online backups for DB2
* Add command to delete BUILD instances and clusters
* Use assertIn instead of assertTrue(A in B)
* Update #noqa for out 79 character
* Fix race in Postgres user-list
* Postgresql Streaming Replication
* Skip 'eject valid master' replication test
* Updated from global requirements
* Remove hardcoded timeout in MongoDB clusters
* Add os\_admin to the ignored\_dbs for PostgreSQL
* Update api-ref documentation
* Separate database and user create in prepare
* Update some installation instructions
* [fix gate] Cluster grow not showing locality
* standardize release note page ordering
* Add support for modules in cluster create/grow
* document configure and verify steps
* Insulate TroveContext from oslo.context changes
* Implement Instance Upgrade
* To simplify the setUp
* Implement configuration management for DB2
* Update release note page ordering
* Reference to unknown CONF in fakemode.py
* Stop adding ServiceAvailable group option
* Remove unuse flavor test
* [api-ref] Remove temporary block
* Fix a small typo
* Add missing '\_LE' import in trove/cmd/guest.py
* Add --incremental flag to backup-create
* Remove unused parameters
* In fakemode.py, add the import os
* Quota Management
* Updated from global requirements

6.0.0.0b3
---------

* Port pagination on Python 3
* Call GuestError with proper options
* Correct incorrect i18n of some messages
* Recent changes to api examples
* Config logABug feature for Trove api-ref
* Fix assert root connection on Couchbase
* Cleanup trove's tox.ini and .gitignore
* Display more flavor information in trove flavor-list command
* Updated from global requirements
* Tests verify cluster data via every node
* Updated from global requirements
* Trove workbook for scheduled backups
* Increase cluster\_complete timeouts
* Updated from global requirements
* Add tox entry for py35 tests
* Remove openstack-doc-tools
* Extend guest agent call timeouts
* Updated from global requirements
* Get ready for os-api-ref sphinx theme change
* Cassandra fix lost keyspace(s) after cluster grow
* Fix reading files that are symlinks
* Adds the api-ref migrated RST + YAML files
* Couchbase run backup as root
* Turn off test\_create\_too\_many\_instances
* tempest/test\_flavors: vcpus is in db
* Deprecate 'guest\_log\_long\_query\_time'
* Add missing ICMP option for pxc & mariadb
* Module re-apply does not reflect updated values
* Add log retrieval to Cassandra
* Update setup.cfg homepage
* Updated from global requirements
* Constraints are ready to be used for tox.ini
* Pass 'guest\_log\_expiry' as a string
* Replace some of assertEqual
* Add trove-guestagent.conf sample for log rotation
* Updated from global requirements
* guest\_id missing err, switch config\_drive default
* Introduce "icmp" option for security group rule
* Updated from global requirements
* Use http\_proxy\_to\_wsgi middleware
* Fix for tcp\_ports configuration in devstack
* Fix some typos in the files
* Replace assertEqual(None,...)
* Update CONTRIBUTING.rst
* Fix xtrabackup-binlog file GTID parsing
* Fix MySql replication start from incr backup
* Break out error instance create
* Updated from global requirements
* Replace OpenStack LLC with OpenStack Foundation
* Add port 22 to tcp\_ports in devstack
* MySQL do not retrieve Password in get user
* Define safe pid-file and socket paths in config
* py3: Enable test\_backup\_models and test\_datastores
* Port guestagent test\_dbaas to Python 3
* Remove times.dbm file for each tox run
* Backup tests verify restored databases
* Fix config registration for Trove tempest plugin
* Updated from global requirements
* Imported Translations from Zanata
* Cluster tests get the instance flavor
* Serialize py34 tests
* Use proper queries to update user properties
* Add missing LOG variable to fix scenario tests
* Preserve data type when parsing MySQL configs
* Use constraints for coverage job
* Improve Config Validation
* Move cluster tests to the end of the scenario run
* Clarify unittest documentation
* Fix concurrency issue with Python 3.4 test
* Replace assertEqual(None, \*) with assertIsNone in tests
* Support flavor ids with leading '0'
* Support newer mysql in tests
* Cleanup i18n marker functions to match Oslo usage
* Removed unreachable statement
* Updated from global requirements
* Add vCPUs to flavor-list
* Correct reraising of exception
* Locality support for clusters
* Fix secure method to work with PXC

6.0.0.0b2
---------

* Fixing typos
* Updated from global requirements
* Persist error messages and display on 'show'
* Updated from global requirements
* Fix user/database scenarios to run without helper
* Repl tests retrieve configs via test\_helper
* Orphaned Volume Not Removed on Instance Delete
* Fix tempest base: add missing imports
* Switch from MySQL-python to PyMySQL
* Improve coverage in guestagent and backup models
* Port galera and redis cluster tests to Python 3
* Port test\_pagination on Python 3
* Add trove tempest plugin
* Trivial Fix: Fix typo
* Reorganize scenario test order to speed up run
* Install Guide: Set bug project
* Updated from global requirements
* Document house rules for approving changes
* Run Python 3 unit tests in parallel
* Port more guestagent unit tests to Python 3
* Replace print statement with print function
* Use correct message for missing default datastore
* Updated from global requirements
* Port backup test\_storage to Python 3
* Move the rabbit/rpc options to its own section
* Minor cleanup for Install Guide
* Imported Translations from Zanata
* fix wrong id for render default config param 'server\_id'
* Break apart backup scenario tests
* Break apart database scenario tests
* Break apart user scenario tests
* Break apart module scenario tests
* Break apart root scenario tests
* Break apart instance create/actions scenario tests
* Break apart replication scenario tests
* Locality support for replication
* Trivial Fix: Fix typo
* Delete with fail nova instance tries to stop DB
* Break apart configuration group scenario tests
* Have Trove plugin install dashboard
* Add Install Guide
* pkg: replace commands module with subprocess
* Mock detector run only once for each testcase
* Postgresql Incremental Backup and Restore
* Fix user & database pagination in guests
* Rename called\_once\_with methods correctly
* Updated from global requirements
* Imported Translations from Zanata
* Add New Relic License module driver
* Switch test config file to use os\_region\_name
* Cleanup guestagent models
* Change service candidates list for MariaDB
* Updated from global requirements
* Updated from global requirements
* Port more unit tests to Python 3
* Catch exceptions from backup runner
* Unittests patch 'CONF.datastore\_manager'
* Correct spelling mistake
* Updated from global requirements
* Fix troveclient to support Mistral
* remove verbose option in Trove config files
* Correct typo in debug message
* Fix MariaDB scenario replication test
* Fix Percona configuration test
* spelling mistake in release description for cassandra
* Fixed PXC configuration detach does not work
* Updated from global requirements
* Updated from global requirements
* Increase 'state\_change\_wait\_time' in devstack
* Replica report DETACH status when detaching
* Updated from global requirements
* Add utf8mb4 encoding option to mysql
* Updated from global requirements
* Updated from global requirements
* Fix MariaDB clusters failing with TypeError
* Add bandit security config
* Fixed module-apply on removed module
* Add db-models and RootController for Postgres
* User and database tests wait for casted actions
* Reuse Cassandra connections
* Update 'myisam-recover-options' config template
* Trove's tox tests should respect upper-constraints
* Improve mock detector performance
* Change recursion depth to '1' for mock detection
* Updated from global requirements
* Unsupported module driver not logged correctly
* Fix grammatical mistakes, "it's" to "its"
* Notification exceptions not sent back correctly
* Correct some issues with devstack plugin for trove
* Update Trove's README
* Add 'redis' as a valid service candidate
* Enable more unit tests on Python 3
* Port instance unit tests to Python 3
* Port more API unit tests to Python 3
* Port designate code to Python 3
* Backup to swift using wrong large object type
* Fixed error message logging
* Updated from global requirements
* Remove unused 'override.config.template'
* [Trivial] Remove unnecessary executable flag for files
* Schema migrations fail for postgresql backend
* Refactor scenario tests to facilitate multi-group
* Trivial: Remove unused logging import
* Add variables for quota configuration
* Make 'default\_password\_length' datastore-specific
* Mysql GTID replication fails when data inserted
* Updated from global requirements
* Simplify guestagent.pkg: don't use metaclass
* Python 3: avoid sets.Set and string.letters
* pep8: exclude .git/ subdirectory
* Python 3: fix relative imports
* Port crypto\_utils to Python 3
* Python3: Add support for raise and ConfigParser
* Mysql replicas need to set binlog\_format
* Python3: Add support for unicode, basestring, long
* Imported Translations from Zanata
* Update the root scenario tests
* Imported Translations from Zanata
* Fixed kwargs being defaulted to CONF values
* Fix MariaDB config groups
* Enhance mongod pid find for multiple distros
* Do not remove root user on disable
* blacklist\_regex is not an option
* Cleanup Mysql replication test on completion
* Updated from global requirements
* Updated from global requirements
* Port more common unit tests to Python 3
* Port test\_template unit test to Python 3
* Updated from global requirements
* Tests verify applied configuration values
* Updated from global requirements
* Add missing args to configuration inst test
* Revert the legacy\_compute v2 api for nova
* Python3: Add support for iteritems and iterkeys
* Updated from global requirements
* encrypt\_data(): don't encode IV to base64

5.0.0
-----

* Trove's tox tests should respect upper-constraints.txt
* Fixes migrations for MySQL 5.6.\* and MariaDB 10.1.\*
* Trove's tox tests should respect upper-constraints.txt
* Address change in Route v2.3
* Address change in Route v2.3
* Pass optional timeout to service commands
* Removing some redundant words
* Add user access scenario tests
* PG test-helper create matching db for test user
* Add a minimal py34 test environment
* clean up errors shown on galera cluster unittests
* Unhandled messages logged in tox tests
* Fix some unexpected tracebacks while running tox tests
* Removes redundant "of"
* Fix replication scenario tests
* Cap test\_log\_generator\_user\_by\_row to 100
* Update reno for stable/mitaka
* Update .gitreview for stable/mitaka

5.0.0.0rc1
----------

* updating the release notes from mitaka commits
* Fix incorrect innobackupex args (fixed gate)
* Implementation of couchdb user and database functions
* Secure mongodb instances and clusters by default
* Fix pagination function
* Add better validation to cluster shrink
* Do not reset password of user-root accounts
* Vertica configuration groups
* Implement Backup and Restore for CouchDB
* Server support for instance module feature
* Addition of DB2 backup & restore functionality
* Moved CORS middleware configuration into set\_defaults
* Updated from global requirements
* Update db2 settings to reflect correct hostname
* Replace deprecated LOG.warn with LOG.warning
* Revert "Time to get rid of most vestiges of slave\_of"
* Add MongoDB config server port to cfg
* Add MongoDB cluster root-enable support
* Block pymongo version 3.1
* Updated from global requirements
* Updated from global requirements

5.0.0.0b3
---------

* Implement Cassandra clustering
* Vertica Cluster Grow and Shrink
* Implement MariaDB Clustering
* Fixed wrongly used assertEqual
* Use assertEqual instead of assertTrue
* Add support for root functions to Cassandra
* Implement DBaaS Ceilometer Notifications
* Updated from global requirements
* Server side of module maintenance commands
* Excessive messages logged during unit tests
* Fix test order and region in guest\_log scenario
* Pass datastore details when required (again)
* Update setup.cfg entry points for oslo namespace-less import paths
* Remove usage of WritableLogger from oslo\_log
* Updated from global requirements
* root enable for pxc clustered datastore
* Enable cluster tests for Redis
* Updated from global requirements
* Add backup & restore for Cassandra
* Implement Cassandra Configuration Groups
* Disable unsupported root-disable tests
* Fix MySQL user list pagination
* Implement user functions for Cassandra datastore
* Fixed test\_prepare\_mysql\_with\_snapshot failure on rhel 7.2
* Malformed user access sql for postgres guest agent
* Code cleanup - commented code
* Move 'enable root on prepare' to the base
* Handle bad Swift endpoint in guest\_log
* pxc grow/shrink cluster implementation
* Fix Percona XtraDB Cluster guest to work with v5.6
* Use uppercase 'S' in word "OpenStack"
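Several entries above record the same mechanical unittest-idiom cleanup ("Use assertIn instead of assertTrue(A in B)", "Replace assertEqual(None, \*) with assertIsNone in tests", "Use assertEqual instead of assertTrue"). A minimal before/after sketch of the idiom, using hypothetical test code rather than anything from the Trove tree::

    import unittest

    class IdiomExampleTest(unittest.TestCase):
        def test_preferred_assertions(self):
            items = ['a', 'b']
            value = None
            # Old style: a failure only reports "False is not true".
            self.assertTrue('a' in items)
            self.assertEqual(None, value)
            # New style: a failure reports the values actually compared.
            self.assertIn('a', items)
            self.assertIsNone(value)

The payoff is purely in the failure messages: the specialized assertions show what was compared instead of a bare boolean.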
* Add root-actions int-tests
* Use OSprofiler options consolidated in lib itself
* Use correct depends\_on decorator for log tests
* Add support for root-disable
* Updated from global requirements
* Unittests use trove\_testtools
* Redis should perform backup using BGSAVE not SAVE
* Remove slave\_of reference from scenario tests
* Python3: Add support for httplib, urlparse
* Python3: Add support for iter.next
* Fix issue of mismatched test-requirements.txt file
* Add support for configuration groups in int-tests
* Updated from global requirements
* Enable Vertica load via curl
* Fix apply configuration on prepare
* Fix leaked mocks in 'test\_dbaas'
* Implement Guest Log File Retrieval
* Fixed a typo in log message
* Make scenario-tests work with all datastores
* Updated from global requirements
* Add missing sudo for systemctl command
* MariaDB GTID Replication
* py3: Replaces xrange() with range()
* Imported Translations from Zanata
* Make Trove exists events notifs be emitted in current audit period

5.0.0.0b2
---------

* Updated from global requirements
* Update requirements.txt
* Drop python 2.6 support
* Updated from global requirements
* Revert Skiptest from change set 245845
* Use built-in function setattr() directly
* Time to get rid of most vestiges of slave\_of
* Cleanup trove debug and info logging messages
* Fix indexing of dict.keys() in python3
* Replaces itertools.izip with six.moves.zip
* Updated from global requirements
* Sometimes prepare messages are delayed
* Add debug testenv in tox
* Test: make enforce\_type=True in CONF.set\_override
* Code refactoring for couchdb
* Added Keystone and RequestID headers to CORS middleware
* MongoDB cluster grow failing in get\_admin\_password
* Marking downgrade as deprecated
* Mongo cluster grow - use az and nic values
* Updated from global requirements
* Register additional datastores for int-tests
* PostgreSQL configuration groups
* Fixed 'client connection lost' bug in test\_helper
* Use assertTrue/False instead of assertEqual(T/F)
* Fix to\_gb & to\_mb conversion function rounding issue
* Trivial: Remove meaningless default value in \_\_get\_\_()
* Trivial: replace deprecated \_impl\_messaging
* Change assertTrue(isinstance()) by optimal assert
* Fix missing value types for log message
* Fix 'cannot access' error with Redis restore
* Updated from global requirements
* Using LOG.warning replace LOG.warn
* Trivial: Remove vim header from source files
* Keep py3.X compatibility for urllib
* Define 'device\_path' for Redis
* Experimental datastores use new service commands
* Finish cluster int-tests
* Add MySQL int-test helper client
* Move rabbit conf settings to separate section
* Remove updateuser test
* Replace assertEqual(None, \*) with assertIsNone in tests
* Modifying Vertica clusters to have a master node
* mock out the guestagent write\_file call
* Use a specific network for the test
* Move to oslo\_db
* Deprecated tox -downloadcache option removed
* Move storage strategy code from guestagent to common
* Catch all errors in Mock detector
* Updated from global requirements
* Use configured datastore on instance creation
* Mongodb's "security.authorization" wrong type
* Remove version per M-1 release instructions

5.0.0.0b1
---------

* updating with other reno changes
* Change reporting of unhandled logging
* Updated from global requirements
* Fix race condition in replication backup delete
* Update Trove Installation guide
* Add better input checking for MongoDB
* fix mongo create database
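Entries such as "Python3: Add support for httplib, urlparse", "py3: Replaces xrange() with range()" and "Replaces itertools.izip with six.moves.zip" all describe the standard six-based porting pattern. A small hypothetical illustration of the technique (assuming six is available, as the entries above imply; the function below is invented for this example and is not Trove code)::

    from six.moves import range, zip          # xrange/izip on py2
    from six.moves.urllib import parse        # urlparse module on py2

    def numbered_query_keys(url):
        query = dict(parse.parse_qsl(parse.urlsplit(url).query))
        # dict.keys() is a view on py3, so materialize a list before slicing.
        keys = sorted(list(query.keys()))
        return list(zip(range(len(keys)), keys))

The same source then runs unmodified on both Python 2 and Python 3, which is what these porting commits were after.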
* Fix PostgreSQL root functions
* Port run\_tests.py to Python 3
* Fix configuration lookup failure
* Do not use api-paste.ini osprofiler options
* Delete python bytecode before every test run
* Add 'volume\_type' parameter to instance create
* Refactor the datastore manager classes (more)
* Updated from global requirements
* Update internal status when waiting for a change
* Move MongoDB mongos upstart script to elements
* Remove kombu as a dependency for Trove
* Correct the computation of elapsed time while waiting for state change
* Fix tox py27 error
* Guestagent configuration tests mock chown/chmod
* Correct errors resulting in "No handlers ..." error message
* Added CORS middleware to Trove
* Updated from global requirements
* Move ignore\_dbs and ignore\_users out of DEFAULT
* Redis 'hz' conf parameter using wrong MIN value
* Add reno for release notes management
* fix the version info of trove to use pbr
* Defer revision dir initialization in GA
* Imported Translations from Zanata
* this should fix the failing gate
* Address issues with Trove eventlet monkey-patching
* Refactor the datastore manager classes
* Replace assertEqual(None, \*) with assertIsNone in tests
* Incorrect usage of python-novaclient
* Use oslo\_config new type PortOpt for port options
* Updated from global requirements
* Changes names of some quota values
* The verbose option in section [DEFAULT] has been deprecated
* root\_on\_create for Couchbase should be false
* Pagination limit code refactor
* Fix example value for notification\_topics
* oslo.utils 2.6.0 causing tox test to hang
* Add .eggs/ to .gitignore
* Adding more doc strings to event simulator
* Fix the bug of "Error spelling of 'AMPQ'"
* Corrected error message for unsupported datastore flavors
* Add unspecified options to backup unittests
* Fix redis cluster unit test for assertRaisesRegexp
* Unused variable backup\_cmd removed
* Add instance create int-tests

4.0.0
-----

* Fix promote for Redis datastore
* Fix publish\_exists\_event authentication exception
* Fix publish\_exists\_event authentication exception
* Imported Translations from Zanata
* Fix the bug of "Fix spelling typo in trove"
* Use IPOpt to validate IPAddress
* Use stevedore directive to document plugins
* Cleanup of Translations

4.0.0.0rc1
----------

* Allow more instances than cluster\_member\_count for pxc
* Open Mitaka Development
* Updated from global requirements
* Fix promote for Redis datastore
* Fix Mongo report\_root call to have correct args
* Enable deploying Trove instances into single tenant
* Add support for extended\_properties for clusters
* Fix Postgres services management
* Couchbase cluster-init command needs credentials
* Properly patch \_init\_overrides\_dir in MongoDB
* Fix get\_flavors test by sorting json output
* use the legacy\_compute v2 api for nova instead of v2.1 for now
* Updated from global requirements
* Fix typos (from "UPD" to "UDP")
* Root enablement for Vertica clusters/instances
* Disable MongoDB cluster security
* Add user and database actions int-tests
* Mongodb Cluster Scaling
* Fixes the mgmt.test\_datastores errors
* Percona Xtradb Cluster implementation
* Add support for Redis replication
* Redis Cluster Initial Implementation
* Expect ValidationError on an empty user update
* Revert change set 217881
* Add support for MariaDB datastore in Trove
* Test instance name cannot have special characters in it now
* Add Redis backup/restore functionality
* Fixed redeclared CONF = cfg.CONF

4.0.0.0b3
---------

* Fix instance from alternating status on create
* MongoDB backup uses "nogroup" which is OS specific
* MySQL Manager Refactor
* Removing unused dependency: discover
* Increase test timeout for instance creation
* Enable all trove services by default
* Add generic int-test classes
* Initialize directory for Mongo's runtime files
* Fix not to output confusing message in tr-tmgr.log
* Word spellings have been corrected
* Update ignore\_dbs for MySQL 5.6
* Implements Datastore Registration API
* Fix description for "Inapt spelling of a word"
* Fix race conditions in config overrides tasks
* MongoDB create\_admin\_user not authorized
* Mongodb storing config overrides in /var/run
* Updated from global requirements
* Updated from global requirements
* Associate flavor types with datastore versions
* Fix a few typos in log messages and comments
* provide default port for pydev\_debug\_port
* Updated from global requirements
* Configuration Groups for MongoDB
* Adds the PATCH method to extensions
* Imported Translations from Transifex
* Use oslo.log library instead of system logging module
* Updated from global requirements
* add a missing i18n import for backup strategy
* MongoDB databases and users not created on create
* MongoDB user management - access grant/revoke/show
* Imported Translations from Transifex
* User name\_string schema limited to 16 chars
* Have devstack plugin install Trove client
* Notifications for exists events need nova remote admin url set
* MongoDB database management features
* Improve the guestagent configuration manager
* Updated from global requirements
* Adds lower\_case\_table\_names support for MySQL
* MongoDB cluster instances missing 'key'
* Cluster instances could falsely report 'ready'
* MongoDB user management - root enable/show

4.0.0.0b2
---------

* Imported Translations from Transifex
* Remove openstack.common package
* Switch to the oslo\_log library
* Updated from global requirements
* Switch to the oslo.serialization library
* Switch to the oslo.context library
* MongoDB cluster taskmanager's add\_shard not called
* MongoDB prepare needs to wait for Mongo to start
* MongoDB cluster strategy missing create\_admin\_user
* Configuration Groups for Redis
* Switch to oslo.service
* Which interfaces trove starts up on should be logged
* Updated from global requirements
* MongoDB user management - create/list/show/delete
* correct some grammar
* Remove H305,H307,H402,H407,H904
* Updated from global requirements
* Support authentication in the MongoDB guest agent
* Imported Translations from Transifex
* Allow int tests to run in SSL environment
* Fix unit test mocks for new mock release
* default for percona in notification\_service\_id
* Updated from global requirements
* Updated from global requirements
* Added replica\_of attribute to test\_index\_list
* Fake mode service does not start after the oslo service package changes
* Support nics and AZ for MongoDB clusters
* Removed the non-existent method call
* Fixes the tests in test\_configuration.py
* Updated from global requirements
* Fix ssl.PROTOCOL\_SSLv3 not supported by Python 2.7.9
* Provide option to read SSH credentials from test env
* Fixed the tests in test\_models.py
* Make test\_ensure\_mysql\_is\_running more robust
* Fixes the failing unit-tests
* Implement guestagent Configuration Manager
* Move mysql datadir to a sub-directory on mounted volume
* Fixes the method update\_datastore

4.0.0.0b1
---------

* Update version for Liberty

4.0.0a0
-------

* correct the annotation of param
* Fixed API string references to MySql
* Fixes the failing tests in mgmt/test\_models.py
* Remove nova\_proxy\_admin\_user from trove guest
* PostgreSQL guest agent can't remove temp file
* Notification serialization of context
* Implements integration tests for Vertica
* Updated from global requirements
* MongoDB single instance backup and restore
* Sent in the topic when taskmanager setup
* Decrease replication slave retry wait time
* Adds the missing import to manage.py
* Fixes db\_upgrade and db\_downgrade methods
* Updated from global requirements
* Added dangling mock detection to 'guestagent'
* Remove rsdns directory
* Fix leaked mocks in the 'MockMgmtInstanceTest'
* Update devstack to handle Trove/Neutron setups
* Fix leaked mocks in the 'LimitsControllerTest'
* correct api schema for instance patch
* Fixes a broken debug message in schema matching
* Updated from global requirements
* Fix create Vertica cluster or instance to show ERROR on failure
* Fixes hacking rules
* Move guestagent settings to default section
* Fixes the unsafe global mocks
* Fix leaked mocks in the 'guestagent/test\_api' module
* Improving manual install docs
* Error message missing tenant id
* Fix gate failure on gate-trove-pep8
* Added more unit-tests to Vertica-Cluster-Strategy
* accepting network and availability zone for instances in cluster
* Fixed the unmocked entry in taskmanager unit-tests
* Update modules to turn on dangling mock detection
* Updated glance API for creating public image
* Updated from global requirements
* Added more unit-tests to taskmanager
* Added unit-tests for mgmt-cluster-service
* Updated from global requirements
* Added dangling mock detection to 'mgmt' module
* Added dangling mock detection to 'conductor'
* Added dangling mock detection to 'taskmanager'
* Added dangling mock detection to 'secgroups'
* Fix leaked mocks in the 'upgrade' module if any
* Added dangling mock detection to 'backup' module
* Added dangling mock detection to 'dns' module
* Added dangling mock detection to 'cluster' module
* Drop use of 'oslo' namespace package
* Fix leaked mocks in the 'common' module if any
* Implement dangling mock detector for unittests
* Imported Translations from Transifex
* Fixes the unsafe mocking in test\_dbaas
* Abstract 'mkdir' shell commands in guestagent
* Added more unit-tests to guestagent
* Corrects order of parameters to assertEqual
* Fix process attribute check in BackupRunner
* Fix Mongo status check to work for Mongo 3.0
* Updated from global requirements

2015.1.0
--------

* Remove [Service] section from mongo config tmpl
* Remove [Service] section from mongo config tmpl
* Add unix\_socket\_directories setting for pgsql
* Support testing on 32 bit systems
* Fixes unit-tests in test\_dbaas.py
* Release Import of Translations from Transifex
* Fixes mocking of operating\_system.service\_discovery
* Added more unit-tests to Vertica
* update .gitreview for stable/kilo
* Abstract 'mv'/'cp' shell commands in guestagent
* Abstract rm/chmod shell commands in guestagent
* Updated from global requirements
* Fixes the rollback of flavor resize for couchdb, db2 & vertica

2015.1.0rc1
-----------

* Update openstack-common reference in openstack/common/README
* Update redis system.py paths for current RHEL/CentOS/Fedora
* Open Liberty development
* add devstack plugin
* Fixes config templates for mysql & percona
* Remove ordereddict from requirements.txt
* Make integration-tests run quicker
* Adds rpc\_ping method to new datastores
* Use UTC to compute heartbeat age in eject-replica-source
* Avoid unnecessary restart of replication master
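The cluster of "dangling mock detection" and "Fix leaked mocks" entries above refers to guarding against mock.patch objects that outlive the test that started them, so that later tests run against an unexpectedly patched environment. A rough sketch of the idea — hypothetical code that leans on mock's private \_active\_patches list, not Trove's actual trove\_testtools implementation::

    import unittest
    import mock

    class LeakCheckedTestCase(unittest.TestCase):
        """Fail any test that leaves a mock.patch active at teardown."""

        def tearDown(self):
            # Patches started with .start() register themselves here.
            leaked = len(mock._patch._active_patches)
            mock.patch.stopall()  # clean up so later tests are unaffected
            super(LeakCheckedTestCase, self).tearDown()
            if leaked:
                self.fail('%d mock patch(es) left active' % leaked)

This only catches patches activated via start()/stop(); patches used as decorators or context managers clean themselves up.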
* Fixed NotificationTransformer to handle missing InstanceServiceStatus
* Moves taskmanager-common-code for clusters
* Fixes the resize APIs for Vertica-guest
* Use oslo util to determine network interface
* Update Trove to use novaclient v2
* Fix DB2 unit test to properly mock restart
* Rewrites the ClusterView.build\_instances
* Eject-replica-source chooses most recent slave
* Add short document on building guest images
* Fix replica source state validation
* Add support for DB2 datastore in Trove
* Reject negative volume size in API
* Updating Flavor Resize Restrictions
* Fix CouchDB unit test to properly mock restart
* Fix common misspellings
* Implement clustering for Vertica datastore
* Corrects list\_database query for MySQL
* Includes snapshot parameter to vertica & couchdb
* Move sql\_xx params to [database] conf section
* Corrects my.cnf location in RHEL based distros
* Fix the mocking in test\_dbaas.py
* Updated from global requirements
* Remove flaky assert from TestMgmtInstanceDeleted test

2015.1.0b3
----------

* Add support for HP Vertica datastore in Trove
* Replication V2
* Add Mgmt API For Testing RPC Connectivity
* Enhance Mgmt-Show To Support Deleted Instances
* Add support for CouchDB datastore in Trove
* Update config-detach to not remove default config
* bypass\_url required in nova admin client
* Change nova\_proxy\_admin\_tenant\_name to id
* Imported Translations from Transifex
* Cleaned up redundancy between instance update() and edit()
* Inject guest conf files to configurable location
* Updated from global requirements
* Updated from global requirements
* Introduce a classification of datastores and strategies
* Remove now obsolete tox targets
* Updated from global requirements
* convert the max and min values to int instead of string
* Fixes package configuration method in pkg.py
* Delete mysql error log file as root on restore
* Changed error message for datastore parameter for configuration-create
* Correct a test and order of parameters to assertEqual
* Do not use '/tmp' as default guestagent log location
* Updated from global requirements
* Fix PostgreSQL volume definitions
* Create docs test target
* Integrate OSprofiler and Trove
* Provide more readable error message when swift is not installed
* Updated from global requirements
* Use canonical MySQL root pwd reset procedure
* Deletes volume on instance delete after resize
* Imported Translations from Transifex
* Update cassandra.yaml ownership after write\_config operation
* Updated from global requirements
* Update CONTRIBUTING.RST file
* resync oslo-incubator code
* Updated validation\_rules.json due to MySQL doc

2015.1.0b2
----------

* Added 'redis' test group to int-tests
* Changed hardcoded Mongodb username to variable
* Fix backup state check while restoring an instance
* Updated from global requirements
* Add limit stanzas for mongos
* Update DatastoreNotFound status code to 404
* Move cluster strategies to strategies/cluster
* MySQL restore wait for shutdown before killing
* Updated from global requirements
* Correct calls to mask\_password() which no longer work
* Spelling errors fixed
* Eliminate redundant modules from oslo-incubator
* Address predictable temp file vulnerability
* Imported Translations from Transifex
* Fix trove-tox-doc-publish-checkbuild failures
* Quote Postgres names to avoid implicit conversion
* Trove create with --backup fails for postgresql
* Obsolete oslo-incubator modules - processutils
* Use dict comprehensions instead of dict constructor
* Fix MongoDB guest strategy implementation
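The entry "Use dict comprehensions instead of dict constructor" above refers to swapping dict()-over-a-generator calls for the literal comprehension syntax; a tiny hypothetical example (the data is invented)::

    flavors = [('m1.small', 2048), ('m1.medium', 4096)]

    # Old: builds a generator, then calls the dict constructor on it.
    ram_by_name = dict((name, ram) for name, ram in flavors)

    # New: the comprehension is more direct and slightly faster.
    ram_by_name = {name: ram for name, ram in flavors}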
* Using consistent control\_exchange option for rpc
* Obsolete oslo-incubator modules - wsgi
* Obsolete oslo-incubator modules - exception
* Integration with oslo.messaging library
* Obsolete oslo-incubator modules - gettextutils (now oslo.i18n)
* Support string flavor IDs
* Adds negative unittests to test\_backup\_controller.py
* Use unit file to enable systemd service
* Obsolete oslo-incubator modules - jsonutils (now oslo.serialization)
* Obsolete oslo-incubator modules - timeutils

2015.1.0b1
----------

* Updated from global requirements
* Adds negative test to test\_instance\_controller.py
* Enable volume resize tests
* Assign os\_region\_name a default value
* Fix trove resize-volume resize2fs error
* Obsolete oslo-incubator modules - unused modules
* Updated from global requirements
* Clean up github references from docs
* Fix timeout in test\_slave\_user\_removed int-test
* Workflow documentation is now in infra-manual
* Obsolete oslo-incubator modules - importutils
* Eliminate duplicated LoopingCall and LoopingCallDone code
* Forbid replica provisioning from replica site
* Legacy MySQL datastore is shown on datastore-list
* Add missing api example for incremental backups
* Config Group Load Fails If DS Version Inactive
* Remove Python 2.6 classifier
* Rename attrs\_exist() to contains\_allowed\_attrs()
* Add few audit log messages to guestagent module
* Ensure Replication Tests do not use a stale token
* Fix broken instance provisioning with disabled volume support
* Updated from global requirements
* Poll for replica read\_only status in test
* Updated from global requirements
* Create example generator
* Rename generic variable named with mysql specific name
* Deleting failed replication backup can hide error
* Increase instances.task\_description column size
* Fix exception handling in get\_replication\_snapshot
* Update and correct documentation snippets
* Updated from global requirements
* remove keystonemiddleware settings from api-paste.ini
* configuration parameters payload changed
* Eliminate use of sudo for two things that don't need it
* making service catalog for endpoints more configurable
* Added regression test for config with long value
* Imported Translations from Transifex
* Couchbase backup failing
* Added Replication templates for Percona
* Updated from global requirements
* Updated from global requirements
* Check for server attributes before using them
* Validate backup size during restore
* Couchbase Root Password Can Go Out Of Sync
* Document that H301 and H306 are ignored on purpose
* Instance-Delete Should Stop the Database First
* Configuration group checking 0 validation rules
* Update some log calls for translation and laziness
* Yum install should get a list of packages as a string
* Imported Translations from Transifex
* Allow users the ability to update an instance name
* Updated from global requirements
* Logging audit for guestagent/redis
* Miscellaneous Cluster Fixes
* Update config.template for Cassandra 2.1.0
* Increase test rate limit to avoid rate limit error
* add back the deleted parameter in the configuration group tests

2014.2
------

* Logging audit for guestagent/mongodb
* Cluster Error On Missing Volume Sizes Unoptimal
* cluster\_config argument missing in prepare()
* restart\_required cfg-param is bool and not string
* Update contributing.rst to include guidelines on Code Review
* cluster\_config argument missing in prepare()
* restart\_required cfg-param is bool and not string

2014.2.rc2
----------
* convert restart\_required to 'true' or 'false' string
* fixing the flags for guest on resize volume
* convert restart\_required to 'true' or 'false' string
* Refreshed translations
* Mark strings for translation
* Imported Translations from Transifex
* Removing dependency on trove models in the guest agent
* Mgmt Reboot allowed if datastore in crashed state
* Docs: Fix Sphinx warnings

2014.2.rc1
----------

* Use unique passwords for replication user
* Add templates for replica and replica source
* Open Kilo development
* Mandate detach replicas before deleting replica source
* Event simulator II
* Complete mocking for test\_extensions
* Make the replication snapshot timeout configurable
* Use different timeouts for create and restore
* Partially address concerns in Couchbase restore strategy
* Updated from global requirements
* Discover config file for mongodb
* Isolate unit tests from integration tests data
* Deprecate unused entries in cfg.py
* Sync latest process and str utils from oslo
* Mark trove as being a universal wheel
* Document Trove configuration options
* Add postgresql to notification\_service\_id option
* loading configuration parameters from trove-manage better
* Fixed database migration script issues
* Updated from global requirements
* Fix config parameters test for fake mode
* Delete backup created during replication
* Make --repo-path an optional argument for db\_recreate
* Imported Translations from Transifex
* Stop using intersphinx
* Fix NoSuchOptError on Couchbase create
* Strengthens the regex for mongodb json functions
* Add SUSE support in mysql datastore
* Add support to detect SUSE
* Register postgres\_group in trove config
* Marks mysql slave read-only
* Fix issue with intermittent test failures in test\_pkg.py
* Updated from global requirements

2014.2.b3
---------

* Datastore Configuration Parameters stored in db
* In some cases, guest agents may leave temporary config files
* Add detach-replica support
* make backup\_incremental\_strategy a datastore specific option
* Use 'replica' instead of 'slave'
* Fix unit tests to work with random PYTHONHASHSEED
* Updated from global requirements
* Clusters Guest Implementation
* Clusters TaskManager Implementation
* Clusters API Implementation
* Avoid leaking mocks across unit tests
* Fixed restore to work correctly with pexpect
* Add PostgreSQL support
* Cleaned up sample trove-guestagent.conf
* Imported Translations from Transifex
* Removing the XML info from the docs
* Add replication slave info to instance show
* Look up trove instance by ID instead of name
* Snapshot component for replication
* handle repeating mysqld options containing equals
* Set the python hash seed that tox uses to 0
* Use netifaces to lookup IP address on guest agent
* Added the bind\_host configuration option when launching the API
* Move usage\_timeout out of guest options
* Add new checklinks tox environment
* show stdout/err from failed command execution
* Mysql guest agent functionality for replication
* Adjusted audit logging for trove.instance module
* Unit Tests for Mysql replication functionality
* Load trove API extensions using stevedore
* allow both ipv4 and ipv6 hostnames by default
* Update 'list\_users' call to use AGENT\_HIGH\_TIMEOUT
* Adjusted audit logging for taskmanager module
* Correct monkey patching in GuestAgentBackupTest
* guestagent/mysql: Remove unused function
* recent audit log change broke this LOG.debug message
* Mocks utils.execute\_with\_timeout for mongodb tests
* Imported Translations from Zanata
* guestagent/test\_volume.py leaves a file in /tmp
* Imported Translations from Zanata
* Per datastore volume support
* Logging audit for trove/mysql module
* Sync service.py from oslo-incubator with deps
* Updated from global requirements
* Remove accounts use of non-existent Nova extension
* Logging audit for trove/guestagent/datastore module
* Logging audit for guestagent/couchbase
* Logging audit for trove/guestagent module
* fix pexpect.spawn.match AttributeError
* Logging audit for guestagent/cassandra
* Make configuration tests configurable by datastore
* Handle error from execute() when deleting non-existent file
* document running a small set of tests
* guestagent/volume: Remove unnecessary sudo call

2014.2.b2
---------

* Logging audit for guestagent/strategies module
* Add neutron support
* Imported Translations from Transifex
* Logging audit for trove/guestagent/backup module
* Use auth\_token from keystonemiddleware
* guestagent contract for packages should be a list
* Make default extension path relative to pybasedir
* Refactored datastores to use common chown function
* Handle exception from pexpect child.close
* Moved core int-test groups from trove-int to trove
* Add Backup/Restore support for Couchbase
* Manual install page needed swift info
* Restrict backup-list on instance to tenant
* Enhance trove-manage help
* Imported Translations from Transifex
* Fixes redundant get call in a few places
* Remove setuptools\_git requirement
* Fix backup execution workflow
* Add datastore/version name into configurations response
* Imported Translations from Transifex
* Stop cassandra during configuration and volume migration
* Enable usage of config-drive for file injection
* Enable trove to specify cinder volume\_type when creating a volume
* Change default for update\_status\_on\_fail
* Add instance IP to /instances
* Fix updated timestamp for SecurityGroup model
* Updated from global requirements
* Use (# of CPUs) api/conductor workers by default
* Imported Translations from Transifex
* Fixed '--version' for trove processes/utilities
* Logging audit for trove/common module
* Imported Translations from Transifex
* Fix tracking of SG's provisioned by Heat
* Add CONTRIBUTING.rst
* Imported Translations from Transifex
* Updated from global requirements
* Logging audit for trove/db module
* Remove redundant heat templates from codebase
* Imported Translations from Transifex
* Imported Translations from Transifex
* Sync processutils from oslo with deps
* Sync jsonutils (and dependencies) from oslo-incubator
* Add timestamps and instance count to config groups
* Expose trove conductor manager class as conf property
* Updated from global requirements
* Deleting incremental backup metadata after restore
* Fix data too long for column 'task\_description'
* Logging audit for trove/backup module
* Corrects typo in instance models and service
* Logging audit for trove/configuration module
* Add guestagent API calls for replication
* Imported Translations from Transifex
* Fix typos in trove/instance/models.py
* Imported Translations from Transifex
* Added route for Admin API to support guest upgrade
* Add sample admin\_{user,tenant\_name,password}
* Delete undeclared variable in guest-agent API class
* Fix enable on boot when working with systemd and symlinked units
* Add slave\_of\_id to instance model
* Updates developer install doc to use trove cli
* Imported Translations from Transifex
* Reverting deleted tests
* Add datastore version to backups
* Fix inheritance for ConfigurationParameterDeleted
* Add warnings when a path for api\_extensions\_path does not exist

2014.2.b1
---------

* Add a new column and indexes to agent\_heartbeats
* Ensure routing key is specified in the address for a direct producer
* Added an int-test for user-update-attributes
* Adds exception handling around heat stack creation
* Make default extension path relative to pybasedir
* Increase time\_out to 30 sec for failing int-tests
* Use tcp/udp ports from config for heat templates
* Fix calls for proboscis methods
* Apply six for metaclass
* Increased stop timeout for cassandra
* Wait for Couchbase to be ready before node-init
* Enabled H402 flake8 rule
* Fix log reporting for DNS creation process
* pep8: Ignore locale files
* switch from mox to mox3
* Include datastore version details in datastore calls
* Adds the foundation for datastore capabilities
* Remove unused xml config file
* Reports enabled-root for restored instance
* Clean up openstack-common.conf
* Corrects the class name "ExtensionManager"
* Corrects mgmt-taskmanager startup method
* Remove admin\_token from configs
* Cleans up ServiceUser.\_\_str\_\_ method
* Imported Translations from Transifex
* Update database-api to follow OpenStack conventions
* Remove all mostly untranslated PO files
* Updated from global requirements
* Consider datastore version when generating configs
* Fix heat template for cassandra
* debug level logs should not be translated
* Imported Translations from Transifex
* Specify correct constraint name for postgresql
* Get service endpoints from catalog
* Added support of resize-flavor to Redis datastore
* Fix prepare call for redis guest agent
* Migrate v20 not handling exception
* Datastore\_versions.name shouldn't be unique
* Pretty print JSON sample files
* Added fix to support Couchbase resize-flavor
* Imported Translations from Transifex
* Fix datastore tests so that they pass in live mode
* Add support for 'trove root-enable' in Couchbase
* Fix missing use of test config datastore version in tests
* Use six.StringIO/BytesIO instead of StringIO.StringIO
* Resolves volume resize issue
* Fail a test if time.sleep called for no reason
* Improve help for backup\_incremental\_strategy
* Updated from global requirements
* Imported Translations from Transifex
* Unmounting ephemeral if it has already been mounted
* Corrected the assert message in test\_dbaas.py
* Improve help strings
* Imported Translations from Transifex
* Remove usages of deprecated name LoopingCall in rpc
* Check that all po/pot files are valid
* Correct inconsistent state issues with config
* Mocks out file.open to not rely on OS dependent files
* Make sure eventlet starts correctly
* Moved the apidocs from openstack/database-api
* Bind to all interfaces for MongoDB
* Added separate rate limit setting for mgmt POST
* Fix Couchbase Kill Command
* Updated from global requirements
* changing conductor logging levels
* Trove doesn't use extras
* Correct the command to stop cassandra server
* Remove dependencies on pep8, pyflakes and flake8
* Fix internal error generated from config-detach
* Collapse mysql OptGroup Sections
* Changed assert statement of test\_volume\_found

2014.1
------

* Open Juno development
* Fixed unit test to not actually run backup command
* Fix create call for security group rules
* Updated from global requirements
* Remove mockito, and replace with mock
* Add heat.template for Redis datastore
* Don't specify admin\_token in paste config
* Changes Volume Prefix From mysql To datastore
* Tox tests should not require errors in rigid order
* Start using oslosphinx theme for docs
* Imported Translations from Transifex
* Pop instead of get for timeout kwarg
* Change Cassandra to Service Start vs Bin
* Setup trove for translation
* Fixed unit test to not actually run backup command
* Added Backup/Restore validations
* Update sqlalchemy migrate scripts for postgres
* Remove IDENTIFIED BY clause for MySQL REVOKE
* fix default rabbitmq configuration values in sample cfgs
* Fix Timestamp diff in Instance and Notification
* Improve Datastore Not Implemented exceptions
* Hide Datastores In List With No Active Versions
* Removes volumes redefinition in fake nova\_client
* Add timeout on prepare call per datastore
* Parses default configuration of an instance
* Make hostnames in designate driver all lower case
* Fixed Instance Status usage
* Fixes insecure update of /etc/fstab file
* Don't run main() on import
* Fixed backup unittests to use mocked cmd
* Removes extra initialization from config
* Test restore full and restore incremental
* rename and fix the db\_wipe command
* Remove Min/Max for Configuration Group Booleans
* Root\_on\_create per datastore
* Removes XML api from trove
* Increases timeout for guest agent resize\_fs
* Update Oslo wiki link in README
* Adding missing indexes for trove db
* Improve readme.rst

2014.1.b3
---------

* Initial support for single instance MongoDB
* Fix inconsistent usage of mount\_point
* Adding percona secgroup config
* the check for status.status should happen before trying to log it
* simplify dns\_ip\_address code
* Add security group rules for Couchbase
* Remove unused variables
* Fixes restore from incremental backups for Percona
* Initial support for single instance Couchbase
* Make backup/restore pluggable
* Security groups workflow update
* Remove extraneous vim configuration comments
* Fixes get configuration defaults
* Adding "version" to "datastore" field during list instances API call
* Fixes a race condition in resize flavor for Percona
* Raise max header size to accommodate large tokens
* Call debug\_utils.setup() in all entry points
* Use six.moves.urllib.parse instead of urlparse
* Use consistent timeouts for integration tests
* Rename Openstack to OpenStack
* Fix Redis After Configuration Group Merge
* Initial support for single instance Cassandra Database
* fix traceback when default\_version is not in the database
* Fixes a race condition in resize flavor
* Raises BadRequest for grant\_access on ignore\_dbs
* Corrects service URLs from "%s/%s/" to "%s/%s"
* Remove unused admin\_context from conductor manager
* Improve help strings
* Modifying tests to run with different configurations
* Adding additional datastore tests
* Adding Incremental Backups
* Ignore outdated messages sent to conductor
* Adding override.config.template for percona
* adding configuration group support
* Remove copyright and comments from empty files
* Fixes resizes for volumes attached to active Nova servers
* Add clearer help kwarg for max\_volumes\_per\_user
* Make test mysql connection code less confusing
* Corrects matching of heat resources status
* Migrating trove to entry points
* Changing DNS to pass string to driver
* Fix default\_datastore migration script
* Add Neutron support
* Simplify swift storage load logic
* Makes the backup tests less onerous
* Corrects help messages in cfg.py
* Fix Occasional test\_one\_network\_label\_exact Fail
* Replaces local generate\_uuid with common.utils
* Disable redundant DB initialization on guestagent's start
* RootReport through Taskmanager
* Changes encoding from 'ascii' to 'utf-8'
* Adds exception handling to heat code
* make datastore\_version\_id required

2014.1.b2
---------

* Mask database user's password in trove logs
* Datastores improvements
* changing the routes for the api to be clear
* Edits on help strings
* Validate databases for user grants
* Spelling correction in taskmanager/models.py
* Adds Missing \_() for Exception Message Translation
* Add Volume Resize filesystem size assertion
* move datastore manager to the datastore version
* Remove copyright from empty files
* Replace oslo.uuidutils module with trove.utils
* Updates oslo.context
* Relocates generate\_random\_password to common.utils
* Fixup indentation errors found by Pep8 1.4.6+
* Fixed misspelled help message in cfg.py
* Log service\_status.status instead of service\_status
* Added Redis Crud Operations
* Removes deprecated usage of BaseException.message
* Enabled F403 flake8 rule
* Update for datastore tests to support multiple datastore types
* Make use of IP filtering when creating DNS records
* Instance details view shows hostname (if it has it) or IP
* Using base32 encoding to generate DNS records
* Removes unused "server" from taskmanager
* Add volume total to Mgmt Instance Get
* Adds LOG in event\_simulator.py
* Removes BuiltInstanceTasks.get\_volume\_mountpoint
* Adds tenant\_id to guest\_info in heat code
* make the bin scripts called with main()
* Removes directory creation prior to \_tmp\_mount
* Cleans and improves heat code path
* server identifier corrected
* Corrects the help message for trove\_volume\_support
* Adds non-volume-support to create server with heat
* Adds missing dependency to the documentation
* Removes privilege modification for MYSQL\_BASE\_DIR
* Changes command to create the directory
* Cleans the method init\_engine
* don't try to concatenate a string with a variable that can be None
* Removes dead code from trove/tests/api/root.py
* Acknowledge Nova VERIFY\_RESIZE as RESIZE state
* Properly mark a backup as FAILED if the process failed
* Moving storage logic out of backup runner class
* Paginate backup list api
* Updates trove-guestagent.conf.sample
* Enabling H403, H702 rules
* Fix deleted\_at timestamp in usage tests
* Fix mysqldump backup and restore
* check for invalid hostnames on update\_attributes
* Add -U to pip install command in tox.ini
* use\_stderr should be False for tox tests
* Fixed typos in files in trove/guestagent
* Updated from global requirements
* Fixed PEP8/flake8 issues
* Replace "tokenauth" by "authtoken" in configuration files
* Update tox.ini to use new features
* Removing IDE specific ignores
* Extract suffix from req URL to avoid escaping dots
* Unittest Case for get\_user
* Added ability to pass custom command-line options to backup runner
* Updates tox.ini to remove suppressed H401
* Conductor Does Not Default to Verbose/Debug Logs
* Fixed misspellings of common words

2014.1.b1
---------

* Removes unused import statements
* db: make compatible with SQLAlchemy 0.8
* Removes security\_groups from \_create\_server\_volume\_heat signature
* setting fake mode to print startup errors
* Conductor proxies host db access for guests
* Clean up zombie processes on backup failure
* Corrects heat template for mysql
* Adding designate dns support to trove
* need to mock out of the clear\_expired\_passwords for tox
* Remove radmin credentials from create\_heat\_client
* Allow query logging
* Fixing typos in \_create\_server\_volume
* Add default case for mysqld\_bin
* Add support of datastore types
* User-Create Host Does Not Allow Wildcarded Octet
* Externalization of heat template
* Update openstack/common/lockutils
* Added logging in taskmanager models && \_() strings
* Fix action\_result check
* Convert to a more modern form of openstack-common.conf
* Update tests to work with the latest testtools
* host response attribute should prefer 'host' instead of 'hostId'
* Add a hook for backup processes to check if successful
* Replace GUID with generated password
* Add optional ip address filter for hiding ips
* Fix checksum verification exception
* Fix bug in Security Group association to Instance
* Fixes trove-api daemon fake mode functionality
* Add Backup/Restore Checksum Validation
* Fix white\_box mode for integration tests
* Add tests for hosts mgmt commands
* Security groups workflow update
* Fix User ID in Context and Notifications
* Fix service\_type in instance creation using heat services
* Simulates events so run\_tests.py executes 10x faster
* Moved create\_instance DNS attempt to post-prepare
* Provide service type management code structure
* Fixes pagination with non-string markers types
* Replace deprecated method aliases in tests
* Quote and escape pagination markers
* Update statuses on GA timeout
* Fixing restart tests
* Update trove-guestagent.conf.sample
* Add GA related parameters to trove-taskmanager.conf.sample
* Modifying tests to use the compat client
* Task manager refactoring done
* Fix Timestamps for Resize Usage Events
* Service Registration using conf file
* PEP8. E125, F811, F401, H703
* Allow service\_id per service\_type for Usage Events
* Fix quota issue where usages can drop to negative value
* Fix the fake nova server implementation
* Add tenant id to guest\_info file
* Remove Duplicate trove\_auth\_url Property
* Adding location attribute to Fake Backup object
* Correct the fake implementation of UsageVerifier
* Extract generic part of GA code from MySQL specific modules
* Allow early host % on validate
* fixing symlink attack bug present in the application
* Volume timeout is set too low
* Update from global requirements
* Added server\_id to my.cnf template logic
* Fixed method signature \_create\_server\_volume\_heat
* PEP8. F841
* Require oslo.config 1.2.0 final
* Pydev remote debugging support added
* Vote for channel logging
* Duplicate Import Statement
* User and Database List in Create Not Validated
* Support Optional Super User in Instance Create
* oslo-incubator rpc update
* Replace OpenStack LLC with OpenStack Foundation
* Allow optional availability\_zone to be passed
* Rename webtest to WebTest
* Set sane defaults for required conf params in trove/common/cfg.py
* PEP8 rules. H102,103,201
* Adds instructions for manual Trove installation
* Fix and enable gating on H702
* Fixed Admin Auth Token in Notification
* Fixed backups so that they no longer fail when backup size is > 2GB
* Use LOG.exception in place of sys.exc\_info
* Fixed directory create exec
* Move ServiceStatuses from trove.instance to trove.common
* Open Icehouse development
* Mark sensitive cfg options with secure flag
* Modify User Attributes API - Fix
* Made apischema a bit less restrictive
* Enclose command args in with\_venv.sh
* Fix and enable gating on H703 - string localisation
* Do not use locals() for string formatting

2013.2.b3
---------

* service\_statuses updated\_at Field Not Set
* Increased unit test coverage
* Support Security Group Name Prefix Customization
* Implement resize volume method
* Adds includedir back to templates
* Implementing heat as an optional provisioning system
* Add and associate security group rule with group
* jsonschema upgrade to v1.3.0(as minimal)
* Trove - service\_statuses status set to delete when instance deleted
* Modify unit test to allow cli to merge
* pep8 checking was added for bin folder
* Update nova\_volume\_url to cinder\_url in CONF files
* Fixed use of word separators in Notification
* Changed system commands depends on OS version
* Fix admin extension gives 500 when listing instances
* Adds init file for routes directory
* Ensure safe format strings for TroveError
* Reindenting a function call in taskman models
* Guest config file injection
* Update oslo.cfg to >= 1.2.0
* Add instance cloudinit support
* Fix resize volume stuck in "RESIZE" status
* Add RPM class implementation
* Use same Nova Client throughout Fresh Instance Task
* Secure root password on restore from backup after root-enable
* Fixing bug in Interrogator tests mocks
* Fix bug with insecure instance on Prepare loss
* Moves extension files to routes
* Removing mycnf static files from contrib
* allows a mgmt user to migrate an instance to a specific host
* Configurable network labels used for showing IPs in instance details
* Replace nova client with cinder client to use volume
* Fix spelling of python
* Adding volume size to the backup views/models
* Modify User Attributes - name, host and password
* Renamed secgroup description
* Added docs and made template filename variable
* Turns pkg.py into a system aware packager
* Clear new or building backups on reset-task-status
* Added param name to validation error messages
* Fix drift in deleted timestamp for Notification and Database
* Added developer documentation for Trove
* Makes two tests wait for the instance to go ACTIVE

2013.2.b2
---------

* Change the swift file deletion to use the manifest
* Create templated config files
* Add service\_type to the instances table
* Migrating the create/resize code to use flavor obj
* Bump pbr to the version 0.5.16
* Make Volume conditionally required attribute in Instance Create Schema
* Wildcards in User Host
* Update to latest Oslo rpc modules
* Fixing the broken API contract for flavor\_id
* Restore should use pgrep instead of mysqladmin to check for mysql down
* Python 3.x compatibility fixes
* Adding instance id to the security group extension API
* Enable disk usage view in instance detail view
* API Validation for Trove API
* Requirements: Remove factory\_boy, allow SQLAlchemy 0.7.10
* Fix Notifications
* Update kombu library version requirement
* GuestTimeout needed to be imported
* Fixes my.cnf not reverting with a resize revert
* Fix few missed issues with the reddwarf -> trove rename
* Rename README to README.rst
* Start using Hacking
* Fix up trivial License Header mismatches
* Package AUTHORS and ChangeLog file
* Fixed backups GET and DELETE to restrict access to the owner of backup
* Rename from reddwarf to trove
* Add trove.egg\* entry in .gitignore
* Fix faulty 404 errors when requesting bad versions
* Change server exceptions to show less details
* Changed instances of the name reddwarf with trove in the README
* Adding support for encrypted backups
* Remove explicit depend on distribute
* Renamed repos to trove
* Fixed restore to wait for full mysqld shutdown before attempting restart
* Fix quota bug where it raises exception when resources are deleted
* Adding Exists Event Publishing
* Allow remote implementations to be overridden
* Fixed race condition in the Restore workflow
* Adding delete restore instance tests
* Integer Flavor Ids
* chmod 755 bin/reddwarf-mgmt-taskmanager
* Renaming security group URL API
* Updated to use normal requirements names
* Migrate to pbr
* Don't require an admin user if there isn't a need
* Fixing delete backup
* Adding missing config value
* Use database name validation only on listing and loading of databases
* setting up the right percona pkg to be used - bug1185205
* Adding a SERVICE\_REGISTRY for percona - bug 1185138
* Ephemeral volume support
* Changes the conf files so redstack doesn't clobber them anymore
* Fixed format string vulnerability in reddwarf call to OpenstackException handler
* Added optional arg to launch child processes
* Backup and Restore for reddwarf instances
* Fixes some user and user-access call errors
* Re-introduced support for rpc delete\_queue
* Refresh Oslo code - add support for ssl
* Migrate to flake8
* Add flavor API support for ephemeral
* Stop granting users GRANT OPTION by default
* Adding the start of notifications
* Controller and API changes for backups
* Refreshed Oslo Code
* added malformed json tests
* Makes the guest work for more than just mysql
* Quota tests
* Adding ability to run XmlLint for each API call
* s/OpenStack LLC/OpenStack Foundation/ in Copyright
* Remove unused authutils
* Add Apache 2.0 LICENSE file
* fixing taskmanager exception after migration is complete
* Fixes test in response to optional hostname fix
* Updating tests to use altered quotas show method
* fixing the flavor tests
* Addresses xml issues for quota unit tests
* Adding a running method to the Backup Model
* Stop the deprecated use of Exception.message to please Python
* Adding checks when updating a quota
* Added support for Security Groups via a new extension
* Add snapshot ORM to reddwarf
* Adds optional hostname to calls involving users
* Addresses failing quota int-tests
* Fixing the signing dir and cleaning up the paste config
* fix coverage for jenkins - temp fix
* Fix for missing quota property for int tests part1
* Adding instance ID to DNS error message
* Adds absolute limits to limit API call
* update MANIFEST.in to include specific scripts in etc/reddwarf
* Ensure to\_dict() returns auth\_tok
* Pinning proboscis to a specific version
* Test no accept headers
* Update test-requires to point to rd cli tarball
* Restoring the ability to run fake mode locally
* Added tests for Modify User Call
* Add python-keystoneclient to deployment dependency for reddwarf
* Tests the API in XML mode
* Refresh setup.py from Oslo
* Rate limits implementation
* percona image for reddwarf
* Quota feature
* Store the context in the local.store
* Use tarball for python-reddwarfclient dependency
* Fixes data type bug in get-user call
type bug in get-user call * Joins oslo dict values with reddwarf values * Fixing run\_tests.py so it emits an error upon failure * remove the namespace\_packages from setup.py * Implement alternate root privileges * Change default service\_type in tests to 'database' * Modify-user features * Added the cover directory to .gitignore * Specify Python 2.7 for cover environment in Tox * Ignore .testrepository directory * Prevent Upstart from restarting guest in Restart tests * Adds reset-task-status mgmt api instance action * Add missing Import * Fixing the coverage reports for the unittests and adding support for TestR * Adding a config option for running XML client * Add more unittests to guest agent * Negative Taskmanager Resize/Migration fixes * Add unit tests for guestagent.db.models.py * Fixing race condition during instance deletion * testr unit tests for versions api * Add unit tests for guestagent.api.py * Add unit tests for guestagent.volume.py * Checks guest status during migration * create a test adapter for entering an instance and executing a cmd * Adding flags to ssh cmd to bypass host checking * More changes to facilitate oslo * Fix for bug where returned instance status in UnprocessableEntity exception is an empty dictionary * Consolidating multiple volume flags into a single flag * Adding guest agent pkg unit tests and running them with testr * Add unit tests for guestagent.models.py Delete guestagent.utils.py since it is no longer invoked anywhere * fix int-tests running with out volume * Fixing property for pid\_file from "mysqladmin --print-defaults" * Add unit test for guestagent.service.py and query.py * ADD unit tests for guestagent/manager.py * add back the mysql\_base\_dir variable to the guest * Check for 'reddwarf\_can\_have\_volume' * - switch to testtools - remove pep8 warnings - more unit tests for dbaas.py, covers create\_user, enable\_root - refactoring * Update oslo codebase within reddwarf * Avoid null pointer. Fix pep8 mystery problems * Fixed bug 1091927: Pep8 tests are broken for reddwarf * Adding some dbaas.py unittests * Avoids using guest mgmt API functions * Part 1: Create group for simple unit tests * Correcting a log message in resize code * Better defaults that fix volume support * After migration, don't null the instance flavor * Fixing DNS issues related ot changing db\_api * Updates tests to run in other configurations * Fixing the constant for mysql server validation in apt * Change validation to require non-blank name for instances * Fixes another fake mode eventlet bug * Always call 'spawn\_after' for eventlet in fake mode * specify rpc version for guestagent manager fix bug # 1078976 * Update tools/install\_venv.py to work w/ latest pip * No longer import db\_api while parsing db \_\_init\_\_ * Updated the README * Adding test for volume used attribute for management api * Use reddwarf instead of reddwarf\_lite * Add volume usage information to the management api detailed view * Adding tests to Reddwarf * For bug 1079863, fake commit * dummy changes * This is to fix bug 1079827. Please work * remove double colons * Removed TODO: * Removed unused import. 
Fixes bug #1078522 * Uses the view to circumvent an error related to the assumption that instance.server is present * Add vim formatting line * Do not update flavor if timeout occurs in resize * added a TODO: * Remove tabs in sample my.cnf * checks before deleting kwargs from the context * Removes the vestigial tests * Fix type in test-requires * Aligned tox file with project * Added .gitreview file * Get-host-by-name now uses RD instance status, not vm status, in instance list * Fix some PEP8 violations * Adding the ability to rescan server volumes to fake mode * Mgmt migrate instance call for reddwarf * Get rid of one-character-per-entry traceback logging * Require admin\_context on host and volume calls * Fixes reference to host in instance views * Any() and All() don't short-circuit * Moved the agent heart beat check to inside the guest api class * Indentaion fixes all over the place * Removing the "fake-mode" tox env in favor of specifically using 2.6 * Added PEP8 to tox and repaired a hasty syntax error * PEP8 fixes, mostly whitespace and line-length changes * Adding the mgmt taskmanager * Improved ability to run fake mode in CI environments * Since the comparison inside is <=, the message ought to reflect that * Revamped the mgmt instance call * Added code to declare a topic consumer * mgmt call to get hardware info (cpu and memory) from an instance * Adds filtering for deleted instances in the mgmt instance lists. Fixes deleted and deleted\_at fields * Fixed fake mode, which didn't work with our new changes * mgmt status call should allow for deleted instances and show them as shutdown * add exception handling for trying to update bad instances * Fixing bad import to reflect openstack common RPC * fake mode working with new novaclient create\_server\_volume method * Removed fault guest queue delete code since it's already in delete\_async * Fixed small bug in fake mode * Updated metadata to support XML in the mgmt api * Removing unnnecessary line of code, which was causing mgmt/instances to not load correctly * Host list detail now includes the tenant id of each instance * Adding task\_description to mgmt instance model. 
Wrapped a 404-happy server load to fix mgmt instance list for busted instances * Adding task\_description to mgmt instance model * management api call to list all accounts with non-deleted instances * Allowed us to grab a mgmt instance that lacks a backing server * adding mgmt action to reboot an instance * Fixing xml serialization for accounts * Pruning one more tree() from the codebase * Removes defaultdict references from mgmt account and instance views * Added an admin task-manager * adding management host update action * Allowing resizes to be performed when MySQL is down * Moved mgmt instances into its own directory * Adding the deleted filter for account instances * Mgmt storage device details * fixing diagnostics tests for fake mode * Added attributes for management instance get * Mgmt instances * Adding MGMT hosts call * Adding a accounts management api extension Adding fake tests for mgmt accounts * Updated RPC code * admin users should use their correct tenant id as well * add updated attribute to list all versions, add ability to list individual versions * Adding support to use the nova create server with volume * Adds ignore\_dbs to configs, and makes the models respect it * Add reset-password action and return Not Implemented * Adds root to the ignore\_users list and forces user list to obey it * Fixed XML serializer to work with pagination * Trimming spaces when reading from the config as a list of comma separated values * Changing the Config get to always use a default value * Adding the ability to get types other than strings to the Config class * Fixed a syntax error * adding logging to the service conf files * Delete can now delete errored instances * Adding tox support to Reddwarf * password check shouldn't look in ignore\_users * added an ignore users option, e.g. prevents such users from being deleted, also only get is root enabled info by checking the db * Changing the max\_instances\_per\_user to 55 in the test config file * change usage table to usage\_events * Adding fake mode support to the delete\_queue function * adding usage table * Task manager will now detect some creation errors and mark the instances * Delete the guest queue when deleting an instance * don't raise an exception if there is an error, allow it to poll until timeout * Allowing resizes to be performed when MySQL is down * Moved functionality into wsgi.Controller * Give the nova instance the real hostname if we've got it * Adding a fault wrapper to catch and wrap exceptions that are thrown as plain text * Fixing delete polling in the taskmanager * Simple per-user instance quota checking in the create call. PEP8 fixes * Forcing HTTPS in pagination next links * Getting rid of our hack to simulate 404's for instance GETs * Polling until the server is deleted * Adding the create users and create databases to the fake mode prepare * Adds deleted and deleted\_at fields to the instance table * Fixing DNS hostname code to save info to DB in taskmanager * Adding default for users * modify prepare call to create the users given on a create instance * Refactoring the instance load method Adding back the volume used parameter. Added timeouts for all guest sync calls. Refactored the instance models * Adding custom taskmanager q * Disabling local-infile in my.cnf Adding max\_user\_connections and updating max\_connections to +10 of user\_connections * Reversing the order of the dns save * Removing hostname from list instances * Farvel /flavors/detail. 
Also consolidating the API() into a common class * Checks for an empty body in action requests and bottoms out appropriately * Fixing no newline spaces in xml * Actually fixing the regex this time * Au revoir /instances/detail * Fixing the whitespace&newlines in xml * Preserves our contracted error response names by mapping webob exceptions to our named errors. Also repairs references to rd\_exceptions that were missed in a refactor * hacks to get the example generator up and running again * Fixing xml serialization/deserialization \* Adding custom metadata to properly serialize xml for our needs \* Fixing the deserialization of xml objects as plurals \* Adding XMLNS \* Fixing the resize funk * Changes defaults to None in the Query constructor * Adding a admin context check used for the management calls * Query class now makes use of GROUP BY. This fixes a bug with list\_users * Adding the user to the context * Fixed resize flavor code to correctly fetch new info * Fixing the way the common exceptions work with our webob objects * Took vcpus out of flavors view * Changed link generation code * Changing volume\_size to an integer * Fixes an error in update\_db * Reverting migration 007 to as it was and creating 008 * No longer call to Nova to grab server or volume info for majority of instance calls * Adding taskmanager to setup * Making db instance restart async * Async instance create operation * Fixing the 200vs202 * Async operation for instance resize/delete * Raise exceptions on user or database if a resource with the same name is found * Fixing validation to not force public RDL users to have a volume * volume list should return a list of dict and not objects * Resize live volumes attached to an instance * No longer make N calls to the volume API when listing instances + list bug fix * Minor fix for poll\_until * Fixed infinite loop polling for ip address * Serialize XML correctly if instance isn't found * Resurrecting poll\_until * Changing version in reddwarf.conf.sample * Adds pagination to instances, databases, and users. PEP8 fixes * Fixing validation for volume size * First beginnings of pagination * Reinstantiating the Task Manager aka Reddwarf Manager * Adding config for device mapping * Added some additional assertions to the resize flavor code * Dns bug fixes related to usernet ip * Optional hostname in view * Fixing the version * Fixing the returns for no-body calls * Fixed typo * Issue 500 if the volume fails to prov * Default hostname and minor comment * Changing rsdns\_records table name to be rax independent * DNS Support for Instance IP * Updating the port in reddwarf-api * Updating reddwarf-server to use the config for ports, instead of defaulting at the command line args * Adding image\_update call to reddwarf-manage * Adding fields to get better feedback from the agents * updating the volume service endpoint to version 1 instead of v2 * Added another flavor to what gets returned in fake mode * Bypassing auth/svccatalog in novaclient * Fixes a problem when an instance exists, but the root history doesn't. Properly returns Never/Nobody now * Moved root history out of instance details and into standalone mgmt call. 
PEP8 fixes * No longer require a volume * Fixed things suggested in the pull request * Made resize work in fake mode * Made resize almost work * Continued work on resize\_flavor * Adding manifest * pep8 fixes and resolve a few other rebase issues * clean up and change the volume desription to have the instance id * Allowing a user to add a volume to the instance on create api call * Fixing the setup.py to have the new reddwarf api * Adding a reddwarf-api binscript * Fixed a fake guestagent call so the tests pass in fake mode, too * Moved X-User call to WSGI middleware * PEP8 on a comment and removed a log debug line * Fixed a comment and added InstanceStatus.ERROR since it seemed to be missing * Extending mgmt api from core instancce api * Added root history to instance details. Still need to move the root history over to mysql/extensions, but the int tests work right now * Started the mgmt framework * Fixed root history schema; skeleton of root history model * Making the guest more extensible for services * More work on the restart action * Fixed a few simple issues with the restart code * Continued to work on restart API * Added API code for actions * Making instance creation a bit more extensible * Added fakery to the various database operations * Renaming database to mysql * Syntax errors * Adding multiple addresses instead of just one * PEP8 * Adding initial dbs to the prepare call * Removed some overzealous wrapping * Removed some more wrappers around some exceptions * Fixed some RPC casts and context problems that happened during a rebase * Fixed some of the overzealous \_() wrapping to exclude the parameters being handed to the format strings * Every time I see a LOG.something(, I've wrapped \_() around its contents * Show ips based on a flag value * Fixing the mysql list calls * Created test fakes for Nova and the Guest. With these you can run the server with no external dependencies! 
* Forcing https * Removed some vestigial CONFIG and LOG lines * Moved build\_links and req into the view, cut out some redundant exceptions * Used new style of context and moved the req to the constructor in the views * Fixed the discrepancy between detail and plain view; fixed links in models * Fixed the Flavors API so the URLs returned are correct and all tests are passed * Adding Flavors API: models, views, and service * Removing the link from webob in exceptions * Made the constant proper cased * Making the exception\_map work for extensions * Fixing up the internal\_message * Updated the context object in all services * Fixed the instance list operations * Changing 201 to 200 * Added validation to the users/databases/root calls * Adding validation to user/schema * Adding root enabled * Fixed the instance status returned during instance deletion * Fixing the queues to use uuid * Fixing the extensions to behave like existing reddwarf * Added schema calls for list/create/delete * Added the delete user call * Built out create\_user call * Fixed a small bug, added error msg as suggested by cp16net * Adding list\_users for mysql users * Fixed bug in status property, added message to UnprocessableEntity error * Added status code, return 422 when trying to delete a busy instance * Adding the guest conf.d writing * Now returning the correct status strings * Adding validation of the api body * Simple change to delete instance call * Getting the guest status update working * Fixing the server load call and amqp cast\_with\_consumer * Minor tweaks while trying to get Instance.load to work * Fixed our keystone hack * Changing instance from a subclass of Compute instance into its own object * Adding dbaas-mycnf to the contrib folder * Furthering the guest agent prepare call * Adding the guestagent * Adding getattr and setattr back to fix the bug I just added * Fixes pep8 and circular import issues * Changed the proxy\_tenant\_id to "reddwarf" since this is what KeyStone expects * Fixing up the instance creation issues * Fixed auth to work with keystone * Adding more tests * Changed the version name attribute to id * Adding database instance details for show/create/delete/list * Creating a model unit test to test the data() functionality * Adding basic skeleton for testing controllers. (stole from melange) * Fixing the assignment issue * Added the use of context instead of passing just the token * removing the reddwarf\_tenant\_id * fix the config * fixing pep8 things * updates to get create working * Added gitignore and re-fixed pep8 violations * Removed API folder * Fixed Pep8 errors * adding some buggy bugs and updates from keystone/novaclient * Added in the guest manager code \* Added the bin script for the guest agent \* Added a sample config file for guest \* Migrated guest-agent to guestagent \* Added a manager and service for the guest * Got the unit test venv environment working * keystone made some changes * Adding some basic service code from nova. \* Adding the proper taskmanager bin script \* Adding a taskmanager impl (needs to be a proper baseclass) \* Adding novas LoopingCall to utils \* Updating dummy rpc cast in the database service so it sends to the task manager * Added the taskmanager class with some testing rpc code \* Fixed a bug in rpc kombu w/ a bad durable declaration \* Fixed the name of the queue exchange \* Added a bit of rpc code to the taskmanager service for consuming \* \* This is mostly experimental at this point!!! 
\* \* This should be refactored into something common!!! * Initial checkin of rpc code \* Stole most of the code from nova rpc \* Updated the rpc and kombu code so it works with reddwarf \* Import of openstack-common, added the context from common \* Extended the common context * Fleshed out the basics for the guest-agent scp firstboot. \* Copying ssh keys from the host to the image \* Added a bootstrap\_init.sh which does the scp work to the instance \* Finished the bootstrap.sh so it will upload to glance \* Added a service images table for identifing the current image on create \* Added some dummy guest-agent code for testing purposes \* Added a delete method, which is not finished * First pass attempt at a service\_image registry in the db \* Added some finder logic to the base models \* Added service\_image to models \* No longer passing in the image name in the database create call \* Misc mapper stuff w/ adding the table * Adding next\_steps.txt so everyone can see my thoughts * Moving the old bootstrap in anticipation of nuking it * Figured out how to create/add qcow ubuntu images \* new bootstrap/bootstrap.sh shows the proper way to create a image \* modified the funkybugs cuz i squashed one in nova proper \* added apt-cacher-ng and such to the old bootstrap (dont use it yet) * Adding the beginnings of ubuntu bootstrap * Adding the venv/unit test framework stuff \* run\_tests stuff \* gerrit stuff \* test-requires for pip * Adding the missing reddwarf-manage binscript * Getting hooked up w/ the db. \* connection db and all that jazz \* migrations are working \* local DB models are working (only save) * Making a data/view model based on remote objects \* removing the novaclient from the actual service * Adding extensions to the app, and shell extensions for users & schema * Adding a bit of code to get novaclient working via proxy token * A bit more cleanup to remove melange code, and making the auth code work again * Making the API look more like melange. this made the api a TON cleaner than modeling it after the existing nova code. \* now uses no nova imports and still has things like extensions, versions, etc. \* created a new server binscript \* made a new common folder with all the non openstack-common wsgi, exception, extensions etc... \* using openstack-common extensively \* changing the version to use v0.1 \* stole some code from melange to make all this possible <3 melange team * Experimental changes to create instance. this is untested * Adding idea project folder * Getting the delete call and refactoring the req/proxy\_token a bit * Got the basics of the viewbuilder working for list instances * Got a nice API shell working \* uses devstacks install for nova/keystone/et al \* talks to nova via novaclient. \* adds a few extensions to show how its done \* has a single call to list instances \* found a few minor bugs to discuss w/ nova crew \*\* Note in order to run this you have to mod the code downloaded by devstack or have local symlinks to nova & novaclient in your src tree running trunk This will get dealt with soon (it is a weekend!) 
* Adding a gitignore * Adding a script for getting the environment up and running * Folder structure layout stuff * Initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/HACKING.rst0000644000175000017500000000037100000000000015746 0ustar00coreycorey00000000000000Trove Library Specific Commandments ------------------------------------- - [T103] Exception messages should be translated - [T104] Python 3 does not support basestring; replace basestring with six.string_types - [T105] Validate no LOG translations ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/LICENSE0000644000175000017500000002363700000000000015163 0ustar00coreycorey00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner.
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8081114 trove-12.1.0.dev92/PKG-INFO0000644000175000017500000000551700000000000015254 0ustar00coreycorey00000000000000Metadata-Version: 1.1 Name: trove Version: 12.1.0.dev92 Summary: OpenStack DBaaS Home-page: https://docs.openstack.org/trove/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ===== Trove ===== .. image:: https://governance.openstack.org/tc/badges/trove.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Trove is Database as a Service for OpenStack. Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://opendev.org/openstack/trove For information on how to contribute to trove, please see CONTRIBUTING.rst_ and HACKING.rst_ .. _CONTRIBUTING.rst: https://opendev.org/openstack/trove/src/branch/master/CONTRIBUTING.rst .. _HACKING.rst: https://opendev.org/openstack/trove/src/branch/master/HACKING.rst * `Wiki <https://wiki.openstack.org/wiki/Trove>`_ * `Developer Docs <https://docs.openstack.org/trove/latest/>`_ You can raise bugs here: `Bug Tracker <https://bugs.launchpad.net/trove>`_ The plan for trove can be found at `Trove Specs <https://specs.openstack.org/openstack/trove-specs/>`_ Release notes for the project can be found at: https://docs.openstack.org/releasenotes/trove Python client ------------- Python-troveclient_ is a client for Trove. .. _Python-troveclient: https://opendev.org/openstack/python-troveclient Dashboard plugin ---------------- Trove-dashboard_ is an OpenStack dashboard plugin for Trove. .. _Trove-dashboard: https://opendev.org/openstack/trove-dashboard References ---------- * `Installation docs`_ * `Manual installation docs`_ * `Build guest image`_ .. _Installation docs: https://docs.openstack.org/trove/latest/install/install.html .. _Manual installation docs: https://docs.openstack.org/trove/latest/install/manual_install.html .. _Build guest image: https://docs.openstack.org/trove/latest/admin/building_guest_images.html Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/README.rst0000644000175000017500000000331100000000000015634 0ustar00coreycorey00000000000000===== Trove ===== .. image:: https://governance.openstack.org/tc/badges/trove.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Trove is Database as a Service for OpenStack. Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://opendev.org/openstack/trove For information on how to contribute to trove, please see CONTRIBUTING.rst_ and HACKING.rst_ .. _CONTRIBUTING.rst: https://opendev.org/openstack/trove/src/branch/master/CONTRIBUTING.rst ..
_HACKING.rst: https://opendev.org/openstack/trove/src/branch/master/HACKING.rst * `Wiki <https://wiki.openstack.org/wiki/Trove>`_ * `Developer Docs <https://docs.openstack.org/trove/latest/>`_ You can raise bugs here: `Bug Tracker <https://bugs.launchpad.net/trove>`_ The plan for trove can be found at `Trove Specs <https://specs.openstack.org/openstack/trove-specs/>`_ Release notes for the project can be found at: https://docs.openstack.org/releasenotes/trove Python client ------------- Python-troveclient_ is a client for Trove. .. _Python-troveclient: https://opendev.org/openstack/python-troveclient Dashboard plugin ---------------- Trove-dashboard_ is an OpenStack dashboard plugin for Trove. .. _Trove-dashboard: https://opendev.org/openstack/trove-dashboard References ---------- * `Installation docs`_ * `Manual installation docs`_ * `Build guest image`_ .. _Installation docs: https://docs.openstack.org/trove/latest/install/install.html .. _Manual installation docs: https://docs.openstack.org/trove/latest/install/manual_install.html .. _Build guest image: https://docs.openstack.org/trove/latest/admin/building_guest_images.html ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6481085 trove-12.1.0.dev92/api-ref/0000755000175000017500000000000000000000000015472 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.668109 trove-12.1.0.dev92/api-ref/source/0000755000175000017500000000000000000000000016772 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/api-versions.inc0000644000175000017500000000061200000000000022103 0ustar00coreycorey00000000000000.. -*- rst -*- ============ API versions ============ Lists information for all Database Service API versions. List versions ~~~~~~~~~~~~~ .. rest_method:: GET / Lists information about all Database Service API versions. No authentication needed. Normal response codes: 200 Response Example ---------------- .. literalinclude:: samples/versions-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/backups.inc0000644000175000017500000000755700000000000021127 0ustar00coreycorey00000000000000.. -*- rst -*- ======= Backups ======= List database backups ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/backups Lists all database backups for a project. You can filter results by using query string parameters. The following filters are supported: - ``instance_id={instance_id}`` - Return the list of backups for a particular database instance. - ``all_projects=True/False`` - Return the list of backups for all projects; this is an admin-only parameter by default. - ``datastore={datastore}`` - Return a list of backups of the same datastore. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backups: backup_list - created: created - datastore: datastore2 - datastore.type: datastore_type1 - datastore.version: datastore_version_name - datastore.version_id: datastore_version_id1 - description: backup_description1 - id: backup_id - instance_id: backup_instanceId - locationRef: backup_locationRef - name: backup_name - parent_id: backup_parentId1 - size: backup_size - status: backup_status - updated: updated Response Example ---------------- ..
literalinclude:: samples/backup-list-response.json :language: javascript Create database backup ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v1.0/{project_id}/backups Creates a database backup for an instance. In a Trove deployment with the service tenant enabled, the backup data is stored as objects in the OpenStack Swift service, in the user's container (``database_backups`` by default). Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - name: backup_name - instance: backup_instanceId - parent_id: backup_parentId - incremental: backup_incremental - description: backup_description Request Example --------------- .. literalinclude:: samples/backup-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - created: created - datastore: datastore2 - datastore.type: datastore_type1 - datastore.version: datastore_version_name - datastore.version_id: datastore_version_id1 - description: backup_description1 - id: backup_id - instance_id: backup_instanceId - locationRef: backup_locationRef - name: backup_name - parent_id: backup_parentId1 - size: backup_size - status: backup_status - updated: updated Response Example ---------------- .. literalinclude:: samples/backup-create-response.json :language: javascript Show database backup details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/backups/{backupId} Shows the details of a backup. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - backupId: backup_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - created: created - datastore: datastore2 - datastore.type: datastore_type1 - datastore.version: datastore_version_name - datastore.version_id: datastore_version_id1 - description: backup_description1 - id: backup_id - instance_id: backup_instanceId - locationRef: backup_locationRef - name: backup_name - parent_id: backup_parentId1 - size: backup_size - status: backup_status - updated: updated Response Example ---------------- .. literalinclude:: samples/backup-get-response.json :language: javascript Delete database backup ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v1.0/{project_id}/backups/{backupId} Deletes a database backup. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - backupId: backup_id././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/conf.py0000755000175000017500000001473000000000000020301 0ustar00coreycorey00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # trove documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. 
# # All configuration values have a default; values that are commented out # serve to show the default. import os import sys extensions = [ 'os_api_ref', 'openstackdocstheme' ] html_theme = 'openstackdocs' html_theme_options = { "sidebar_mode": "toc", } repository_name = 'openstack/trove' bug_project = 'trove' bug_tag = '' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Database API Reference' copyright = u'2010-present, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'trovedoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Trove.tex', u'OpenStack Database API Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/configurations.inc0000644000175000017500000000636500000000000022531 0ustar00coreycorey00000000000000.. -*- rst -*- ==================== Configuration groups ==================== List configuration groups ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/configurations Lists all configuration groups. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id Response Example ---------------- .. literalinclude:: samples/config-groups-list-response.json :language: javascript Create configuration group ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v1.0/{project_id}/configurations Creates a configuration group. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - datastore: datastore - name: name - values: values Request Example --------------- .. literalinclude:: samples/config-group-create-request.json :language: javascript Response Example ---------------- .. literalinclude:: samples/config-group-create-response.json :language: javascript Show configuration group details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: GET /v1.0/{project_id}/configurations/{configId} Lists details about a configuration group, including its values. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - configId: configId Response Example ---------------- .. literalinclude:: samples/config-group-show-response.json :language: javascript List instances using the configuration group ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/configurations/{configId}/instances Lists the instances associated with the specified configuration group. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - configId: configId Response Example ---------------- .. literalinclude:: samples/config-group-list-instances-response.json :language: javascript Patch configuration group ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PATCH /v1.0/{project_id}/configurations/{configId} Sets new values for a configuration group. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - configId: configId - values: values Request Example --------------- .. literalinclude:: samples/config-group-patch-request.json :language: javascript Update configuration group ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v1.0/{project_id}/configurations/{configId} Sets new values for a configuration group. Also lets you change the name and description of the configuration group. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - configId: configId - values: values - description: description - name: name Request Example --------------- .. literalinclude:: samples/config-group-put-request.json :language: javascript Delete configuration group ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v1.0/{project_id}/configurations/{configId} Deletes a configuration group. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - configId: configId././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/databases.inc0000644000175000017500000000545400000000000021420 0ustar00coreycorey00000000000000.. -*- rst -*- ========= Databases ========= Create database ~~~~~~~~~~~~~~~ .. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/databases Creates a database within an instance. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId - name: name - characterSet: characterSet - collate: collate Request Example --------------- .. literalinclude:: samples/databases-create-request.json :language: javascript List instance databases ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/instances/{instanceId}/databases Lists databases for an instance. This operation returns only the user-defined databases and not the system databases. Only the database administrator can get system databases such as ``mysql``, ``information_schema``, and ``lost+found``. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId Response Example ---------------- .. literalinclude:: samples/databases-list-response.json :language: javascript List instance databases for a user ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ..
rest_method:: GET /v1.0/{project_id}/instances/{instanceId}/users/{user_name}/databases Get all the databases that the user has access to. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId - user_name: user_name Response Example ---------------- .. literalinclude:: samples/databases-list-response.json :language: javascript Grant databases access ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v1.0/{project_id}/instances/{instanceId}/users/{user_name}/databases Grant the user access to the databases. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId - user_name: user_name Request Example --------------- .. literalinclude:: samples/user-grant-databases-access-request.json :language: javascript Revoke databases access ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v1.0/{project_id}/instances/{instanceId}/users/{user_name}/databases/{database_name} Revoke the user's access to the database. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId - user_name: user_name - database_name: databaseName Delete database ~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v1.0/{project_id}/instances/{instanceId}/databases/{database_name} Deletes a database. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId - database_name: databaseName././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/datastore-versions.inc0000644000175000017500000001752000000000000023326 0ustar00coreycorey00000000000000.. -*- rst -*- ================== Datastore Versions ================== List datastore versions ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/datastores/{datastore}/versions Get all the registered versions for a given datastore. Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - datastore: data_store Response Example ---------------- .. literalinclude:: samples/datastore-version-list-response.json :language: javascript Show datastore version ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/datastores/{datastore}/versions/{version} Get information for a given datastore version (name or ID). Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - datastore: data_store - version: version Response Example ---------------- .. literalinclude:: samples/datastore-version-show-response.json :language: javascript List datastore version configuration parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/datastores/{datastore}/versions/{version}/parameters Get all the config parameters associated with the specified datastore version. Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - datastore: data_store - version: version Response Example ---------------- .. literalinclude:: samples/datastore-version-parameter-list-response.json :language: javascript Show datastore version configuration parameter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/datastores/{datastore}/versions/{version}/parameters/{parameter_name} Get the specified config parameter definition. Request ------- ..
rest_parameters:: parameters.yaml - project_id: project_id - datastore: data_store - version: version - parameter_name: parameter_name Response Example ---------------- .. literalinclude:: samples/datastore-version-parameter-show-response.json :language: javascript Show datastore version ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/datastores/versions/{datastore_version_id} Get information for a given datastore version ID. Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - datastore_version_id: datastore_version_id Response Example ---------------- .. literalinclude:: samples/datastore-version-show-response.json :language: javascript List datastore version configuration parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/datastores/versions/{datastore_version_id}/parameters Get all the config parameters associated with the specified datastore version ID. Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - datastore_version_id: datastore_version_id Response Example ---------------- .. literalinclude:: samples/datastore-version-parameter-list-response.json :language: javascript Show datastore version configuration parameter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/datastores/versions/{datastore_version_id}/parameters/{parameter_name} Get the specified config parameter definition. Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - datastore_version_id: datastore_version_id - parameter_name: parameter_name Response Example ---------------- .. literalinclude:: samples/datastore-version-parameter-show-response.json :language: javascript Create datastore version configuration parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v1.0/{project_id}/mgmt/datastores/versions/{datastore_version_id}/parameters Admin only API. Register a configuration parameter for the specified datastore version. The parameter definition contains the type, minimum and maximum values (if the type is integer), and whether you must restart the instance after the parameter value is changed. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - datastore_version_id: datastore_version_id Request Example --------------- .. literalinclude:: samples/datastore-version-parameter-create-request.json :language: javascript Response Example ---------------- .. literalinclude:: samples/datastore-version-parameter-create-response.json :language: javascript Update a datastore version configuration parameter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v1.0/{project_id}/mgmt/datastores/versions/{datastore_version_id}/parameters/{parameter_name} Admin only API. Update a configuration parameter for the specified datastore version. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - datastore_version_id: datastore_version_id - parameter_name: parameter_name Request Example --------------- .. literalinclude:: samples/datastore-version-parameter-update-request.json :language: javascript Response Example ---------------- .. literalinclude:: samples/datastore-version-parameter-update-response.json :language: javascript Delete a datastore version configuration parameter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ..
Delete a datastore version configuration parameter
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: DELETE /v1.0/{project_id}/mgmt/datastores/versions/{datastore_version_id}/parameters/{parameter_name}

Admin only API. Remove a configuration parameter for the specified
datastore version.

Normal response codes: 204

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - datastore_version_id: datastore_version_id
   - parameter_name: parameter_name

Create datastore version
~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/mgmt/datastore-versions

Admin only API. Register a datastore version, creating the datastore if it
doesn't exist.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id

Request Example
---------------

.. literalinclude:: samples/datastore-version-create-request.json
   :language: javascript

List datastore versions
~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/mgmt/datastore-versions

Admin only API. Get all the datastore versions.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id

Response Example
----------------

.. literalinclude:: samples/datastore-version-mgmt-list-response.json
   :language: javascript

Show datastore version details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/mgmt/datastore-versions/{datastore_version_id}

Admin only API. Get information for a datastore version.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - datastore_version_id: datastore_version_id

Response Example
----------------

.. literalinclude:: samples/datastore-version-mgmt-show-response.json
   :language: javascript

Update datastore version details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: PATCH /v1.0/{project_id}/mgmt/datastore-versions/{datastore_version_id}

Admin only API. Update a specific datastore version.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - datastore_version_id: datastore_version_id

Request Example
---------------

.. literalinclude:: samples/datastore-version-mgmt-patch-request.json
   :language: javascript

Delete a datastore version
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: DELETE /v1.0/{project_id}/mgmt/datastore-versions/{datastore_version_id}

Admin only API. Delete a specific datastore version.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - datastore_version_id: datastore_version_id

trove-12.1.0.dev92/api-ref/source/datastores.inc

.. -*- rst -*-

==========
Datastores
==========

List datastores
~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/datastores

Get all the datastores registered in the system.

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id

Response Example
----------------

.. literalinclude:: samples/datastore-list-response.json
   :language: javascript

Show datastore details
~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/datastores/{datastore}

Shows datastore details.

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - datastore: data_store

Response Example
----------------

.. literalinclude:: samples/datastore-show-response.json
   :language: javascript
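A minimal sketch of walking the datastore catalog with ``requests``
(endpoint, token and project ID are placeholder assumptions; the
``datastores``/``versions`` keys follow the list response sample):

.. code-block:: python

   import requests

   TROVE = "http://127.0.0.1:8779/v1.0"          # placeholder endpoint
   PROJECT = "9f8dd5eacb074c9f87d2d822c9092aa5"  # placeholder project ID

   resp = requests.get(f"{TROVE}/{PROJECT}/datastores",
                       headers={"X-Auth-Token": "TOKEN"})
   resp.raise_for_status()
   # Print each datastore with its registered version names.
   for ds in resp.json()["datastores"]:
       versions = ", ".join(v["name"] for v in ds["versions"])
       print(f"{ds['name']}: {versions}")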
Delete datastore
~~~~~~~~~~~~~~~~

.. rest_method:: DELETE /v1.0/{project_id}/datastores/{datastore}

Delete a datastore.

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - datastore: data_store

trove-12.1.0.dev92/api-ref/source/index.rst

:tocdepth: 2

============
Database API
============

.. rest_expand_all::

.. include:: api-versions.inc
.. include:: datastores.inc
.. include:: datastore-versions.inc
.. include:: instances.inc
.. include:: instance-actions.inc
.. include:: instance-logs.inc
.. include:: backups.inc
.. include:: configurations.inc
.. include:: databases.inc
.. include:: users.inc
.. include:: quotas.inc

trove-12.1.0.dev92/api-ref/source/instance-actions.inc

.. -*- rst -*-

================
Instance actions
================

Restart instance
~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/action

Restarts the database service for an instance. The restart operation
restarts only the database instance. Restarting the database erases any
dynamic configuration settings that you made in the database instance. The
database service is unavailable until the restart finishes.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Request Example
---------------

.. literalinclude:: samples/instance-action-restart-request.json
   :language: javascript

Resize instance flavor
~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/action

Resizes the (Nova) flavor for an instance. If you provide a valid
``flavorRef``, this operation changes the memory size of the instance and
restarts the database.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - flavorRef: flavorRef

Request Example
---------------

.. literalinclude:: samples/instance-action-resize-request.json
   :language: javascript

Resize instance volume
~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/action

Resizes the volume that is attached to an instance. You can use this
operation to increase, but not decrease, the volume size. A valid volume
size is an integer value in gigabytes (GB). You cannot increase the volume
to a size that is larger than the API volume size limit.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - volume: volume

Request Example
---------------

.. literalinclude:: samples/instance-action-resize-volume-request.json
   :language: javascript
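All of these actions share the same ``/action`` endpoint and differ only in
the request body. A minimal sketch (endpoint, token and instance ID are
placeholders; the bodies are taken from the restart and resize-volume
request samples):

.. code-block:: python

   import requests

   TROVE = "http://127.0.0.1:8779/v1.0"               # placeholder endpoint
   PROJECT = "9f8dd5eacb074c9f87d2d822c9092aa5"       # placeholder project ID
   INSTANCE = "44b277eb-39be-4921-be31-3d61b43651d7"  # placeholder instance

   url = f"{TROVE}/{PROJECT}/instances/{INSTANCE}/action"
   headers = {"X-Auth-Token": "TOKEN"}

   # Restart the database service (202 expected).
   requests.post(url, json={"restart": {}}, headers=headers)

   # Grow the attached volume to 4 GB (202 expected).
   requests.post(url, json={"resize": {"volume": {"size": 4}}},
                 headers=headers)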
Promote instance to replica master
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/action

Promotes a replica. If you have set up replication, and the master instance
is still reachable, you can use this operation to promote a replica to be
the new master instance. This can be useful if you want to make a
configuration change or perform maintenance on the master instance. If you
made the change on the master instance directly, you would need to take the
master instance down for the duration of the operation. Instead, you can
create a replica, make the configuration change on the replica, and then
promote the replica to become the new master instance.

Once this command is executed, the status of all the instances will be set
to ``PROMOTE``, and Trove will work its magic until all of them come back
to ``HEALTHY``.

The ``instanceId`` is the instance ID of the replica you want to promote.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Request Example
---------------

.. literalinclude:: samples/instance-action-promote-replica-request.json
   :language: javascript

Eject the master instance
~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/action

Remove the master instance in a replication set. This should only be done
in a failed-master scenario. This command ejects the current master and
then forces a re-election of the new master. The new master is effectively
the one with the most current replica of the old master.

Once this command is executed, the status of all the instances will be set
to ``EJECT``, and Trove will work its magic until all of them come back to
``HEALTHY``.

The ``instanceId`` is the ID of the current, unavailable master instance.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Request Example
---------------

.. literalinclude:: samples/instance-action-eject-replica-request.json
   :language: javascript

Reset instance status
~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/action

Set the instance service status to ``ERROR`` and clear the current task
status. Mark any running backup operations as ``FAILED``.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Request Example
---------------

.. literalinclude:: samples/instance-action-reset-status-request.json
   :language: javascript

Stop database service
~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/mgmt/instances/{instanceId}/action

Admin only API. Stop the database service inside an instance.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Request Example
---------------

.. literalinclude:: samples/instance-mgmt-action-stop-request.json
   :language: javascript

Reboot instance
~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/mgmt/instances/{instanceId}/action

Admin only API. Reboot the database instance; the database service will be
stopped before rebooting.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Request Example
---------------

.. literalinclude:: samples/instance-mgmt-action-reboot-request.json
   :language: javascript

Cold Migrate instance
~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/mgmt/instances/{instanceId}/action

Admin only API. Migrate (resize) the database instance; the database
service will be stopped before migrating.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Request Example
---------------

.. literalinclude:: samples/instance-mgmt-action-migrate-request.json
   :language: javascript

Reset instance task status
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/mgmt/instances/{instanceId}/action

Admin only API.
Reset the task status of an instance, marking any running backup operations
as ``FAILED``.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Request Example
---------------

.. literalinclude:: samples/instance-mgmt-action-reset-task-status-request.json
   :language: javascript

trove-12.1.0.dev92/api-ref/source/instance-logs.inc

.. -*- rst -*-

=============
Instance logs
=============

List instance logs
~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/instances/{instanceId}/log

List all logs for an instance. Each datastore backend has its own
``guest_log_exposed_logs`` configuration option that determines which types
of logs are listed by default. The admin user can get all the logs.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Response Parameters
-------------------

.. rest_parameters:: parameters.yaml

   - logs: instance_logs
   - name: log_name
   - type: log_type
   - status: log_status
   - published: log_published_size
   - pending: log_pending_size
   - container: log_container
   - prefix: log_prefix
   - metafile: log_metafile

Response Example
----------------

.. literalinclude:: samples/instance-log-list-response.json
   :language: javascript

Show instance log details
~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/log

Show details for an instance log.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - name: log_name

Request Example
---------------

.. literalinclude:: samples/instance-log-show-request.json
   :language: javascript

Response Parameters
-------------------

.. rest_parameters:: parameters.yaml

   - log: instance_log
   - name: log_name
   - type: log_type
   - status: log_status
   - published: log_published_size
   - pending: log_pending_size
   - container: log_container
   - prefix: log_prefix
   - metafile: log_metafile

Response Example
----------------

.. literalinclude:: samples/instance-log-show-response.json
   :language: javascript

Enable instance log
~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/log

Enable a log type for an instance.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - name: log_name
   - enable: log_enable_action

Request Example
---------------

.. literalinclude:: samples/instance-log-enable-request.json
   :language: javascript

Response Parameters
-------------------

.. rest_parameters:: parameters.yaml

   - log: instance_log
   - name: log_name
   - type: log_type
   - status: log_status
   - published: log_published_size
   - pending: log_pending_size
   - container: log_container
   - prefix: log_prefix
   - metafile: log_metafile

Response Example
----------------

.. literalinclude:: samples/instance-log-enable-response.json
   :language: javascript
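The same ``/log`` endpoint drives the whole workflow through different
action fields. A minimal sketch of enabling and then publishing a log
(endpoint, token, instance ID and the log name ``general`` are placeholder
assumptions; per the parameter table, each action flag is always set to 1,
and publishing automatically enables a log):

.. code-block:: python

   import requests

   TROVE = "http://127.0.0.1:8779/v1.0"               # placeholder endpoint
   PROJECT = "9f8dd5eacb074c9f87d2d822c9092aa5"       # placeholder project ID
   INSTANCE = "44b277eb-39be-4921-be31-3d61b43651d7"  # placeholder instance

   url = f"{TROVE}/{PROJECT}/instances/{INSTANCE}/log"
   headers = {"X-Auth-Token": "TOKEN"}

   # Enable the (hypothetical) "general" log type.
   requests.post(url, json={"name": "general", "enable": 1}, headers=headers)

   # Publish it to the object store and inspect the resulting status.
   resp = requests.post(url, json={"name": "general", "publish": 1},
                        headers=headers)
   print(resp.json()["log"]["status"])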
Disable instance log
~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/log

Disable a log type for an instance.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - name: log_name
   - disable: log_disable_action

Request Example
---------------

.. literalinclude:: samples/instance-log-disable-request.json
   :language: javascript

Response Parameters
-------------------

.. rest_parameters:: parameters.yaml

   - log: instance_log
   - name: log_name
   - type: log_type
   - status: log_status
   - published: log_published_size
   - pending: log_pending_size
   - container: log_container
   - prefix: log_prefix
   - metafile: log_metafile

Response Example
----------------

.. literalinclude:: samples/instance-log-disable-response.json
   :language: javascript

Publish instance log
~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/log

Publish a log type for an instance. ``publish`` will automatically
``enable`` a log.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - name: log_name
   - publish: log_publish_action

Request Example
---------------

.. literalinclude:: samples/instance-log-publish-request.json
   :language: javascript

Response Parameters
-------------------

.. rest_parameters:: parameters.yaml

   - log: instance_log
   - name: log_name
   - type: log_type
   - status: log_status
   - published: log_published_size
   - pending: log_pending_size
   - container: log_container
   - prefix: log_prefix
   - metafile: log_metafile

Response Example
----------------

.. literalinclude:: samples/instance-log-publish-response.json
   :language: javascript

Discard instance log
~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/log

Discard all previously published logs for an instance.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - name: log_name
   - discard: log_discard_action

Request Example
---------------

.. literalinclude:: samples/instance-log-discard-request.json
   :language: javascript

Response Parameters
-------------------

.. rest_parameters:: parameters.yaml

   - log: instance_log
   - name: log_name
   - type: log_type
   - status: log_status
   - published: log_published_size
   - pending: log_pending_size
   - container: log_container
   - prefix: log_prefix
   - metafile: log_metafile

Response Example
----------------

.. literalinclude:: samples/instance-log-discard-response.json
   :language: javascript

trove-12.1.0.dev92/api-ref/source/instances.inc

.. -*- rst -*-

=========
Instances
=========

List database instances
~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/instances

Lists information for all database instances.

Supported filters: include_clustered

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id

Response Example
----------------

.. literalinclude:: samples/instance-list-response.json
   :language: javascript

List database instances (admin)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/mgmt/instances

Admin only API. Get all the instances; supported filters: deleted,
include_clustered. Can show more information such as the Cinder volume ID,
Nova server information, etc.

Normal response codes: 200

Response Example
----------------

.. literalinclude:: samples/instance-mgmt-list-response.json
   :language: javascript

List database instances (with details)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/instances/detail

Lists information for all database instances with more details, such as the
created and updated times, the service status updated time, the failure
message, etc.

Supported filters: include_clustered

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id

Response Example
----------------

.. literalinclude:: samples/instance-list-detail-response.json
   :language: javascript

Create database instance
~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: POST /v1.0/{project_id}/instances

Creates a database instance.

Asynchronously provisions a database instance. You must specify a flavor
ID, a volume size and the tenant network ID. The service provisions the
instance with a volume of the requested size, which serves as storage for
the database instance. The database service can only be accessed within the
tenant network, unless the ``access`` parameter is defined.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - users: users
   - users.name: user_name1
   - users.password: user_password
   - users.host: user_host
   - users.databases: user_databases
   - users.databases.name: user_database_name
   - datastore: datastore1
   - datastore.type: datastore_type
   - datastore.version: datastore_version
   - name: instanceName1
   - flavorRef: flavorRef
   - volume: volume
   - volume.size: volume_size
   - volume.type: volume_type
   - modules: modules
   - modules.id: module_id
   - restorePoint: restore_point
   - restorePoint.backupRef: restore_point_backupref
   - availability_zone: availability_zone
   - nics: nics
   - replica_of: replica_of
   - replica_count: replica_count
   - locality: locality
   - region_name: region_name
   - databases: databases
   - databases.characterSet: characterSet
   - databases.collate: collate
   - instance: instance
   - configuration: configuration
   - access: access
   - access.is_public: access_is_public
   - access.allowed_cidrs: access_allowed_cidrs

Request Example
---------------

.. literalinclude:: samples/instance-create-request.json
   :language: javascript
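As a rough illustration of a request body assembled from the parameters
above (the flavor, network ID and datastore values are placeholder
assumptions; the instance-create request sample is the authoritative
shape):

.. code-block:: python

   import requests

   TROVE = "http://127.0.0.1:8779/v1.0"          # placeholder endpoint
   PROJECT = "9f8dd5eacb074c9f87d2d822c9092aa5"  # placeholder project ID

   body = {"instance": {
       "name": "mysql-1",                               # instance name
       "flavorRef": "2",                                # flavor ID or href
       "volume": {"size": 4},                           # size in GB
       "datastore": {"type": "mysql", "version": "5.7"},
       "nics": [{"net-id": "net-uuid"}],                # placeholder net ID
       "access": {"is_public": False},
   }}
   resp = requests.post(f"{TROVE}/{PROJECT}/instances",
                        json=body, headers={"X-Auth-Token": "TOKEN"})
   resp.raise_for_status()
   print(resp.json()["instance"]["id"])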
Response Parameters
-------------------

.. rest_parameters:: parameters.yaml

   - id: instanceId1
   - name: instanceName1
   - status: instance_status
   - links: instance_links
   - links.href: instance_link_href
   - links.rel: instance_link_rel
   - flavor: flavor
   - flavor.id: flavorId1
   - flavor.links: flavor_links
   - flavor.links.href: flavor_link_href
   - flavor.links.rel: flavor_link_rel
   - datastore: datastore2
   - datastore.type: datastore_type
   - datastore.version: datastore_version1
   - region: region_name2
   - tenant_id: tenant_id
   - volume: volume
   - volume.size: volume_size
   - volume.used: volume_used
   - hostname: instance_hostname
   - ip: instance_ip_address
   - created: created
   - updated: updated
   - service_status_updated: service_status_updated
   - fault: instance_fault
   - fault.message: instance_fault_message
   - fault.created: instance_fault_created
   - fault.details: instance_fault_details
   - replicas: instance_replicas
   - replicas.id: instance_replica_id
   - replicas.links: instance_replica_links
   - replicas.links.href: instance_replica_link_href
   - replicas.links.rel: instance_replica_link_rel
   - configuration: configuration1
   - configuration.id: configuration_id
   - configuration.name: configuration_name
   - configuration.links: configuration_links
   - configuration.links.href: configuration_link_href
   - configuration.links.rel: configuration_link_rel
   - locality: locality
   - local_storage_used: local_storage_used
   - password: root_password
   - cluster_id: cluster_id
   - shard_id: shard_id
   - server_id: server_id
   - volume_id: volume_id
   - encrypted_rpc_messaging: encrypted_rpc_messaging
   - instance: instance

Response Example
----------------

.. literalinclude:: samples/instance-create-response.json
   :language: javascript

Show database instance details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/instances/{instanceId}

Shows database instance details.

Lists the volume size in gigabytes (GB) and the approximate GB used. After
instance creation, the ``used`` value is greater than 0, which is expected,
as databases may create some basic (non-empty) files to represent an empty
schema. The response does not include the ``used`` attribute when the
instance status is ``BUILD``, ``REBOOT``, ``RESIZE``, or ``ERROR``.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Response Example
----------------

.. literalinclude:: samples/instance-show-response.json
   :language: javascript

Show database instance details (admin)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/mgmt/instances/{instanceId}

Admin only API. Get an instance's information, including the Cinder volume,
Nova server, etc.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Response Example
----------------

.. literalinclude:: samples/instance-mgmt-show-response.json
   :language: javascript
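Because provisioning is asynchronous, a client typically polls the show
endpoint until the instance settles. A minimal sketch (endpoint, token and
instance ID are placeholders; the status values come from elsewhere in this
document):

.. code-block:: python

   import time
   import requests

   TROVE = "http://127.0.0.1:8779/v1.0"               # placeholder endpoint
   PROJECT = "9f8dd5eacb074c9f87d2d822c9092aa5"       # placeholder project ID
   INSTANCE = "7de1bed8-6983-4d46-9a52-0abfbb0d27a2"  # placeholder instance

   url = f"{TROVE}/{PROJECT}/instances/{INSTANCE}"
   while True:
       instance = requests.get(
           url, headers={"X-Auth-Token": "TOKEN"}).json()["instance"]
       if instance["status"] in ("HEALTHY", "ERROR"):
           break
       time.sleep(10)  # still BUILD/REBOOT/RESIZE; keep waiting
   print("final status:", instance["status"])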
Attach/Detach configuration group
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: PUT /v1.0/{project_id}/instances/{instanceId}

Attach or detach a configuration group for an instance. When you pass in
only an instance ID and omit the configuration ID, this operation detaches
any configuration group that was attached to the instance.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - configuration: configuration

Request Example
---------------

.. literalinclude:: samples/instance-put-attach-config-group-request.json
   :language: javascript

Update instance name
~~~~~~~~~~~~~~~~~~~~

.. rest_method:: PATCH /v1.0/{project_id}/instances/{instanceId}

Update the instance name.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - instance: instance
   - name: instanceName

Request Example
---------------

.. literalinclude:: samples/instance-patch-update-name-request.json
   :language: javascript

Upgrade datastore version for instance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: PATCH /v1.0/{project_id}/instances/{instanceId}

Upgrade the datastore version. While the datastore version is upgrading,
the instance status changes to ``UPGRADE``; it changes back to ``HEALTHY``
after the upgrade finishes, or to ``ERROR`` if the upgrade fails.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - instance: instance
   - datastore_version: datastore_version

Request Example
---------------

.. literalinclude:: samples/instance-patch-upgrade-datastore-version-request.json
   :language: javascript

Detach replica
~~~~~~~~~~~~~~

.. rest_method:: PATCH /v1.0/{project_id}/instances/{instanceId}

Detaches a replica from its replication source.

If you created an instance that is a replica of a source instance, you can
detach the replica from the source later on. This can be useful if the
source becomes unavailable. In this case, you can detach the replica from
the source, making the replica a standalone database instance. You can then
take the new standalone instance and create a new replica of that instance.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId
   - instance: instance
   - replica_of: replica_of
   - slave_of: slave_of

Request Example
---------------

.. literalinclude:: samples/instance-patch-detach-replica-request.json
   :language: javascript
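A sketch of the detach request (per the ``slave_of`` parameter description,
detaching sets it to null; endpoint, token and replica ID are placeholders,
and the detach-replica request sample remains the authoritative body):

.. code-block:: python

   import requests

   TROVE = "http://127.0.0.1:8779/v1.0"               # placeholder endpoint
   PROJECT = "9f8dd5eacb074c9f87d2d822c9092aa5"       # placeholder project ID
   REPLICA = "7de1bed8-6983-4d46-9a52-0abfbb0d27a2"   # placeholder replica ID

   # Setting replica_of/slave_of to null detaches the replica from its source.
   body = {"instance": {"replica_of": None, "slave_of": None}}
   resp = requests.patch(f"{TROVE}/{PROJECT}/instances/{REPLICA}",
                         json=body, headers={"X-Auth-Token": "TOKEN"})
   assert resp.status_code == 202  # asynchronous operation accepted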
Delete database instance
~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: DELETE /v1.0/{project_id}/instances/{instanceId}

Deletes a database instance. This operation does not delete any slaves.

Normal response codes: 202

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

List backups of database instance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/instances/{instanceId}/backups

Get all the backups for an instance.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Response Example
----------------

.. literalinclude:: samples/instance-backup-list-response.json
   :language: javascript

List default configuration parameters of database instance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/instances/{instanceId}/configuration

Get the default configuration parameters for an instance.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - instanceId: instanceId

Response Example
----------------

.. literalinclude:: samples/instance-configuration-list-response.json
   :language: javascript

trove-12.1.0.dev92/api-ref/source/parameters.yaml

# variables in path
backup_id:
  description: The ID of the backup.
  in: path
  required: true
  type: string
configId:
  description: The ID of the configuration group.
  in: path
  required: false
  type: string
data_store:
  description: Name or ID of the data store.
  in: path
  required: false
  type: string
databaseName:
  description: The name for the database.
  in: path
  required: false
  type: string
datastore_name:
  description: The name of the data store.
  in: path
  required: false
  type: string
datastore_version_id:
  description: The UUID of the data store version.
  in: path
  required: false
  type: string
flavorId:
  description: The ID of the flavor.
  in: path
  required: false
  type: string
instanceId:
  description: The ID of the database instance.
  in: path
  required: true
  type: string
parameter_name:
  description: The name of the parameter for which to show details.
  in: path
  required: false
  type: string
project_id:
  description: The project ID of the instance owner.
  in: path
  required: true
  type: string
user_name:
  description: The name of the user.
  in: path
  required: false
  type: string
user_project:
  description: The project ID of the user's project.
  in: path
  required: true
  type: string
version:
  description: Name or ID of the datastore version.
  in: path
  required: false
  type: string

# variables in body
access:
  description: An ``access`` object that defines how the database service is exposed.
  in: body
  required: false
  type: object
access_allowed_cidrs:
  description: |
    A list of IPv4 CIDRs, IPv6 CIDRs, or a mix of both, that restricts
    access to the database service. ``0.0.0.0/0`` is used by default if
    this parameter is not provided.
  in: body
  required: false
  type: array
access_is_public:
  description: Whether the database service is exposed to the public.
  in: body
  required: false
  type: boolean
availability_zone:
  description: The availability zone of the instance.
  in: body
  required: false
  type: string
backup_description:
  description: An optional description for the backup.
  in: body
  required: false
  type: string
backup_description1:
  description: An optional description for the backup.
  in: body
  required: true
  type: string
backup_incremental:
  description: |
    Set this parameter to 1 to create an incremental backup based on the
    last full backup, or to 0 for a full backup. A full backup is created
    if no existing backup is found.
  in: body
  required: false
  type: integer
backup_instanceId:
  description: The ID of the instance to create the backup for.
  in: body
  required: true
  type: string
backup_list:
  description: A list of ``backup`` objects.
  in: body
  required: true
  type: array
backup_locationRef:
  description: The URL of the backup location.
  in: body
  required: true
  type: string
backup_name:
  description: Name of the backup.
  in: body
  required: true
  type: string
backup_parentId:
  description: ID of the parent backup to perform an incremental backup from.
  in: body
  required: false
  type: string
backup_parentId1:
  description: ID of the parent backup to perform an incremental backup from.
  in: body
  required: true
  type: string
backup_size:
  description: Size of the backup, in GB.
  in: body
  required: true
  type: string
backup_status:
  description: Status of the backup.
  in: body
  required: true
  type: string
characterSet:
  description: |
    A set of symbols and encodings. Default is ``utf8``. For information
    about supported character sets and collations, see `Character Sets and
    Collations in MySQL`_.
  in: body
  required: false
  type: string
cluster_id:
  description: The cluster ID of an instance.
  in: body
  required: false
  type: string
collate:
  description: |
    A set of rules for comparing characters in a character set. Default is
    ``utf8_general_ci``. For information about supported character sets and
    collations, see `Character Sets and Collations in MySQL`_.
  in: body
  required: false
  type: string
configuration:
  description: ID of the configuration group that you want to attach to the instance.
  in: body
  required: true
  type: string
configuration1:
  description: A ``configuration`` object.
  in: body
  required: false
  type: object
configuration_id:
  description: The ID of a configuration.
  in: body
  required: true
  type: string
configuration_link_href:
  description: The ``href`` attribute of a configuration link.
  in: body
  required: true
  type: string
configuration_link_rel:
  description: The ``rel`` attribute of a configuration link.
  in: body
  required: true
  type: string
configuration_links:
  description: The ``links`` object of a configuration.
  in: body
  required: true
  type: array
configuration_name:
  description: The name of a configuration.
  in: body
  required: true
  type: string
created:
  description: |
    The date and time when the resource was created. The date and time
    stamp format is `ISO 8601`_::

      CCYY-MM-DDThh:mm:ss±hh:mm

    For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if
    included, is the time zone as an offset from UTC. In the previous
    example, the offset value is ``-05:00``.
  in: body
  required: true
  type: string
database_name:
  description: The name of a database.
  in: body
  required: true
  type: string
databases:
  description: A ``databases`` object.
  in: body
  required: false
  type: array
datastore:
  description: |
    Data store assigned to the configuration group. Required if you did not
    configure the default data store.
  in: body
  required: true
  type: string
datastore1:
  description: A ``datastore`` object.
  in: body
  required: false
  type: object
datastore2:
  description: A ``datastore`` object.
  in: body
  required: true
  type: object
datastore_type:
  description: The type of a datastore.
  in: body
  required: false
  type: string
datastore_type1:
  description: The type of a datastore.
  in: body
  required: true
  type: string
datastore_version:
  description: Name of the datastore version to use when creating/updating the instance.
  in: body
  required: false
  type: string
datastore_version1:
  description: Name or ID of a datastore version.
  in: body
  required: true
  type: string
datastore_version_id1:
  description: The UUID of the data store version.
  in: body
  required: true
  type: string
datastore_version_name:
  description: The name of the datastore version.
  in: body
  required: true
  type: string
description:
  description: New description of the configuration group.
  in: body
  required: true
  type: string
encrypted_rpc_messaging:
  description: Whether the instance is using the encrypted RPC messaging feature or not.
  in: body
  required: false
  type: boolean
flavor:
  description: |
    A ``flavor`` object, which includes the flavor ID (integer) and flavor
    relative links.
  in: body
  required: true
  type: object
flavor_link_href:
  description: The ``href`` attribute of a flavor link.
  in: body
  required: true
  type: string
flavor_link_rel:
  description: The ``rel`` attribute of a flavor link.
  in: body
  required: true
  type: string
flavor_links:
  description: The ``links`` object of a flavor.
  in: body
  required: true
  type: array
flavorId1:
  description: The ID of the flavor.
  in: body
  required: true
  type: string
flavorRef:
  description: |
    Reference (href), which is the actual URI to a flavor as it appears in
    the list flavors response. Rather than the flavor URI, you can also
    pass the flavor ID (integer) as the ``flavorRef`` value. For example,
    ``1``.
  in: body
  required: true
  type: string
instance:
  description: An ``instance`` object.
  in: body
  required: true
  type: object
instance_fault:
  description: The ``fault`` object of an instance.
  in: body
  required: false
  type: object
instance_fault_created:
  description: The update timestamp of the fault message for an instance.
  in: body
  required: true
  type: string
instance_fault_details:
  description: The detailed fault explanation of an instance.
  in: body
  required: true
  type: string
instance_fault_message:
  description: The fault message of an instance.
  in: body
  required: true
  type: string
instance_hostname:
  description: The hostname of an instance.
  in: body
  required: false
  type: string
instance_ip_address:
  description: The IP address of an instance.
  in: body
  required: false
  type: string
instance_link_href:
  description: The ``href`` attribute of an instance link.
  in: body
  required: true
  type: string
instance_link_rel:
  description: The ``rel`` attribute of an instance link.
  in: body
  required: true
  type: string
instance_links:
  description: The ``links`` object of the instance.
  in: body
  required: true
  type: array
instance_log:
  description: A ``log`` object.
  in: body
  required: true
  type: array
instance_logs:
  description: A list of ``log`` objects.
  in: body
  required: true
  type: array
instance_replica_id:
  description: The ID of a replica instance.
  in: body
  required: true
  type: string
instance_replica_link_href:
  description: The ``href`` attribute of a replica instance link.
  in: body
  required: true
  type: string
instance_replica_link_rel:
  description: The ``rel`` attribute of a replica instance link.
  in: body
  required: true
  type: string
instance_replica_links:
  description: The ``links`` object of a replica instance.
  in: body
  required: true
  type: array
instance_replicas:
  description: The ``replicas`` object of an instance.
  in: body
  required: false
  type: array
instance_status:
  description: Status of the instance.
  in: body
  required: true
  type: string
instanceId1:
  description: The ID of the database instance.
  in: body
  required: true
  type: string
instanceName:
  description: Name of the instance.
  in: body
  required: false
  type: string
instanceName1:
  description: Name of the instance.
  in: body
  required: true
  type: string
local_storage_used:
  description: The used space of the ephemeral disk, in gigabytes (GB).
  in: body
  required: false
  type: float
locality:
  description: |
    The scheduler hint when creating underlying Nova instances. Valid
    values are: ``affinity``, ``anti-affinity``.
  in: body
  required: false
  type: string
log_container:
  description: |
    The object store container where the published log data will be
    stored. Defaults to ``None`` before the log has been published.
  in: body
  required: true
  type: string
log_disable_action:
  description: To disable a log type, this should always be set to 1.
  in: body
  required: false
  type: integer
log_discard_action:
  description: |
    To discard a log type which has been published previously, this should
    always be set to 1.
  in: body
  required: false
  type: integer
log_enable_action:
  description: To enable a log type, this should always be set to 1.
  in: body
  required: false
  type: integer
log_metafile:
  description: The log metafile location.
  in: body
  required: true
  type: string
log_name:
  description: The name of the log.
  in: body
  required: true
  type: string
log_pending_size:
  description: The size of the log file pending publication.
  in: body
  required: true
  type: string
log_prefix:
  description: |
    If the log has been published, this is the prefix location where the
    log data is stored. Otherwise the prefix is ``None``.
  in: body
  required: true
  type: string
log_publish_action:
  description: To publish a log type, this should always be set to 1.
  in: body
  required: false
  type: integer
log_published_size:
  description: Published size of the log.
  in: body
  required: true
  type: string
log_status:
  description: The log status.
  in: body
  required: true
  type: string
log_type:
  description: The type of the log.
  in: body
  required: true
  type: string
module_id:
  description: The ID of a module.
  in: body
  required: true
  type: string
modules:
  description: The ``modules`` object.
  in: body
  required: false
  type: object
name:
  description: Name of the configuration group you are creating.
  in: body
  required: true
  type: string
nics:
  description: |
    Network interfaces for the database service inside Nova instances.
    ``NOTE:`` For backward compatibility, this parameter uses the same
    schema as novaclient when creating servers, but only ``net-id`` is
    supported, and it can only be specified once. This parameter is
    required in the service tenant deployment model.
  in: body
  required: false
  type: array
quota_in_use:
  description: The used quota for a resource.
  in: body
  required: true
  type: integer
quota_limit:
  description: The limit of a resource quota.
  in: body
  required: true
  type: integer
quota_list:
  description: A list of resource quotas.
  in: body
  required: true
  type: array
quota_reserved:
  description: The reserved quota for a resource.
  in: body
  required: true
  type: integer
quota_resource:
  description: The resource name.
  in: body
  required: true
  type: string
quotas:
  description: Dictionary that defines the resources quota.
  in: body
  required: true
  type: string
region_name:
  description: The region name of an instance.
  in: body
  required: false
  type: string
region_name2:
  description: The region name of an instance.
  in: body
  required: true
  type: string
replica_count:
  description: Number of replicas to create (defaults to 1).
  in: body
  required: false
  type: integer
replica_of:
  description: ID or name of an existing instance to replicate from.
  in: body
  required: false
  type: string
restore_point:
  description: |
    The ``restorePoint`` object. Use this parameter to create an instance
    from a backup.
  in: body
  required: false
  type: object
restore_point_backupref:
  description: The ID of the backup from which a new instance is created.
  in: body
  required: true
  type: string
root_password:
  description: The password of the database root user (i.e., the administrative user).
  in: body
  required: false
  type: string
server_id:
  description: The ID of the underlying Nova instance for an instance.
  in: body
  required: false
  type: string
service_status_updated:
  description: |
    The date and time when the database service status was updated. This
    field can be used to validate whether the ``HEALTHY`` status is stale
    or not.
  in: body
  required: true
  type: string
shard_id:
  description: The shard ID of an instance.
  in: body
  required: false
  type: string
slave_of:
  description: To detach a replica, set ``slave_of`` to null.
  in: body
  required: true
  type: string
tenant_id:
  description: The ID of a tenant.
  in: body
  required: false
  type: string
updated:
  description: |
    The date and time when the resource was updated. The date and time
    stamp format is `ISO 8601`_::

      CCYY-MM-DDThh:mm:ss±hh:mm

    The ``±hh:mm`` value, if included, is the time zone as an offset from
    UTC. For example, ``2015-08-27T09:49:58-05:00``. If the offset is
    omitted, the UTC time zone is assumed.
  in: body
  required: true
  type: string
user_database_name:
  description: The name of a database which the user can access.
  in: body
  required: true
  type: string
user_databases:
  description: |
    The ``databases`` object. This is a list of databases which the user
    can access.
  in: body
  required: false
  type: array
user_host:
  description: A host allowed for a user.
  in: body
  required: false
  type: string
user_name1:
  description: The name of a user.
  in: body
  required: true
  type: string
user_password:
  description: The password of a user.
  in: body
  required: true
  type: string
users:
  description: A ``users`` object.
  in: body
  required: false
  type: array
values:
  description: Dictionary that lists configuration parameter names and associated values.
  in: body
  required: true
  type: string
volume:
  description: A ``volume`` object.
  in: body
  required: false
  type: object
volume_id:
  description: The ID of a volume.
  in: body
  required: false
  type: string
volume_size:
  description: |
    The volume size, in gigabytes (GB). A valid value is from 1 to 50
    (this limit is controlled by the ``max_accepted_volume_size``
    configuration option).
  in: body
  required: true
  type: integer
volume_size2:
  description: The volume size, in gigabytes (GB).
  in: body
  required: true
  type: integer
volume_type:
  description: |
    The volume type to use. You can list the available volume types on
    your system by using the ``cinder type-list`` command. If you want to
    specify a volume type, you must also specify a volume size.
  in: body
  required: false
  type: string
volume_used:
  description: The used space of the volume, in gigabytes (GB).
  in: body
  required: false
  type: float

trove-12.1.0.dev92/api-ref/source/quotas.inc

.. -*- rst -*-

======
Quotas
======

Show resources limit for current project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/limits

Get the resources quota and rate limit for the current project.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id

Response Example
----------------

.. literalinclude:: samples/limit-show-response.json
   :language: javascript

Show resources quota for a specific project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: GET /v1.0/{project_id}/mgmt/quotas/{user_project}

Admin only action by default.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - user_project: user_project

Response Parameters
-------------------

.. rest_parameters:: parameters.yaml

   - quotas: quota_list
   - resource: quota_resource
   - in_use: quota_in_use
   - limit: quota_limit
   - reserved: quota_reserved

Response Example
----------------
.. literalinclude:: samples/quota-show-response.json
   :language: javascript

Update resources quota for a specific project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. rest_method:: PUT /v1.0/{project_id}/mgmt/quotas/{user_project}

Admin only action by default.

Normal response codes: 200

Request
-------

.. rest_parameters:: parameters.yaml

   - project_id: project_id
   - user_project: user_project
   - quotas: quotas

Request Example
---------------

.. literalinclude:: samples/quota-update.json
   :language: javascript

Response Parameters
-------------------

.. rest_parameters:: parameters.yaml

   - quotas: quotas

Response Example
----------------

.. literalinclude:: samples/quota-update.json
   :language: javascript
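A sketch of a quota update via the admin endpoint. The ``quotas`` dictionary
shape follows the parameter table; the resource names ``instances`` and
``backups``, the endpoint, token and project IDs are placeholder
assumptions, and ``samples/quota-update.json`` remains the authoritative
body:

.. code-block:: python

   import requests

   TROVE = "http://127.0.0.1:8779/v1.0"                # placeholder endpoint
   ADMIN_PROJECT = "9f8dd5eacb074c9f87d2d822c9092aa5"  # placeholder project
   TARGET_PROJECT = "target-project-id"                # placeholder project

   body = {"quotas": {"instances": 10, "backups": 50}}  # assumed resources
   resp = requests.put(
       f"{TROVE}/{ADMIN_PROJECT}/mgmt/quotas/{TARGET_PROJECT}",
       json=body, headers={"X-Auth-Token": "TOKEN"})  # admin token assumed
   resp.raise_for_status()
   print(resp.json()["quotas"])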
trove-12.1.0.dev92/api-ref/source/samples/backup-create-request.json

{
    "backup": {
        "description": "My Backup",
        "incremental": 0,
        "instance": "44b277eb-39be-4921-be31-3d61b43651d7",
        "name": "snapshot"
    }
}

trove-12.1.0.dev92/api-ref/source/samples/backup-create-response.json

{
    "backup": {
        "created": "2014-10-30T12:30:00",
        "datastore": {
            "type": "mysql",
            "version": "5.5",
            "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb"
        },
        "description": "My Backup",
        "id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4",
        "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7",
        "locationRef": null,
        "name": "snapshot",
        "parent_id": null,
        "size": null,
        "status": "NEW",
        "updated": "2014-10-30T12:30:00"
    }
}

trove-12.1.0.dev92/api-ref/source/samples/backup-get-response.json

{
    "backup": {
        "created": "2014-10-30T12:30:00",
        "datastore": {
            "type": "mysql",
            "version": "5.5",
            "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb"
        },
        "description": "My Backup",
        "id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4",
        "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7",
        "locationRef": "http://localhost/path/to/backup",
        "name": "snapshot",
        "parent_id": null,
        "size": 0.14,
        "status": "COMPLETED",
        "updated": "2014-10-30T12:30:00"
    }
}

trove-12.1.0.dev92/api-ref/source/samples/backup-list-response.json

{
    "backups": [
        {
            "created": "2014-10-30T12:30:00",
            "datastore": {
                "type": "mysql",
                "version": "5.5",
                "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb"
            },
            "description": "My Backup",
            "id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4",
            "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7",
            "locationRef": "http://localhost/path/to/backup",
            "name": "snapshot",
            "parent_id": null,
            "size": 0.14,
            "status": "COMPLETED",
            "updated": "2014-10-30T12:30:00"
        },
        {
            "created": "2014-10-30T12:30:00",
            "datastore": {
                "type": "mysql",
                "version": "5.5",
                "version_id": "b00000b0-00b0-0b00-00b0-000b000000bb"
            },
            "description": "My Incremental Backup",
            "id": "2e351a71-dd28-4bcb-a7d6-d36a5b487173",
            "instance_id": "44b277eb-39be-4921-be31-3d61b43651d7",
            "locationRef": "http://localhost/path/to/backup",
            "name": "Incremental Snapshot",
            "parent_id": "a9832168-7541-4536-b8d9-a8a9b79cf1b4",
            "size": 0.14,
            "status": "COMPLETED",
            "updated": "2014-10-30T12:30:00"
        }
    ]
}

trove-12.1.0.dev92/api-ref/source/samples/config-group-create-request.json

{
    "configuration": {
        "datastore": {
            "type": "mysql"
        },
        "values": {
            "sync_binlog": 1
        },
        "name": "group1"
    }
}

trove-12.1.0.dev92/api-ref/source/samples/config-group-create-response.json

{
    "updated": "2015-07-01T16:38:27",
    "name": "group1",
    "created": "2015-07-01T16:38:27",
    "instance_count": 0,
    "values": {
        "sync_binlog": 1
    },
    "datastore_version_id": "2dc7faa0-efff-4c2b-8cff-bcd949c518a5",
    "id": "2aa51628-5c42-4086-8682-137caffd2ba6",
    "datastore_name": "mysql",
    "datastore_version_name": "5.6",
    "description": null
}

trove-12.1.0.dev92/api-ref/source/samples/config-group-list-instances-response.json

{
    "instances": [
        {
            "id": "7fd2d1d6-a2ef-4a76-8c03-e233db4d86da",
            "name": "master_1"
        }
    ]
}

trove-12.1.0.dev92/api-ref/source/samples/config-group-patch-request.json

{
    "configuration": {
        "values": {
            "connect_timeout": 17
        }
    }
}

trove-12.1.0.dev92/api-ref/source/samples/config-group-put-request.json

{
    "configuration": {
        "values": {
            "connect_timeout": 18
        },
        "name": "new_name",
        "description": "New description"
    }
}

trove-12.1.0.dev92/api-ref/source/samples/config-group-show-response.json

{
    "configuration": {
        "datastore_name": "mysql",
        "updated": "2015-11-22T19:07:20",
        "values": {
            "connect_timeout": 17
        },
        "name": "group1",
        "created": "2015-11-20T20:51:24",
        "datastore_version_name": "5.6",
        "instance_count": 1,
        "id": "1c8a4fdd-690c-4e6e-b2e1-148b8d738770",
        "datastore_version_id": "b9f97132-467b-4f8e-b12d-947cfc223ac3",
        "description": null
    }
}
"description": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/databases-create-request.json0000644000175000017500000000041200000000000026204 0ustar00coreycorey00000000000000{ "databases": [ { "character_set": "utf8", "collate": "utf8_general_ci", "name": "databaseA" }, { "name": "databaseB" }, { "name": "databaseC" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/databases-list-response.json0000644000175000017500000000027300000000000026067 0ustar00coreycorey00000000000000{ "databases": [ { "name": "databaseB" }, { "name": "databaseC" }, { "name": "databaseD" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-list-response.json0000644000175000017500000000744100000000000026132 0ustar00coreycorey00000000000000{ "datastores": [ { "default_version": "9e46a201-e92e-4ae0-af89-4af12a21bb45", "id": "75dc45e0-2c3c-47ee-a5b8-5213288e3fe2", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/75dc45e0-2c3c-47ee-a5b8-5213288e3fe2", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/75dc45e0-2c3c-47ee-a5b8-5213288e3fe2", "rel": "bookmark" } ], "name": "mariadb", "versions": [ { "id": "9e46a201-e92e-4ae0-af89-4af12a21bb45", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/versions/9e46a201-e92e-4ae0-af89-4af12a21bb45", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/versions/9e46a201-e92e-4ae0-af89-4af12a21bb45", "rel": "bookmark" } ], "name": "10.4" } ] }, { "default_version": "b3d5c099-dbd5-4518-baa3-7c7c195671bf", "id": "3bf89e46-c3ed-4db6-a423-6e6709ec21ef", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/3bf89e46-c3ed-4db6-a423-6e6709ec21ef", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/3bf89e46-c3ed-4db6-a423-6e6709ec21ef", "rel": "bookmark" } ], "name": "mysql", "versions": [ { "id": "b3d5c099-dbd5-4518-baa3-7c7c195671bf", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/versions/b3d5c099-dbd5-4518-baa3-7c7c195671bf", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/versions/b3d5c099-dbd5-4518-baa3-7c7c195671bf", "rel": "bookmark" } ], "name": "5.7" } ] }, { "id": "cc9ee471-e781-43bf-a796-423c5d549997", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/cc9ee471-e781-43bf-a796-423c5d549997", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/cc9ee471-e781-43bf-a796-423c5d549997", "rel": "bookmark" } ], "name": "postgresql", "versions": [ { "id": "4eb0179d-fe11-4556-9422-5267d2fc7625", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/versions/4eb0179d-fe11-4556-9422-5267d2fc7625", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/versions/4eb0179d-fe11-4556-9422-5267d2fc7625", "rel": "bookmark" } ], "name": "12" } ] } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-show-response.json0000644000175000017500000000215400000000000026133 0ustar00coreycorey00000000000000{ "datastore": { "id": 
"cc9ee471-e781-43bf-a796-423c5d549997", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/cc9ee471-e781-43bf-a796-423c5d549997", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/cc9ee471-e781-43bf-a796-423c5d549997", "rel": "bookmark" } ], "name": "postgresql", "versions": [ { "id": "4eb0179d-fe11-4556-9422-5267d2fc7625", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/versions/4eb0179d-fe11-4556-9422-5267d2fc7625", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/versions/4eb0179d-fe11-4556-9422-5267d2fc7625", "rel": "bookmark" } ], "name": "12" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-create-request.json0000644000175000017500000000037500000000000027736 0ustar00coreycorey00000000000000{ "version": { "datastore_name": "mysql", "datastore_manager": "mysql", "name": "test", "image": "58b83318-cb18-4189-8d89-a015dc3839dd", "active": true, "default": false, "packages": [] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-list-response.json0000644000175000017500000000117600000000000027614 0ustar00coreycorey00000000000000{ "versions": [ { "datastore": "cc9ee471-e781-43bf-a796-423c5d549997", "id": "4eb0179d-fe11-4556-9422-5267d2fc7625", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/versions/4eb0179d-fe11-4556-9422-5267d2fc7625", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/versions/4eb0179d-fe11-4556-9422-5267d2fc7625", "rel": "bookmark" } ], "name": "12" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-mgmt-list-response.json0000644000175000017500000000162400000000000030554 0ustar00coreycorey00000000000000{ "versions": [ { "active": true, "datastore_id": "75dc45e0-2c3c-47ee-a5b8-5213288e3fe2", "datastore_manager": "mariadb", "datastore_name": "mariadb", "default": false, "id": "385fa391-f6f1-4444-9d80-7dc3b2188fa3", "image": "42706631-3b76-4d1c-95c9-6a85e72eebda", "name": "10.4-dev-train", "packages": [ "" ] }, { "active": true, "datastore_id": "cc9ee471-e781-43bf-a796-423c5d549997", "datastore_manager": "postgresql", "datastore_name": "postgresql", "default": false, "id": "4eb0179d-fe11-4556-9422-5267d2fc7625", "image": "2eb8005f-270c-492f-977e-3c041a622ef7", "name": "12", "packages": [ "" ] } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-mgmt-patch-request.json0000644000175000017500000000011300000000000030522 0ustar00coreycorey00000000000000{ "image": "42706631-3b76-4d1c-95c9-6a85e72eebda", "active": true }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-mgmt-show-response.json0000644000175000017500000000063100000000000030556 0ustar00coreycorey00000000000000{ "version": { "active": true, "datastore_id": "75dc45e0-2c3c-47ee-a5b8-5213288e3fe2", "datastore_manager": "mariadb", "datastore_name": "mariadb", "default": false, "id": "385fa391-f6f1-4444-9d80-7dc3b2188fa3", "image": 
"42706631-3b76-4d1c-95c9-6a85e72eebda", "name": "10.4-dev-train", "packages": [ "" ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-parameter-create-request.json0000644000175000017500000000027600000000000031714 0ustar00coreycorey00000000000000{ "configuration-parameter": { "name": "connect_timeout", "data_type": "integer", "min_size": 64, "max_size": 65535, "restart_required": 0 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-parameter-create-response.json0000644000175000017500000000045000000000000032054 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "datastore_version_id": "4eb0179d-fe11-4556-9422-5267d2fc7625", "max": 65535, "min": 64, "name": "connect_timeout", "restart_required": false, "type": "integer" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-parameter-list-response.json0000644000175000017500000000204400000000000031565 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "datastore_version_id": "b3d5c099-dbd5-4518-baa3-7c7c195671bf", "max": 1, "min": 0, "name": "autocommit", "restart_required": false, "type": "integer" }, { "datastore_version_id": "b3d5c099-dbd5-4518-baa3-7c7c195671bf", "max": 65535, "min": 1, "name": "auto_increment_increment", "restart_required": false, "type": "integer" }, { "datastore_version_id": "b3d5c099-dbd5-4518-baa3-7c7c195671bf", "max": 65535, "min": 1, "name": "auto_increment_offset", "restart_required": false, "type": "integer" }, { "datastore_version_id": "b3d5c099-dbd5-4518-baa3-7c7c195671bf", "max": 31536000, "min": 1, "name": "wait_timeout", "restart_required": false, "type": "integer" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-parameter-show-response.json0000644000175000017500000000027300000000000031574 0ustar00coreycorey00000000000000{ "datastore_version_id": "b3d5c099-dbd5-4518-baa3-7c7c195671bf", "max": 31536000, "min": 1, "name": "wait_timeout", "restart_required": false, "type": "integer" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-parameter-update-request.json0000644000175000017500000000027600000000000031733 0ustar00coreycorey00000000000000{ "configuration-parameter": { "name": "connect_timeout", "data_type": "integer", "min_size": 64, "max_size": 65535, "restart_required": 1 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-parameter-update-response.json0000644000175000017500000000035700000000000032101 0ustar00coreycorey00000000000000{ "datastore_version_id": "4eb0179d-fe11-4556-9422-5267d2fc7625", "deleted": 0, "deleted_at": null, "max_size": 65535, "min_size": 64, "name": "connect_timeout", "restart_required": true, "type": "integer" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/datastore-version-show-response.json0000644000175000017500000000106500000000000027616 
0ustar00coreycorey00000000000000{ "version": { "datastore": "cc9ee471-e781-43bf-a796-423c5d549997", "id": "4eb0179d-fe11-4556-9422-5267d2fc7625", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/datastores/versions/4eb0179d-fe11-4556-9422-5267d2fc7625", "rel": "self" }, { "href": "https://127.0.0.1:8779/datastores/versions/4eb0179d-fe11-4556-9422-5267d2fc7625", "rel": "bookmark" } ], "name": "12" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-action-eject-replica-request.json0000644000175000017500000000004300000000000030600 0ustar00coreycorey00000000000000{ "eject_replica_source": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-action-promote-replica-request.json0000644000175000017500000000005000000000000031171 0ustar00coreycorey00000000000000{ "promote_to_replica_source": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-action-reset-status-request.json0000644000175000017500000000003200000000000030532 0ustar00coreycorey00000000000000{ "reset_status": {} }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-action-resize-request.json0000644000175000017500000000006300000000000027374 0ustar00coreycorey00000000000000{ "resize": { "flavorRef": "2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-action-resize-volume-request.json0000644000175000017500000000011600000000000030700 0ustar00coreycorey00000000000000{ "resize": { "volume": { "size": 4 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-action-restart-request.json0000644000175000017500000000002600000000000027556 0ustar00coreycorey00000000000000{ "restart": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-backup-list-response.json0000644000175000017500000000137300000000000027211 0ustar00coreycorey00000000000000{ "backups": [ { "created": "2019-12-23T22:38:42", "datastore": { "type": "mysql", "version": "5.7", "version_id": "b3d5c099-dbd5-4518-baa3-7c7c195671bf" }, "description": null, "id": "71557643-2245-43ac-b871-3d5a50af21d3", "instance_id": "7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "locationRef": "http://127.0.0.1:8080/v1/AUTH_9f8dd5eacb074c9f87d2d822c9092aa5/database_backups/71557643-2245-43ac-b871-3d5a50af21d3.xbstream.gz.enc", "name": "backup-full", "parent_id": null, "size": 0.12, "status": "COMPLETED", "updated": "2019-12-23T22:38:45" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-configuration-list-response.json0000644000175000017500000000315700000000000030615 0ustar00coreycorey00000000000000{ "instance": { "configuration": { "basedir": "/usr", "connect_timeout": 15, "datadir": "/var/lib/mysql/data", "default_storage_engine": "innodb", "innodb_buffer_pool_size": "2401M", "innodb_data_file_path": 
"ibdata1:10M:autoextend", "innodb_file_per_table": 1, "innodb_log_buffer_size": "25M", "innodb_log_file_size": "50M", "innodb_log_files_in_group": 2, "join_buffer_size": "1M", "key_buffer_size": "800M", "local-infile": 0, "max_allowed_packet": "16392K", "max_connections": 1600, "max_heap_table_size": "256M", "max_user_connections": 1600, "myisam-recover-options": "BACKUP,FORCE", "open_files_limit": 8196, "performance_schema": "ON", "pid-file": "/var/run/mysqld/mysqld.pid", "port": 3306, "query_cache_limit": "1M", "query_cache_size": "128M", "query_cache_type": 1, "read_buffer_size": "512K", "read_rnd_buffer_size": "512K", "server_id": 1468542390, "skip-external-locking": 1, "socket": "/var/run/mysqld/mysqld.sock", "sort_buffer_size": "1M", "table_definition_cache": 4098, "table_open_cache": 4098, "thread_cache_size": 64, "thread_stack": "192K", "tmp_table_size": "256M", "tmpdir": "/var/tmp", "user": "mysql", "wait_timeout": 120 } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-create-request.json0000644000175000017500000000127000000000000026064 0ustar00coreycorey00000000000000{ "instance": { "name": "test", "flavorRef": 1, "volume": {"size": 2}, "nics": [{"net-id": "a5330d7d-0e8c-48b4-9f6c-0f2c4ab1b854"}], "datastore": { "type": "mysql", "version": "5.7" }, "databases": [ { "character_set": "utf8", "collate": "utf8_general_ci", "name": "sampledb" }, { "name": "nextround" } ], "users": [ { "databases": [ { "name": "sampledb" } ], "name": "demouser", "password": "demopassword" } ], "access": { "is_public": true, "allowed_cidrs": ["202.78.240.0/24"] } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-create-response.json0000644000175000017500000000232100000000000026230 0ustar00coreycorey00000000000000{ "instance": { "created": "2019-12-23T20:53:38", "datastore": { "type": "mysql", "version": "5.7" }, "flavor": { "id": "6", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/flavors/6", "rel": "self" }, { "href": "https://127.0.0.1:8779/flavors/6", "rel": "bookmark" } ] }, "id": "b76a6a76-748b-4064-adec-4c9e6c9abd68", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/instances/b76a6a76-748b-4064-adec-4c9e6c9abd68", "rel": "self" }, { "href": "https://127.0.0.1:8779/instances/b76a6a76-748b-4064-adec-4c9e6c9abd68", "rel": "bookmark" } ], "name": "test", "region": "RegionOne", "service_status_updated": "2019-12-23T20:53:38", "status": "BUILD", "updated": "2019-12-23T20:53:38", "volume": { "size": 1 } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-list-detail-response.json0000644000175000017500000000603500000000000027206 0ustar00coreycorey00000000000000{ "instances": [ { "created": "2019-12-23T20:58:35", "datastore": { "type": "mysql", "version": "5.7" }, "flavor": { "id": "6", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/flavors/6", "rel": "self" }, { "href": "https://127.0.0.1:8779/flavors/6", "rel": "bookmark" } ] }, "id": "7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "ip": [ "10.1.0.62", "172.24.5.114" ], "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "rel": "self" }, { "href": 
"https://127.0.0.1:8779/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "rel": "bookmark" } ], "name": "test", "region": "RegionOne", "service_status_updated": "2019-12-23T21:01:11", "status": "HEALTHY", "updated": "2019-12-23T20:58:45", "volume": { "size": 1 } }, { "created": "2019-12-23T20:53:38", "datastore": { "type": "mysql", "version": "5.7" }, "fault": { "created": "2019-12-23T20:53:41", "details": null, "message": "Failed to create User port for instance b76a6a76-748b-4064-adec-4c9e6c9abd68" }, "flavor": { "id": "6", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/flavors/6", "rel": "self" }, { "href": "https://127.0.0.1:8779/flavors/6", "rel": "bookmark" } ] }, "id": "b76a6a76-748b-4064-adec-4c9e6c9abd68", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/instances/b76a6a76-748b-4064-adec-4c9e6c9abd68", "rel": "self" }, { "href": "https://127.0.0.1:8779/instances/b76a6a76-748b-4064-adec-4c9e6c9abd68", "rel": "bookmark" } ], "name": "test", "region": "RegionOne", "service_status_updated": "2019-12-23T20:53:38", "status": "ERROR", "updated": "2019-12-23T20:53:41", "volume": { "size": 1 } } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-list-response.json0000644000175000017500000000250200000000000025741 0ustar00coreycorey00000000000000{ "instances": [ { "datastore": { "type": "mysql", "version": "5.7" }, "flavor": { "id": "6", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/flavors/6", "rel": "self" }, { "href": "https://127.0.0.1:8779/flavors/6", "rel": "bookmark" } ] }, "id": "7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "ip": [ "10.1.0.62", "172.24.5.114" ], "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "rel": "self" }, { "href": "https://127.0.0.1:8779/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "rel": "bookmark" } ], "name": "test", "region": "RegionOne", "status": "ACTIVE", "volume": { "size": 1 } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-disable-request.json0000644000175000017500000000005400000000000027002 0ustar00coreycorey00000000000000{ "name": "general", "disable": 1 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-disable-response.json0000644000175000017500000000053200000000000027151 0ustar00coreycorey00000000000000{ "log": { "name": "general", "type": "USER", "status": "Disabled", "published": "4096", "pending": "0", "container": "data_logs", "prefix": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general/", "metafile": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general_metafile" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-discard-request.json0000644000175000017500000000005400000000000027010 0ustar00coreycorey00000000000000{ "name": "general", "discard": 1 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-discard-response.json0000644000175000017500000000044200000000000027157 
0ustar00coreycorey00000000000000{ "log": { "name": "general", "type": "USER", "status": "Ready", "published": "0", "pending": "128", "container": "None", "prefix": "None", "metafile": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general_metafile" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-enable-request.json0000644000175000017500000000005300000000000026624 0ustar00coreycorey00000000000000{ "name": "general", "enable": 1 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-enable-response.json0000644000175000017500000000044200000000000026774 0ustar00coreycorey00000000000000{ "log": { "name": "general", "type": "USER", "status": "Eanbled", "published": "0", "pending": "0", "container": "None", "prefix": "None", "metafile": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general_metafile" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-list-response.json0000644000175000017500000000132600000000000026523 0ustar00coreycorey00000000000000{ "logs": [ { "name": "general", "type": "USER", "status": "Partial", "published": "128", "pending": "4096", "container": "data_logs", "prefix": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general/", "metafile": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general_metafile" }, { "name": "slow_query", "type": "USER", "status": "Ready", "published": "0", "pending": "128", "container": "None", "prefix": "None", "metafile": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-slow_query_metafile" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-publish-request.json0000644000175000017500000000005400000000000027045 0ustar00coreycorey00000000000000{ "name": "general", "publish": 1 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-publish-response.json0000644000175000017500000000053200000000000027214 0ustar00coreycorey00000000000000{ "log": { "name": "general", "type": "USER", "status": "Published", "published": "128", "pending": "0", "container": "data_logs", "prefix": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general/", "metafile": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general_metafile" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-show-request.json0000644000175000017500000000003200000000000026353 0ustar00coreycorey00000000000000{ "name": "general" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-log-show-response.json0000644000175000017500000000053300000000000026527 0ustar00coreycorey00000000000000{ "log": { "name": "general", "type": "USER", "status": "Partial", "published": "128", "pending": "4096", "container": "data_logs", "prefix": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general/", "metafile": "5e9e616c-1827-45f5-a487-679084d82f7e/mysql-general_metafile" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-mgmt-action-migrate-request.json0000644000175000017500000000007000000000000030463 0ustar00coreycorey00000000000000{ "migrate": { "host": "compute-001" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-mgmt-action-reboot-request.json0000644000175000017500000000002400000000000030324 0ustar00coreycorey00000000000000{ "reboot": {} }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-mgmt-action-reset-task-status-request.json0000644000175000017500000000003700000000000032441 0ustar00coreycorey00000000000000{ "reset-task-status": {} }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-mgmt-action-stop-request.json0000644000175000017500000000002200000000000030015 0ustar00coreycorey00000000000000{ "stop": {} }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-mgmt-list-response.json0000644000175000017500000001015600000000000026707 0ustar00coreycorey00000000000000{ "instances": [ { "created": "2019-12-23T20:58:35", "datastore": { "type": "mysql", "version": "5.7" }, "deleted": false, "deleted_at": null, "encrypted_rpc_messaging": true, "flavor": { "id": "d2", "links": [ { "href": "https://127.0.0.1:8779/v1.0/2afa58fd5db34fd8b7b659d997a5341f/flavors/d2", "rel": "self" }, { "href": "https://127.0.0.1:8779/flavors/d2", "rel": "bookmark" } ] }, "id": "7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "ip": [ "10.1.0.62", "172.24.5.114" ], "links": [ { "href": "https://127.0.0.1:8779/v1.0/2afa58fd5db34fd8b7b659d997a5341f/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "rel": "self" }, { "href": "https://127.0.0.1:8779/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2" } ], "name": "test", "region": "RegionOne", "server": { "host": "bdfe3275004d2a98d38d494fd7adfcd0c6d5d5198e86fa0d6ec5d588", "id": "710a84e5-6adf-4a4c-9167-8bade67a5a1c", "name": "test", "status": "ACTIVE", "tenant_id": "27ac4b83e9d045eb8c373bacaa994eb7" }, "server_id": "710a84e5-6adf-4a4c-9167-8bade67a5a1c", "service_status": "HEALTHY", "service_status_updated": "2019-12-24T00:02:55", "status": "HEALTHY", "task_description": "No tasks for the instance.", "tenant_id": "9f8dd5eacb074c9f87d2d822c9092aa5", "updated": "2019-12-23T23:53:46", "volume": { "size": 1 }, "volume_id": "ccb62e29-73ea-4859-b206-3b3a4e30a991" }, { "created": "2019-12-23T20:53:38", "datastore": { "type": "mysql", "version": "5.7" }, "deleted": false, "deleted_at": null, "encrypted_rpc_messaging": true, "fault": { "created": "2019-12-23T20:53:41", "details": "Failed to create User port for instance b76a6a76-748b-4064-adec-4c9e6c9abd68", "message": "Failed to create User port for instance b76a6a76-748b-4064-adec-4c9e6c9abd68" }, "flavor": { "id": "6", "links": [ { "href": "https://127.0.0.1:8779/v1.0/2afa58fd5db34fd8b7b659d997a5341f/flavors/6", "rel": "self" }, { "href": "https://127.0.0.1:8779/flavors/6", "rel": "bookmark" } ] }, "id": "b76a6a76-748b-4064-adec-4c9e6c9abd68", "links": [ { "href": "https://127.0.0.1:8779/v1.0/2afa58fd5db34fd8b7b659d997a5341f/instances/b76a6a76-748b-4064-adec-4c9e6c9abd68", "rel": "self" }, { "href": 
"https://127.0.0.1:8779/instances/b76a6a76-748b-4064-adec-4c9e6c9abd68", "rel": "bookmark" } ], "name": "test", "region": "RegionOne", "server": null, "server_id": null, "service_status": "NEW", "service_status_updated": "2019-12-23T20:53:38", "status": "ERROR", "task_description": "Build error: Port.", "tenant_id": "9f8dd5eacb074c9f87d2d822c9092aa5", "updated": "2019-12-23T20:53:41", "volume": { "size": 1 }, "volume_id": null } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-mgmt-show-response.json0000644000175000017500000000724400000000000026720 0ustar00coreycorey00000000000000{ "instance": { "created": "2019-12-23T20:58:35", "datastore": { "type": "mysql", "version": "5.7" }, "deleted": false, "deleted_at": null, "encrypted_rpc_messaging": true, "flavor": { "id": "d2", "links": [ { "href": "https://127.0.0.1:8779/v1.0/2afa58fd5db34fd8b7b659d997a5341f/flavors/d2", "rel": "self" }, { "href": "https://127.0.0.1:8779/flavors/d2", "rel": "bookmark" } ] }, "guest_status": { "state_description": "healthy" }, "id": "7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "ip": [ "10.1.0.62", "172.24.5.114" ], "links": [ { "href": "https://127.0.0.1:8779/v1.0/2afa58fd5db34fd8b7b659d997a5341f/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "rel": "self" }, { "href": "https://127.0.0.1:8779/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "rel": "bookmark" } ], "name": "test", "region": "RegionOne", "server": { "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:fc:eb:4a", "OS-EXT-IPS:type": "fixed", "addr": "10.1.0.62", "version": 4 }, { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:fc:eb:4a", "OS-EXT-IPS:type": "floating", "addr": "172.24.5.114", "version": 4 } ], "trove-mgmt": [ { "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:f9:0f:6a", "OS-EXT-IPS:type": "fixed", "addr": "192.168.254.125", "version": 4 } ] }, "host": "bdfe3275004d2a98d38d494fd7adfcd0c6d5d5198e86fa0d6ec5d588", "id": "710a84e5-6adf-4a4c-9167-8bade67a5a1c", "name": "test", "status": "ACTIVE", "tenant_id": "27ac4b83e9d045eb8c373bacaa994eb7" }, "server_id": "710a84e5-6adf-4a4c-9167-8bade67a5a1c", "service_status": "HEALTHY", "service_status_updated": "2019-12-24T00:08:55", "status": "HEALTHY", "task_description": "No tasks for the instance.", "tenant_id": "9f8dd5eacb074c9f87d2d822c9092aa5", "updated": "2019-12-23T23:53:46", "volume": { "attachments": [ { "attached_at": "2019-12-23T23:11:39.000000", "attachment_id": "a31526ba-884a-4e1f-8c64-4dc4b987ba4e", "device": "/dev/vdb", "host_name": "node.trove.magnum-pg0.utah.cloudlab.us", "id": "ccb62e29-73ea-4859-b206-3b3a4e30a991", "server_id": "710a84e5-6adf-4a4c-9167-8bade67a5a1c", "volume_id": "ccb62e29-73ea-4859-b206-3b3a4e30a991" } ], "availability_zone": "nova", "created_at": "2019-12-23T20:58:39.000000", "id": "ccb62e29-73ea-4859-b206-3b3a4e30a991", "size": 1, "status": "in-use", "total": 0.95, "used": 0.12 }, "volume_id": "ccb62e29-73ea-4859-b206-3b3a4e30a991" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-patch-detach-replica-request.json0000644000175000017500000000012100000000000030555 0ustar00coreycorey00000000000000{ "instance": { "replica_of": null, "slave_of": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 
trove-12.1.0.dev92/api-ref/source/samples/instance-patch-update-name-request.json0000644000175000017500000000007600000000000030121 0ustar00coreycorey00000000000000{ "instance": { "name": "sample_instance" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-patch-upgrade-datastore-version-request.json0000644000175000017500000000012400000000000033011 0ustar00coreycorey00000000000000{ "instance": { "datastore_version": "sample_datastore_version" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-put-attach-config-group-request.json0000644000175000017500000000013400000000000031266 0ustar00coreycorey00000000000000{ "instance": { "configuration": "2aa51628-5c42-4086-8682-137caffd2ba6" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/instance-show-response.json0000644000175000017500000000247400000000000025756 0ustar00coreycorey00000000000000{ "instance": { "created": "2019-12-23T20:58:35", "datastore": { "type": "mysql", "version": "5.7" }, "flavor": { "id": "6", "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/flavors/6", "rel": "self" }, { "href": "https://127.0.0.1:8779/flavors/6", "rel": "bookmark" } ] }, "id": "7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "ip": [ "10.1.0.62", "172.24.5.114" ], "links": [ { "href": "https://127.0.0.1:8779/v1.0/9f8dd5eacb074c9f87d2d822c9092aa5/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "rel": "self" }, { "href": "https://127.0.0.1:8779/instances/7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "rel": "bookmark" } ], "name": "test", "region": "RegionOne", "service_status_updated": "2019-12-23T22:15:11", "status": "HEALTHY", "updated": "2019-12-23T20:58:45", "volume": { "size": 1, "used": 0.12 } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/limit-show-response.json0000644000175000017500000000253200000000000025263 0ustar00coreycorey00000000000000{ "limits": [ { "max_backups": 50, "max_instances": 10, "max_volumes": 40, "verb": "ABSOLUTE" }, { "nextAvailable": "2019-12-23T22:38:41Z", "regex": ".*", "remaining": 199, "unit": "MINUTE", "uri": "*", "value": 200, "verb": "POST" }, { "nextAvailable": "2019-12-24T02:49:24Z", "regex": ".*", "remaining": 199, "unit": "MINUTE", "uri": "*", "value": 200, "verb": "PUT" }, { "nextAvailable": "2019-12-24T03:42:19Z", "regex": ".*", "remaining": 200, "unit": "MINUTE", "uri": "*", "value": 200, "verb": "DELETE" }, { "nextAvailable": "2019-12-24T03:42:19Z", "regex": ".*", "remaining": 199, "unit": "MINUTE", "uri": "*", "value": 200, "verb": "GET" }, { "nextAvailable": "2019-12-24T03:42:19Z", "regex": "^/mgmt", "remaining": 200, "unit": "MINUTE", "uri": "*/mgmt", "value": 200, "verb": "POST" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/api-ref/source/samples/quota-show-response.json0000644000175000017500000000064600000000000025302 0ustar00coreycorey00000000000000{ "quotas": [ { "in_use": 5, "limit": 15, "reserved": 0, "resource": "instances" }, { "in_use": 2, "limit": 50, "reserved": 0, "resource": "backups" }, { "in_use": 1, "limit": 40, "reserved": 0, "resource": "volumes" } ] 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/api-ref/source/samples/quota-update.json0000644000175000017500000000006100000000000023737 0ustar00coreycorey00000000000000{ "quotas": { "instances": 20 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/user-check-root-response.json0000644000175000017500000000003400000000000026174 0ustar00coreycorey00000000000000{ "rootEnabled": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/user-create-request.json0000644000175000017500000000112500000000000025235 0ustar00coreycorey00000000000000{ "users": [ { "databases": [ { "name": "databaseA" } ], "name": "dbuser1", "password": "password" }, { "databases": [ { "name": "databaseB" }, { "name": "databaseC" } ], "name": "dbuser2", "password": "password" }, { "name": "dbuser3", "password": "password" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/user-enable-root-response.json0000644000175000017500000000015300000000000026347 0ustar00coreycorey00000000000000{ "user": { "name": "root", "password": "q5BXXMUMbc8XBf5kkVToKZW4Kx3Egv5VjY0P" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/user-grant-databases-access-request.json0000644000175000017500000000011200000000000030264 0ustar00coreycorey00000000000000{ "databases":[ { "name": "test" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/user-list-response.json0000644000175000017500000000112100000000000025107 0ustar00coreycorey00000000000000{ "users": [ { "databases": [ { "name": "databaseA" } ], "host": "%", "name": "dbuser1" }, { "databases": [ { "name": "databaseB" }, { "name": "databaseC" } ], "host": "%", "name": "dbuser2" }, { "databases": [], "host": "%", "name": "dbuser3" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/user-put-request.json0000644000175000017500000000016700000000000024607 0ustar00coreycorey00000000000000{ "user": { "name": "new_name", "password": "new_password", "host": "192.168.30.98" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/user-show-response.json0000644000175000017500000000025000000000000025116 0ustar00coreycorey00000000000000{ "user": { "databases": [ { "name": "databaseA" } ], "host": "%", "name": "dbuser1" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/user-show-root-history-response.json0000644000175000017500000000026200000000000027601 0ustar00coreycorey00000000000000{ "root_history": { "enabled": "2019-12-24T03:10:16", "id": "7de1bed8-6983-4d46-9a52-0abfbb0d27a2", "user": "7578487d3cc843dca904d2f4eced7dd2" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 
trove-12.1.0.dev92/api-ref/source/samples/users-put-request.json0000644000175000017500000000023400000000000024765 0ustar00coreycorey00000000000000{ "users": [ { "name": "new_name", "password": "another_password", "host": "192.168.30.98" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/samples/versions-response.json0000644000175000017500000000050000000000000025030 0ustar00coreycorey00000000000000{ "versions": [ { "id": "v1.0", "links": [ { "href": "http://127.0.0.1:8779/v1.0/", "rel": "self" } ], "status": "CURRENT", "updated": "2012-08-01T00:00:00Z" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/api-ref/source/users.inc0000644000175000017500000001160500000000000020631 0ustar00coreycorey00000000000000.. -*- rst -*- ===== Users ===== Create user ~~~~~~~~~~~ .. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/users Creates users for a database instance. When specifying user name and password, you can also specify databases that the user can access. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId Request Example --------------- .. literalinclude:: samples/user-create-request.json :language: javascript List database instance users ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/instances/{instanceId}/users Lists the users in a database instance and the associated databases for each user. This operation does not return system users. A system user is a database administrator who administers the health of the database. Also, this operation returns the ``root`` user only if it is enabled. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId Response Example ---------------- .. literalinclude:: samples/user-list-response.json :language: javascript Show database instance user ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/instances/{instanceId}/users/{user_name} Get information about a specific user in an instance. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId - user_name: user_name Response Example ---------------- .. literalinclude:: samples/user-show-response.json :language: javascript Delete user ~~~~~~~~~~~ .. rest_method:: DELETE /v1.0/{project_id}/instances/{instanceId}/users/{user_name} Deletes a user for a database instance. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId - user_name: user_name Update a user ~~~~~~~~~~~~~ .. rest_method:: PUT /v1.0/{project_id}/instances/{instanceId}/users/{user_name} Modify attributes (e.g. user name, allowed host, password) for a specific user. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId - user_name: user_name Request Example --------------- .. literalinclude:: samples/user-put-request.json :language: javascript Update users passwords ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v1.0/{project_id}/instances/{instanceId}/users Modify passwords for users. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId Request Example --------------- ..
literalinclude:: samples/users-put-request.json :language: javascript Show root-enabled status for database instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/instances/{instanceId}/root Shows root-enabled status for a database instance. Returns ``true`` if the root user is enabled for a database instance. Otherwise, returns ``false``. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId Response Example ---------------- .. literalinclude:: samples/user-check-root-response.json :language: javascript Enable root user ~~~~~~~~~~~~~~~~ .. rest_method:: POST /v1.0/{project_id}/instances/{instanceId}/root Enables the root user for a database instance and returns the root password. This operation generates a root password for the root user and enables the root user to log in from any host. Enabling the root user should be restricted to the admin user, because with root access a cloud user can change database settings (e.g. remove users), bypassing the Trove API. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId Response Example ---------------- .. literalinclude:: samples/user-enable-root-response.json :language: javascript Disable root user ~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v1.0/{project_id}/instances/{instanceId}/root Disables the root user. Normal response codes: 204 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId Show root-enabled history for database instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v1.0/{project_id}/mgmt/instances/{instanceId}/root Admin-only API. Shows the date and time that the root user was enabled (if ever) for an instance. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - instanceId: instanceId Response Example ---------------- ..
literalinclude:: samples/user-show-root-history-response.json :language: javascript././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/babel.cfg0000644000175000017500000000002100000000000015666 0ustar00coreycorey00000000000000[python: **.py] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/bindep.txt0000644000175000017500000000167600000000000016153 0ustar00coreycorey00000000000000gettext [doc test] language-pack-en [platform:ubuntu] libffi-dev [platform:dpkg test] libffi-devel [platform:rpm test] libmysqlclient-dev [platform:dpkg] libpq-dev [platform:dpkg test] libsqlite3-dev [platform:dpkg test] libxml2-dev [platform:dpkg test] libxslt-devel [platform:rpm test] libxslt1-dev [platform:dpkg test] locales [platform:debian] mysql [platform:rpm] mysql-client [platform:dpkg] mysql-devel [platform:rpm test] mysql-server pkg-config [platform:dpkg test] pkgconfig [platform:rpm test] postgresql postgresql-client [platform:dpkg] postgresql-devel [platform:rpm test] postgresql-server [platform:rpm] python-dev [platform:dpkg test] python-devel [platform:rpm test] python3-all [platform:dpkg !platform:ubuntu-precise] python3-all-dev [platform:dpkg !platform:ubuntu-precise] python3-devel [platform:fedora] python34-devel [platform:centos] sqlite-devel [platform:rpm test] libpcre3-dev [platform:dpkg test] pcre-devel [platform:rpm test] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.676109 trove-12.1.0.dev92/contrib/0000755000175000017500000000000000000000000015607 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/contrib/trove-guestagent0000755000175000017500000000233000000000000021036 0ustar00coreycorey00000000000000#!/usr/bin/python3 # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """This is necessary currently because the guest needs an init script.
When the guest is moved out of the application, this will no longer be needed in the project.""" import os import sys possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'trove', '__init__.py')): sys.path.insert(0, possible_topdir) from trove.cmd.guest import main if __name__ == "__main__": sys.exit(main()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.676109 trove-12.1.0.dev92/devstack/0000755000175000017500000000000000000000000015753 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/devstack/README.rst0000644000175000017500000000210200000000000017445 0ustar00coreycorey00000000000000=========================== Enabling Trove in DevStack =========================== To enable Trove in DevStack, perform the following steps: :: Note: The python-troveclient is automatically installed. If you need to control how the client gets installed, set the TROVECLIENT_REPO, TROVECLIENT_DIR and TROVECLIENT_BRANCH environment variables appropriately. Download DevStack ================= .. code-block:: bash export DEVSTACK_DIR=~/devstack git clone https://opendev.org/openstack/devstack.git $DEVSTACK_DIR Enable the Trove plugin ======================= Enable the plugin by adding the following section to ``$DEVSTACK_DIR/local.conf``: .. code-block:: bash [[local|localrc]] enable_plugin trove https://opendev.org/openstack/trove Optionally, a git refspec (branch or tag or commit) may be provided as follows: .. code-block:: bash [[local|localrc]] enable_plugin trove https://opendev.org/openstack/trove master Run the DevStack utility ======================== .. code-block:: bash cd $DEVSTACK_DIR ./stack.sh ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.676109 trove-12.1.0.dev92/devstack/files/0000755000175000017500000000000000000000000017055 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/devstack/files/apache-trove-api.template0000644000175000017500000000301300000000000023734 0ustar00coreycorey00000000000000# Copyright 2017 Amrith Kumar. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is an example Apache2 configuration file for using the # Trove API through mod_wsgi. This version assumes you are # running devstack to configure the software.
Listen %TROVE_SERVICE_PORT% <VirtualHost *:%TROVE_SERVICE_PORT%> WSGIDaemonProcess trove-api user=%USER% processes=%APIWORKERS% threads=1 display-name=%{GROUP} WSGIScriptAlias / %TROVE_WSGI_DIR%/app.wsgi WSGIApplicationGroup %{GLOBAL} WSGIProcessGroup trove-api WSGIPassAuthorization On ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/trove-api.log CustomLog /var/log/%APACHE_NAME%/trove-api-access.log combined <Directory %TROVE_WSGI_DIR%> WSGIProcessGroup trove-api WSGIApplicationGroup %{GLOBAL} <IfVersion >= 2.4> Require all granted </IfVersion> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> </Directory> </VirtualHost> ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.676109 trove-12.1.0.dev92/devstack/files/debs/0000755000175000017500000000000000000000000017772 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/devstack/files/debs/trove0000644000175000017500000000003200000000000021047 0ustar00coreycorey00000000000000libxslt1-dev # testonly ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.676109 trove-12.1.0.dev92/devstack/files/rpms/0000755000175000017500000000000000000000000020036 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/devstack/files/rpms/trove0000644000175000017500000000003300000000000021114 0ustar00coreycorey00000000000000libxslt-devel # testonly ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.680109 trove-12.1.0.dev92/devstack/files/rpms-suse/0000755000175000017500000000000000000000000021013 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/devstack/files/rpms-suse/trove0000644000175000017500000000003200000000000022070 0ustar00coreycorey00000000000000libxslt1-dev # testonly ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/devstack/plugin.sh0000644000175000017500000006325500000000000017610 0ustar00coreycorey00000000000000#!/bin/bash # # lib/trove # Functions to control the configuration and operation of the **Trove** service # Dependencies: # ``functions`` file # ``DEST``, ``STACK_USER`` must be defined # ``SERVICE_{HOST|PROTOCOL|TOKEN}`` must be defined # ``stack.sh`` calls the entry points in this order: # # install_trove # install_python_troveclient # configure_trove # init_trove # start_trove # stop_trove # cleanup_trove # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace # Functions # --------- # Test if any Trove services are enabled # is_trove_enabled function is_trove_enabled { [[ ,${ENABLED_SERVICES} =~ ,"tr-" ]] && return 0 return 1 } # setup_trove_logging() - Adds logging configuration to conf files function setup_trove_logging { local CONF=$1 iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CONF DEFAULT use_syslog $SYSLOG if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output setup_colorized_logging $CONF DEFAULT tenant user fi } # create_trove_accounts() - Set up common required trove accounts # Tenant User Roles # ------------------------------------------------------------------ # service trove admin # if enabled function create_trove_accounts { if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then create_service_user "trove" "admin" # Add trove user to the clouds.yaml
CLOUDS_YAML=${CLOUDS_YAML:-/etc/openstack/clouds.yaml} $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \ --file $CLOUDS_YAML \ --os-cloud trove \ --os-region-name $REGION_NAME \ $CA_CERT_ARG \ --os-auth-url $KEYSTONE_SERVICE_URI \ --os-username trove \ --os-password $SERVICE_PASSWORD \ --os-project-name $SERVICE_PROJECT_NAME local trove_service=$(get_or_create_service "trove" \ "database" "Trove Service") get_or_create_endpoint $trove_service \ "$REGION_NAME" \ "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" fi } # Removes all the WSGI related files and restart apache. function cleanup_trove_apache_wsgi { sudo rm -rf $TROVE_WSGI_DIR sudo rm -f $(apache_site_config_for trove-api) restart_apache_server } # stack.sh entry points # --------------------- # cleanup_trove() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_trove { # Clean up dirs rm -fr $TROVE_CONF_DIR/* if is_service_enabled horizon; then cleanup_trove_dashboard fi if [[ "${TROVE_USE_MOD_WSGI}" == "TRUE" ]]; then echo "Cleaning up Trove's WSGI setup" cleanup_trove_apache_wsgi fi } # cleanup_trove_dashboard() - Remove Trove dashboard files from Horizon function cleanup_trove_dashboard { rm -f $HORIZON_DIR/openstack_dashboard/local/enabled/_17*database*.py } # iniset_conditional() - Sets the value in the inifile, but only if it's # actually got a value function iniset_conditional { local FILE=$1 local SECTION=$2 local OPTION=$3 local VALUE=$4 if [[ -n "$VALUE" ]]; then iniset ${FILE} ${SECTION} ${OPTION} ${VALUE} fi } # configure_keystone_token_life() - update the keystone token life to 3h function configure_keystone_token_life() { KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/nova} KEYSTONE_CONF=${KEYSTONE_CONF:-${KEYSTONE_CONF_DIR}/keystone.conf} KEYSTONE_TOKEN_LIFE=${KEYSTONE_TOKEN_LIFE:-10800} iniset $KEYSTONE_CONF token expiration ${KEYSTONE_TOKEN_LIFE} echo "configure_keystone_token_life: setting keystone token life to ${KEYSTONE_TOKEN_LIFE}" echo "configure_keystone_token_life: restarting Keystone" stop_keystone start_keystone } # configure_nova_kvm() - update the nova hypervisor configuration if possible function configure_nova_kvm { cpu="unknown" if [ -e /sys/module/kvm_*/parameters/nested ]; then reconfigure_nova="F" if [ -e /sys/module/kvm_intel/parameters/nested ]; then cpu="Intel" if [[ "$(cat /sys/module/kvm_*/parameters/nested)" == "Y" ]]; then reconfigure_nova="Y" fi elif [ -e /sys/module/kvm_amd/parameters/nested ]; then cpu="AMD" if [[ "$(cat /sys/module/kvm_*/parameters/nested)" == "1" ]]; then reconfigure_nova="Y" fi fi if [ "${reconfigure_nova}" == "Y" ]; then NOVA_CONF_DIR=${NOVA_CONF_DIR:-/etc/nova} NOVA_CONF=${NOVA_CONF:-${NOVA_CONF_DIR}/nova.conf} iniset $NOVA_CONF libvirt cpu_mode "none" iniset $NOVA_CONF libvirt virt_type "kvm" fi fi virt_type=$(iniget $NOVA_CONF libvirt virt_type) echo "configure_nova_kvm: using virt_type: ${virt_type} for cpu: ${cpu}." 
} # Setup WSGI config files for Trove and enable the site function config_trove_apache_wsgi { local trove_apache_conf sudo mkdir -p ${TROVE_WSGI_DIR} sudo cp $TROVE_DIR/trove/cmd/app.wsgi $TROVE_WSGI_DIR/app.wsgi trove_apache_conf=$(apache_site_config_for trove-api) sudo cp $TROVE_DEVSTACK_FILES/apache-trove-api.template ${trove_apache_conf} sudo sed -e " s|%TROVE_SERVICE_PORT%|${TROVE_SERVICE_PORT}|g; s|%TROVE_WSGI_DIR%|${TROVE_WSGI_DIR}|g; s|%USER%|${STACK_USER}|g; s|%APACHE_NAME%|${APACHE_NAME}|g; s|%APIWORKERS%|${API_WORKERS}|g; " -i ${trove_apache_conf} enable_apache_site trove-api tail_log trove-access /var/log/${APACHE_NAME}/trove-api-access.log tail_log trove-api /var/log/${APACHE_NAME}/trove-api.log } # configure_trove() - Set config files, create data dirs, etc function configure_trove { setup_develop $TROVE_DIR # Temporarily disable re-configuring nova_kvm until # more nodes in the pool can support it without crashing. # configure_nova_kvm configure_keystone_token_life # Create the trove conf dir and cache dirs if they don't exist sudo install -d -o $STACK_USER ${TROVE_CONF_DIR} # Copy api-paste file over to the trove conf dir cp $TROVE_LOCAL_API_PASTE_INI $TROVE_API_PASTE_INI # configure apache related files if [[ "${TROVE_USE_MOD_WSGI}" == "TRUE" ]]; then echo "Configuring Trove to use mod-wsgi and Apache" config_trove_apache_wsgi fi # (Re)create trove conf files rm -f $TROVE_CONF $TROVE_GUESTAGENT_CONF TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION ################################################################ trove conf setup_trove_logging $TROVE_CONF iniset_conditional $TROVE_CONF DEFAULT max_accepted_volume_size $TROVE_MAX_ACCEPTED_VOLUME_SIZE iniset_conditional $TROVE_CONF DEFAULT max_instances_per_tenant $TROVE_MAX_INSTANCES_PER_TENANT iniset_conditional $TROVE_CONF DEFAULT max_volumes_per_tenant $TROVE_MAX_VOLUMES_PER_TENANT iniset_conditional $TROVE_CONF DEFAULT agent_call_low_timeout $TROVE_AGENT_CALL_LOW_TIMEOUT iniset_conditional $TROVE_CONF DEFAULT agent_call_high_timeout $TROVE_AGENT_CALL_HIGH_TIMEOUT iniset_conditional $TROVE_CONF DEFAULT resize_time_out $TROVE_RESIZE_TIME_OUT iniset_conditional $TROVE_CONF DEFAULT usage_timeout $TROVE_USAGE_TIMEOUT iniset_conditional $TROVE_CONF DEFAULT state_change_wait_time $TROVE_STATE_CHANGE_WAIT_TIME iniset_conditional $TROVE_CONF DEFAULT reboot_time_out 300 configure_keystone_authtoken_middleware $TROVE_CONF trove iniset $TROVE_CONF service_credentials username trove iniset $TROVE_CONF service_credentials user_domain_name Default iniset $TROVE_CONF service_credentials project_domain_name Default iniset $TROVE_CONF service_credentials password $SERVICE_PASSWORD iniset $TROVE_CONF service_credentials project_name $SERVICE_PROJECT_NAME iniset $TROVE_CONF service_credentials region_name $REGION_NAME iniset $TROVE_CONF service_credentials auth_url $TROVE_AUTH_ENDPOINT iniset $TROVE_CONF database connection `database_connection_url trove` iniset $TROVE_CONF DEFAULT rpc_backend "rabbit" iniset $TROVE_CONF DEFAULT control_exchange trove iniset $TROVE_CONF DEFAULT transport_url rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/ iniset $TROVE_CONF DEFAULT trove_api_workers "$API_WORKERS" iniset $TROVE_CONF DEFAULT taskmanager_manager trove.taskmanager.manager.Manager iniset $TROVE_CONF DEFAULT default_datastore $TROVE_DATASTORE_TYPE iniset $TROVE_CONF cassandra tcp_ports 7000,7001,7199,9042,9160 iniset $TROVE_CONF couchbase tcp_ports 8091,8092,4369,11209-11211,21100-21199 iniset $TROVE_CONF couchdb 
tcp_ports 5984 iniset $TROVE_CONF db2 tcp_ports 50000 iniset $TROVE_CONF mariadb tcp_ports 3306,4444,4567,4568 iniset $TROVE_CONF mongodb tcp_ports 2500,27017,27019 iniset $TROVE_CONF mysql tcp_ports 3306 iniset $TROVE_CONF percona tcp_ports 3306 iniset $TROVE_CONF postgresql tcp_ports 5432 iniset $TROVE_CONF pxc tcp_ports 3306,4444,4567,4568 iniset $TROVE_CONF redis tcp_ports 6379,16379 iniset $TROVE_CONF vertica tcp_ports 5433,5434,5444,5450,4803 ################################################################ trove guest agent conf setup_trove_logging $TROVE_GUESTAGENT_CONF iniset_conditional $TROVE_GUESTAGENT_CONF DEFAULT state_change_wait_time $TROVE_STATE_CHANGE_WAIT_TIME iniset_conditional $TROVE_GUESTAGENT_CONF DEFAULT command_process_timeout $TROVE_COMMAND_PROCESS_TIMEOUT iniset $TROVE_GUESTAGENT_CONF DEFAULT rpc_backend "rabbit" iniset $TROVE_GUESTAGENT_CONF DEFAULT transport_url rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$TROVE_HOST_GATEWAY:5672/ iniset $TROVE_GUESTAGENT_CONF DEFAULT control_exchange trove iniset $TROVE_GUESTAGENT_CONF DEFAULT ignore_users os_admin iniset $TROVE_GUESTAGENT_CONF DEFAULT log_dir /var/log/trove/ iniset $TROVE_GUESTAGENT_CONF DEFAULT log_file trove-guestagent.log iniset $TROVE_GUESTAGENT_CONF service_credentials username trove iniset $TROVE_GUESTAGENT_CONF service_credentials user_domain_name Default iniset $TROVE_GUESTAGENT_CONF service_credentials project_domain_name Default iniset $TROVE_GUESTAGENT_CONF service_credentials password $SERVICE_PASSWORD iniset $TROVE_GUESTAGENT_CONF service_credentials project_name $SERVICE_PROJECT_NAME iniset $TROVE_GUESTAGENT_CONF service_credentials region_name $REGION_NAME iniset $TROVE_GUESTAGENT_CONF service_credentials auth_url $TROVE_AUTH_ENDPOINT # 1. To avoid 'Connection timed out' error of sudo command inside the guest agent # 2. Config the controller IP address used by guest-agent to download Trove code during initialization (only valid for dev_mode=true). common_cloudinit=/etc/trove/cloudinit/common.cloudinit sudo mkdir -p $(dirname ${common_cloudinit}) sudo touch ${common_cloudinit} sudo tee ${common_cloudinit} >/dev/null <> ${SSH_DIR}/authorized_keys else # This is to allow guest agent ssh into the controller in dev mode. cat ${SSH_DIR}/id_rsa.pub >> ${SSH_DIR}/authorized_keys sort ${SSH_DIR}/authorized_keys | uniq > ${SSH_DIR}/authorized_keys.uniq mv ${SSH_DIR}/authorized_keys.uniq ${SSH_DIR}/authorized_keys chmod 600 ${SSH_DIR}/authorized_keys fi echo "Creating Trove management keypair ${TROVE_MGMT_KEYPAIR_NAME}" openstack --os-region-name RegionOne --os-password ${SERVICE_PASSWORD} --os-project-name service --os-username trove \ keypair create --public-key ${SSH_DIR}/id_rsa.pub ${TROVE_MGMT_KEYPAIR_NAME} iniset $TROVE_CONF DEFAULT nova_keypair ${TROVE_MGMT_KEYPAIR_NAME} } function config_cinder_volume_type { volume_type=$(openstack --os-region-name RegionOne --os-password ${SERVICE_PASSWORD} \ --os-project-name service --os-username trove \ volume type list -c Name -f value | awk 'NR==1 {print}') iniset $TROVE_CONF DEFAULT cinder_volume_type ${volume_type} } function config_mgmt_security_group { local sgid echo "Creating Trove management security group." 
sgid=$(openstack --os-region-name RegionOne --os-password ${SERVICE_PASSWORD} --os-project-name service --os-username trove security group create ${TROVE_MGMT_SECURITY_GROUP} -f value -c id) # Allow ICMP openstack --os-region-name RegionOne --os-password ${SERVICE_PASSWORD} --os-project-name service --os-username trove \ security group rule create --proto icmp $sgid # Allow SSH openstack --os-region-name RegionOne --os-password ${SERVICE_PASSWORD} --os-project-name service --os-username trove \ security group rule create --protocol tcp --dst-port 22 $sgid iniset $TROVE_CONF DEFAULT management_security_groups $sgid } # Dispatcher for trove plugin if is_service_enabled trove; then if [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Trove" install_trove install_python_troveclient elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then if is_service_enabled key; then create_trove_accounts fi echo_summary "Configuring Trove" configure_trove elif [[ "$1" == "stack" && "$2" == "extra" ]]; then init_trove_db config_nova_keypair config_cinder_volume_type config_mgmt_security_group config_trove_network create_guest_image echo_summary "Starting Trove" start_trove # Guarantee the file permission in the trove code repo in order to # download trove code from trove-guestagent. sudo chown -R $STACK_USER:$STACK_USER "$DEST/trove" elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then echo_summary "Configuring Tempest for Trove" configure_tempest_for_trove fi if [[ "$1" == "unstack" ]]; then stop_trove cleanup_trove fi fi # Restore xtrace $XTRACE # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/devstack/settings0000644000175000017500000001035600000000000017543 0ustar00coreycorey00000000000000# Settings needed for Trove plugin # -------------------------------- # Set up default directories TROVE_DIR=${TROVE_DIR:-${DEST}/trove} TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git} TROVE_BRANCH=${TROVE_BRANCH:-master} TROVE_CLIENT_DIR=${TROVE_CLIENT_DIR:-${TROVECLIENT_DIR:-${DEST}/python-troveclient}} TROVE_CLIENT_REPO=${TROVE_CLIENT_REPO:-${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git}} TROVE_CLIENT_BRANCH=${TROVE_CLIENT_BRANCH:-${TROVECLIENT_BRANCH:-master}} TROVE_DASHBOARD_DIR=${TROVE_DASHBOARD_DIR:-${DEST}/trove-dashboard} TROVE_DASHBOARD_REPO=${TROVE_DASHBOARD_REPO:-${GIT_BASE}/openstack/trove-dashboard.git} TROVE_DASHBOARD_BRANCH=${TROVE_DASHBOARD_BRANCH:-master} TRIPLEO_IMAGES_DIR=${TRIPLEO_IMAGES_DIR:-${DEST}/tripleo-image-elements} TRIPLEO_IMAGES_REPO=${TRIPLEO_IMAGES_REPO:-${GIT_BASE}/openstack/tripleo-image-elements.git} TRIPLEO_IMAGES_BRANCH=${TRIPLEO_IMAGES_BRANCH:-master} # Set up configuration directory and files TROVE_CONF_DIR=${TROVE_CONF_DIR:-/etc/trove} TROVE_CONF=${TROVE_CONF:-${TROVE_CONF_DIR}/trove.conf} TROVE_GUESTAGENT_CONF=${TROVE_GUESTAGENT_CONF:-${TROVE_CONF_DIR}/trove-guestagent.conf} TROVE_API_PASTE_INI=${TROVE_API_PASTE_INI:-${TROVE_CONF_DIR}/api-paste.ini} TROVE_LOCAL_CONF_DIR=${TROVE_LOCAL_CONF_DIR:-${TROVE_DIR}/etc/trove} TROVE_LOCAL_API_PASTE_INI=${TROVE_LOCAL_API_PASTE_INI:-${TROVE_LOCAL_CONF_DIR}/api-paste.ini} TROVE_LOCAL_POLICY_JSON=${TROVE_LOCAL_POLICY_JSON:-${TROVE_LOCAL_CONF_DIR}/policy.json} TROVE_IMAGE_OS=${TROVE_IMAGE_OS:-"ubuntu"} TROVE_IMAGE_OS_RELEASE=${TROVE_IMAGE_OS_RELEASE:-"xenial"} TROVE_DATASTORE_TYPE=${TROVE_DATASTORE_TYPE:-"mysql"} if [[ 
"$DISTRO" == "xenial" || "$DISTRO" == "bionic" ]]; then TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.7"} TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.7"} else TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.6"} TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.6"} fi # Configuration values listed here for reference TROVE_MAX_ACCEPTED_VOLUME_SIZE=${TROVE_MAX_ACCEPTED_VOLUME_SIZE} TROVE_MAX_INSTANCES_PER_TENANT=${TROVE_MAX_INSTANCES_PER_TENANT} TROVE_MAX_VOLUMES_PER_TENANT=${TROVE_MAX_VOLUMES_PER_TENANT} TROVE_AGENT_CALL_LOW_TIMEOUT=${TROVE_AGENT_CALL_LOW_TIMEOUT} TROVE_AGENT_CALL_HIGH_TIMEOUT=${TROVE_AGENT_CALL_HIGH_TIMEOUT:-1200} TROVE_RESIZE_TIME_OUT=${TROVE_RESIZE_TIME_OUT} TROVE_USAGE_TIMEOUT=${TROVE_USAGE_TIMEOUT:-900} TROVE_STATE_CHANGE_WAIT_TIME=${TROVE_STATE_CHANGE_WAIT_TIME} TROVE_COMMAND_PROCESS_TIMEOUT=${TROVE_COMMAND_PROCESS_TIMEOUT:-60} # Set up the host gateway if is_service_enabled neutron; then TROVE_HOST_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} TROVE_MGMT_NETWORK_NAME=${TROVE_MGMT_NETWORK_NAME:-"trove-mgmt"} TROVE_MGMT_SUBNET_NAME=${TROVE_MGMT_SUBNET_NAME:-${TROVE_MGMT_NETWORK_NAME}-subnet} TROVE_MGMT_SUBNET_RANGE=${TROVE_MGMT_SUBNET_RANGE:-"192.168.254.0/24"} TROVE_MGMT_SUBNET_START=${TROVE_MGMT_SUBNET_START:-"192.168.254.2"} TROVE_MGMT_SUBNET_END=${TROVE_MGMT_SUBNET_END:-"192.168.254.200"} else TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} fi TROVE_SHARE_NETWORKS=$(trueorfalse TRUE TROVE_SHARE_NETWORKS) TROVE_MGMT_KEYPAIR_NAME=${TROVE_MGMT_KEYPAIR_NAME:-"trove-mgmt"} TROVE_MGMT_SECURITY_GROUP=${TROVE_MGMT_SECURITY_GROUP:-"trove-mgmt"} # Support entry points installation of console scripts if [[ -d $TROVE_DIR/bin ]]; then TROVE_BIN_DIR=$TROVE_DIR/bin else TROVE_BIN_DIR=$(get_python_exec_prefix) fi TROVE_MANAGE=$TROVE_BIN_DIR/trove-manage # By default enable Trove API behind mod-wsgi. Change this to FALSE # if you don't want Apache/mod-wsgi TROVE_USE_MOD_WSGI=$(trueorfalse TRUE TROVE_USE_MOD_WSGI) TROVE_SERVICE_PORT=${TROVE_SERVICE_PORT:-8779} TROVE_DEVSTACK_DIR=${TROVE_DIR}/devstack TROVE_DEVSTACK_FILES=${TROVE_DEVSTACK_DIR}/files TROVE_WSGI_DIR=${TROVE_WSGI_DIR:-/var/www/trove} enable_service trove tr-api tr-tmgr tr-cond # Trove CI tests server group anti-affinity policies and therefore needs # Nova to use a single MQ for the computes to talk to the scheduler. CELLSV2_SETUP=singleconductor # Enable or disable the Trove guest image build during devstack installation. TROVE_ENABLE_IMAGE_BUILD=${TROVE_ENABLE_IMAGE_BUILD:-"true"} TROVE_NON_DEV_IMAGE_URL_MYSQL=${TROVE_NON_DEV_IMAGE_URL_MYSQL:-""} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6481085 trove-12.1.0.dev92/doc/0000755000175000017500000000000000000000000014714 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.680109 trove-12.1.0.dev92/doc/source/0000755000175000017500000000000000000000000016214 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.680109 trove-12.1.0.dev92/doc/source/admin/0000755000175000017500000000000000000000000017304 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/admin/building_guest_images.rst0000644000175000017500000001561100000000000024373 0ustar00coreycorey00000000000000.. 
--- trove-12.1.0.dev92/doc/source/admin/building_guest_images.rst ---

.. _build_guest_images:

.. role:: bash(code)
   :language: bash

=========================================
Building Guest Images for OpenStack Trove
=========================================

.. If section numbers are desired, unindent this
   .. sectnum::

.. If a TOC is desired, unindent this
   .. contents::

Overview
========

When Trove receives a command to create a database instance, it does so by
launching a Nova instance based on the appropriate guest image stored in
Glance. This document walks through the steps to build those guest images.

.. note::

   For testing purposes, Trove guest images for some specific databases are
   periodically built and published at
   http://tarballs.openstack.org/trove/images/ by the Trove upstream CI.
   Additionally, if you install Trove in a devstack environment, a MySQL
   image is created and registered in Glance automatically, unless this is
   disabled by setting ``TROVE_ENABLE_IMAGE_BUILD=false`` in the devstack
   ``local.conf`` file.

High Level Overview of a Trove Guest Instance
=============================================

At the most basic level, a Trove Guest Instance is a Nova instance launched
by Trove in response to a create command. For most of this document, we will
confine ourselves to single-instance databases; in other words, without the
additional complexity of replication or mirroring. Guest instances and guest
images for replicated and mirrored database instances will be addressed
specifically in later sections of this document.

This section describes the various components of a Trove Guest Instance.

-----------------------------
Operating System and Database
-----------------------------

A Trove Guest Instance contains at least a functioning Operating System and
the database software that the instance is intended to provide (as a
service). For example, if your chosen operating system is Ubuntu and you
wish to deliver MySQL version 5.7, then your guest instance is a Nova
instance running the Ubuntu operating system with MySQL version 5.7
installed on it.

-----------------
Trove Guest Agent
-----------------

Trove supports multiple databases; some of them are relational (RDBMS) and
some are non-relational (NoSQL). In order to provide a common management
interface to all of these, the Trove Guest Instance runs a 'Guest Agent'.
The Trove Guest Agent is a component of the Trove system that is specific
to the database running on that Guest Instance.

The purpose of the Trove Guest Agent is to implement the Trove Guest Agent
API for the specific database. This includes such things as the
implementation of the database 'start' and 'stop' commands. The Trove Guest
Agent API is the common API used by Trove to communicate with any guest
database, and the Guest Agent is the implementation of that API for the
specific database.

The Trove Guest Agent runs inside the Trove Guest Instance.

------------------------------------------
Injected Configuration for the Guest Agent
------------------------------------------

When the TaskManager launches the guest VM, it injects config files into
the VM, including:

* ``/etc/trove/conf.d/guest_info.conf``: Contains some information about
  the guest, e.g. the guest identifier, the tenant ID, etc.
* ``/etc/trove/conf.d/trove-guestagent.conf``: The config file for the
  guest agent service.

------------------------------
Persistent Storage, Networking
------------------------------

The database stores data on persistent storage on Cinder (if
``CONF.volume_support=True``) or ephemeral storage on the Nova instance.
The database service is accessible over the tenant network provided when
creating the database instance.

The cloud administrator is able to configure management networks
(``CONF.management_networks``) that are invisible to cloud tenants but are
used for communication between the database instance and the control plane
services (e.g. the message queue).

Building Guest Images
=====================

-----------------------------
Build images using trovestack
-----------------------------

``trovestack`` is the recommended tooling provided by the Trove community
to build guest images. Before running the ``trovestack`` command, go to the
scripts folder:

.. code-block:: console

   git clone https://opendev.org/openstack/trove
   cd trove/integration/scripts

The Trove guest agent image can be created by running the following
command:

.. code-block:: console

   $ ./trovestack build-image \
       ${datastore_type} \
       ${guest_os} \
       ${guest_os_release} \
       ${dev_mode} \
       ${guest_username} \
       ${imagepath}

* Currently, only ``guest_os=ubuntu`` and ``guest_os_release=xenial`` are
  fully tested and supported.

* Default input values:

  .. code-block:: ini

     datastore_type=mysql
     guest_os=ubuntu
     guest_os_release=xenial
     dev_mode=true
     guest_username=ubuntu
     imagepath=$HOME/images/trove-${guest_os}-${guest_os_release}-${datastore_type}

* ``dev_mode=true`` is mainly intended for testing by Trove developers. The
  image must be built on the Trove controller host, because the host and
  the guest VM need to be able to ssh into each other without a password.
  In this mode, when the Trove guest agent code is changed, the image does
  not need to be rebuilt, which is convenient for debugging. The Trove
  guest agent will ssh into the controller node and download the Trove
  code during service initialization.

* If ``dev_mode=false``, the Trove guest agent code is injected into the
  image at build time. ``dev_mode=false`` is still experimental and is not
  considered production-ready yet.

* Some other global variables:

  * ``HOST_SCP_USERNAME``: Only used in dev mode; this is the user name
    used by the guest agent to connect to the controller host, e.g. in a
    devstack environment it should be the ``stack`` user.
  * ``GUEST_WORKING_DIR``: The place to save the guest image; the default
    value is ``$HOME/images``.
  * ``TROVE_BRANCH``: Only used in dev mode. The branch name of the Trove
    code repository; by default it's master. Use other branches as needed,
    such as stable/train.

For example, in order to build a MySQL image for the Ubuntu Xenial
operating system in development mode:

.. code-block:: console

   $ ./trovestack build-image mysql ubuntu xenial true

Once the image build is finished, the cloud administrator needs to register
the image in Glance and register a new datastore or datastore version in
Trove using the ``trove-manage`` command, e.g. after building an image for
MySQL 5.7.1:

.. code-block:: console

   $ openstack image create ubuntu-mysql-5.7.1-dev \
       --public \
       --disk-format qcow2 \
       --container-format bare \
       --file ~/images/ubuntu-xenial-mysql.qcow2
   $ trove-manage datastore_version_update mysql 5.7.1 mysql $image_id "" 1

If you encounter any errors or need help with image creation, please ask
for help either in the ``#openstack-trove`` IRC channel or by sending an
email to the openstack-discuss@lists.openstack.org mailing list.
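As an optional sanity check after registration (illustrative only; the
``<datastore_id>`` placeholder below is an assumption and the IDs will
differ in your deployment), you can confirm that the new datastore version
is visible:

.. code-block:: console

   $ trove datastore-list
   $ trove datastore-version-list <datastore_id>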
--- trove-12.1.0.dev92/doc/source/admin/database_module_usage.rst ---

.. _database_module_usage:

=====================================
Create and use modules for a database
=====================================

To continue with this document, we recommend that you have installed the
Database service and populated your data store with images for the types
and versions of databases that you want, and that you can create and access
a database.

This example shows you how to create and apply modules to a MySQL 5.6
database and a Redis 3.2.6 database cluster.

Create and apply a module to a mysql database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

#. **Create the module file and trove module**

   If you wish to apply a module, you must create it first and register it
   with the trove service; a user cannot directly apply a module to a
   trove instance. The module created here is a demo module called ping,
   the basic type made for testing purposes. Creating it is as simple as
   the following :command:`echo` command:

   .. code-block:: console

      $ echo "message=Module.V1" > ping1.dat

   You can create a test module and a mysql database with the module
   applied by doing the following:

   .. code-block:: console

      $ trove module-create mymod ping ping1.dat --live_update \
        --datastore mysql
      +----------------------+--------------------------------------+
      | Property             | Value                                |
      +----------------------+--------------------------------------+
      | apply_order          | 5                                    |
      | auto_apply           | False                                |
      | created              | 2017-06-02T17:06:21                  |
      | datastore            | all                                  |
      | datastore_id         | None                                 |
      | datastore_version    | all                                  |
      | datastore_version_id | None                                 |
      | description          | None                                 |
      | id                   | 0065a8ed-0668-4db5-a4ad-d88d0a166388 |
      | instance_count       | 2                                    |
      | is_admin             | True                                 |
      | live_update          | True                                 |
      | md5                  | 7f700cc7b99606615f8b51946f6d3228     |
      | name                 | mymod                                |
      | priority_apply       | False                                |
      | tenant               | eac1e46e5f7840e39012aff46a92073a     |
      | tenant_id            | eac1e46e5f7840e39012aff46a92073a     |
      | type                 | ping                                 |
      | updated              | 2017-06-02T17:06:21                  |
      | visible              | True                                 |
      +----------------------+--------------------------------------+

      $ trove create myinst 15 --size 1 --module mymod --datastore mysql
      +-------------------------+--------------------------------------+
      | Property                | Value                                |
      +-------------------------+--------------------------------------+
      | created                 | 2017-06-02T17:22:24                  |
      | datastore               | mysql                                |
      | datastore_version       | 5.6                                  |
      | encrypted_rpc_messaging | True                                 |
      | flavor                  | 15                                   |
      | id                      | 6221b30c-8292-4378-b624-c7e9b0f8ba9e |
      | name                    | myinst                               |
      | region                  | RegionOne                            |
      | server_id               | None                                 |
      | status                  | BUILD                                |
      | tenant_id               | eac1e46e5f7840e39012aff46a92073a     |
      | updated                 | 2017-06-02T17:22:24                  |
      | volume                  | 1                                    |
      | volume_id               | None                                 |
      +-------------------------+--------------------------------------+

.. _show_and_list_modules:

#. **Show and list modules**

   You can view the modules on your instance by doing the following:

   .. code-block:: console

      $ trove module-query myinst
      +-------+------+-----------+---------+--------+-----------+------------------------+------------------------+
      | Name  | Type | Datastore | Version | Status | Message   | Created                | Updated                |
      +-------+------+-----------+---------+--------+-----------+------------------------+------------------------+
      | mymod | ping | all       | all     | OK     | Module.V1 | 2017-06-02 17:23:40.50 | 2017-06-02 17:23:40.50 |
      +-------+------+-----------+---------+--------+-----------+------------------------+------------------------+

   You can count the instances each module is applied to by doing the
   following:

   ..
code-block:: console $ trove module-instance-count mymod +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | Module Name | Min Updated Date | Max Updated Date | Module MD5 | Current | Count | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | mymod | 2017-06-02T17:22:25 | 2017-06-02T17:22:25 | 7f700cc7b99606615f8b51946f6d3228 | True | 1 | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ You can list the instances that have a particular module applied by doing the following: .. code-block:: console $ trove module-instances mymod +--------------------------------------+--------+-----------+-------------------+--------+-----------+------+-----------+----------------------------------+ | ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region | Tenant ID | +--------------------------------------+--------+-----------+-------------------+--------+-----------+------+-----------+----------------------------------+ | 6221b30c-8292-4378-b624-c7e9b0f8ba9e | myinst | mysql | 5.6 | ACTIVE | 15 | 1 | RegionOne | eac1e46e5f7840e39012aff46a92073a | +--------------------------------------+--------+-----------+-------------------+--------+-----------+------+-----------+----------------------------------+ Updating and creating a second module for a redis cluster ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To update a module you should have another file ready to update the module with: .. code-block:: console $ echo "message=Module.V2" > ping2.dat $ trove module-update mymod --file ping2.dat +----------------------+--------------------------------------+ | Property | Value | +----------------------+--------------------------------------+ | apply_order | 5 | | auto_apply | False | | created | 2017-06-02T17:06:21 | | datastore | all | | datastore_id | None | | datastore_version | all | | datastore_version_id | None | | description | None | | id | 0065a8ed-0668-4db5-a4ad-d88d0a166388 | | is_admin | True | | live_update | True | | md5 | ba7c204979c8de54be6efb70a17d40b9 | | name | mymod | | priority_apply | False | | tenant | eac1e46e5f7840e39012aff46a92073a | | tenant_id | eac1e46e5f7840e39012aff46a92073a | | type | ping | | updated | 2017-06-02T17:56:12 | | visible | True | +----------------------+--------------------------------------+ Now to show the usage with a redis cluster, create as follows: .. code-block:: console $ trove cluster-create myclust redis 3.2.6 \ --instance=flavor=15,volume=1,module=mymod \ --instance=flavor=15,volume=1,module=mymod \ --instance=flavor=15,volume=1,module=mymod +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2017-06-02T18:00:17 | | datastore | redis | | datastore_version | 3.2.6 | | id | e4d91ca6-5980-430c-94d0-bf7abc63f712 | | instance_count | 3 | | name | myclust | | task_description | Building the initial cluster. | | task_name | BUILDING | | updated | 2017-06-02T18:00:17 | +-------------------+--------------------------------------+ The original :command: ``count`` command will show the first instance, unless the ``--include_clustered`` option is used. You can see the MD5 from each applied module, and you know that the single instance one is not current. .. 
code-block:: console $ trove module-instance-count mymod +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | Module Name | Min Updated Date | Max Updated Date | Module MD5 | Current | Count | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | mymod | 2017-06-02T17:22:25 | 2017-06-02T17:22:25 | 7f700cc7b99606615f8b51946f6d3228 | False | 1 | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ $ trove module-instance-count mymod --include_clustered +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | Module Name | Min Updated Date | Max Updated Date | Module MD5 | Current | Count | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | mymod | 2017-06-02T17:22:25 | 2017-06-02T17:22:25 | 7f700cc7b99606615f8b51946f6d3228 | False | 1 | | mymod | 2017-06-02T18:00:18 | 2017-06-02T18:00:18 | ba7c204979c8de54be6efb70a17d40b9 | True | 3 | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ Update the module again. By doing this, it will cause the instances to report their module is not current. .. code-block:: console $ echo "message=Module.V3" > ping3.dat $ trove module-update mymod --file ping3.dat +----------------------+--------------------------------------+ | Property | Value | +----------------------+--------------------------------------+ | apply_order | 5 | | auto_apply | False | | created | 2017-06-02T17:06:21 | | datastore | all | | datastore_id | None | | datastore_version | all | | datastore_version_id | None | | description | None | | id | 0065a8ed-0668-4db5-a4ad-d88d0a166388 | | is_admin | True | | live_update | True | | md5 | 869744bdd18e306a96c145df562065ab | | name | mymod | | priority_apply | False | | tenant | eac1e46e5f7840e39012aff46a92073a | | tenant_id | eac1e46e5f7840e39012aff46a92073a | | type | ping | | updated | 2017-06-02T18:06:53 | | visible | True | +----------------------+--------------------------------------+ $ trove module-instance-count mymod --include_clustered +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | Module Name | Min Updated Date | Max Updated Date | Module MD5 | Current | Count | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | mymod | 2017-06-02T17:22:25 | 2017-06-02T17:22:25 | 7f700cc7b99606615f8b51946f6d3228 | False | 1 | | mymod | 2017-06-02T18:00:18 | 2017-06-02T18:00:18 | ba7c204979c8de54be6efb70a17d40b9 | False | 3 | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ To update an instance in a cluster you can use the :command:`trove module-apply` command: .. 
code-block:: console $ trove cluster-instances myclust +--------------------------------------+------------------+-----------+------+--------+ | ID | Name | Flavor ID | Size | Status | +--------------------------------------+------------------+-----------+------+--------+ | 393462d5-906d-4214-af0d-538b7f618b2d | myclust-member-2 | 15 | 1 | ACTIVE | | a3fc5326-e1b6-456a-a8b1-08ad6bbb2278 | myclust-member-3 | 15 | 1 | ACTIVE | | cba31d4b-d038-42c2-ab03-56c6c176b49d | myclust-member-1 | 15 | 1 | ACTIVE | +--------------------------------------+------------------+-----------+------+--------+ $ trove module-apply 393462d5-906d-4214-af0d-538b7f618b2d mymod +-------+------+-----------+---------+--------+-----------+ | Name | Type | Datastore | Version | Status | Message | +-------+------+-----------+---------+--------+-----------+ | mymod | ping | all | all | OK | Module.V3 | +-------+------+-----------+---------+--------+-----------+ $ trove module-instance-count mymod --include_clustered +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | Module Name | Min Updated Date | Max Updated Date | Module MD5 | Current | Count | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | mymod | 2017-06-02T17:22:25 | 2017-06-02T17:22:25 | 7f700cc7b99606615f8b51946f6d3228 | False | 1 | | mymod | 2017-06-02T18:00:18 | 2017-06-02T18:00:18 | ba7c204979c8de54be6efb70a17d40b9 | False | 2 | | mymod | 2017-06-02T18:18:37 | 2017-06-02T18:18:37 | 869744bdd18e306a96c145df562065ab | True | 1 | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ For variety in this example, create one more instance and module: .. code-block:: console $ trove create myinst_2 15 --size 1 --module mymod +-------------------------+--------------------------------------+ | Property | Value | +-------------------------+--------------------------------------+ | created | 2017-06-02T18:21:56 | | datastore | redis | | datastore_version | 3.2.6 | | encrypted_rpc_messaging | True | | flavor | 15 | | id | cdd85d94-13a0-4d90-89eb-9c05523d2ac6 | | name | myinst_2 | | region | RegionOne | | server_id | None | | status | BUILD | | tenant_id | eac1e46e5f7840e39012aff46a92073a | | updated | 2017-06-02T18:21:56 | | volume | 1 | | volume_id | None | +-------------------------+--------------------------------------+ $ echo "message=Module.V4" > ping4.dat $ trove module-update mymod --file ping4.dat +----------------------+--------------------------------------+ | Property | Value | +----------------------+--------------------------------------+ | apply_order | 5 | | auto_apply | False | | created | 2017-06-02T17:06:21 | | datastore | all | | datastore_id | None | | datastore_version | all | | datastore_version_id | None | | description | None | | id | 0065a8ed-0668-4db5-a4ad-d88d0a166388 | | is_admin | True | | live_update | True | | md5 | 6e2c81c1547d640b4c6e7752ed0e33ab | | name | mymod | | priority_apply | False | | tenant | eac1e46e5f7840e39012aff46a92073a | | tenant_id | eac1e46e5f7840e39012aff46a92073a | | type | ping | | updated | 2017-06-02T18:26:22 | | visible | True | +----------------------+--------------------------------------+ Now we have 2 single instances, and 3 cluster instances on various versions of the module, none current. .. 
code-block:: console $ trove list +--------------------------------------+----------+-----------+-------------------+--------+-----------+------+-----------+ | ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region | +--------------------------------------+----------+-----------+-------------------+--------+-----------+------+-----------+ | 6221b30c-8292-4378-b624-c7e9b0f8ba9e | myinst | mysql | 5.6 | ACTIVE | 15 | 1 | RegionOne | | cdd85d94-13a0-4d90-89eb-9c05523d2ac6 | myinst_2 | redis | 3.2.6 | ACTIVE | 15 | 1 | RegionOne | +--------------------------------------+----------+-----------+-------------------+--------+-----------+------+-----------+ $ trove module-instance-count mymod --include_clustered +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | Module Name | Min Updated Date | Max Updated Date | Module MD5 | Current | Count | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | mymod | 2017-06-02T17:22:25 | 2017-06-02T17:22:25 | 7f700cc7b99606615f8b51946f6d3228 | False | 1 | | mymod | 2017-06-02T18:00:18 | 2017-06-02T18:00:18 | ba7c204979c8de54be6efb70a17d40b9 | False | 2 | | mymod | 2017-06-02T18:18:37 | 2017-06-02T18:21:57 | 869744bdd18e306a96c145df562065ab | False | 2 | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ When the latest module was created, the ``--include_clustered`` was not used. Use the :command:`trove module-reapply` command: .. code-block:: console $ trove module-reapply mymod --md5=869744bdd18e306a96c145df562065ab --include_clustered $ trove module-instance-count mymod --include_clustered +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | Module Name | Min Updated Date | Max Updated Date | Module MD5 | Current | Count | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | mymod | 2017-06-02T17:22:25 | 2017-06-02T17:22:25 | 7f700cc7b99606615f8b51946f6d3228 | False | 1 | | mymod | 2017-06-02T18:00:18 | 2017-06-02T18:00:18 | ba7c204979c8de54be6efb70a17d40b9 | False | 2 | | mymod | 2017-06-02T18:38:48 | 2017-06-02T18:38:48 | 6e2c81c1547d640b4c6e7752ed0e33ab | True | 2 | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ Now they are both updated. If the ``--force`` flag is used, it can reapply to already applied instances. Notice that the only thing that changes is the minimum and maximum updated date fields. .. 
code-block:: console $ trove module-reapply mymod --md5=6e2c81c1547d640b4c6e7752ed0e33ab --include_clustered --force $ trove module-instance-count mymod --include_clustered +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | Module Name | Min Updated Date | Max Updated Date | Module MD5 | Current | Count | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | mymod | 2017-06-02T17:22:25 | 2017-06-02T17:22:25 | 7f700cc7b99606615f8b51946f6d3228 | False | 1 | | mymod | 2017-06-02T18:00:18 | 2017-06-02T18:00:18 | ba7c204979c8de54be6efb70a17d40b9 | False | 2 | | mymod | 2017-06-02T18:40:45 | 2017-06-02T18:40:46 | 6e2c81c1547d640b4c6e7752ed0e33ab | True | 2 | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ To bring every instance to the current version, use some of the optional arguments to control how many instances are updated at the same time. This is useful to avoid potential network issues, if the module payload is large. Since we are not using the ``--force`` flag, the minimum updated date will not change. .. code-block:: console $ trove module-reapply mymod --include_clustered --batch_size=1 --delay=3 $ trove module-instance-count mymod --include_clustered +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | Module Name | Min Updated Date | Max Updated Date | Module MD5 | Current | Count | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ | mymod | 2017-06-02T18:40:45 | 2017-06-02T18:44:10 | 6e2c81c1547d640b4c6e7752ed0e33ab | True | 5 | +-------------+---------------------+---------------------+----------------------------------+---------+-------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/admin/datastore.rst0000644000175000017500000004057200000000000022034 0ustar00coreycorey00000000000000.. _database: ========= Datastore ========= The Database service provides database management features. Introduction ~~~~~~~~~~~~ The Database service provides scalable and reliable cloud provisioning functionality for both relational and non-relational database engines. Users can quickly and easily use database features without the burden of handling complex administrative tasks. Cloud users and database administrators can provision and manage multiple database instances as needed. The Database service provides resource isolation at high performance levels, and automates complex administrative tasks such as deployment, configuration, patching, backups, restores, and monitoring. You can modify various cluster characteristics by editing the ``/etc/trove/trove.conf`` file. A comprehensive list of the Database service configuration options is described in the `Database service `_ chapter in the *Configuration Reference*. Create a data store ~~~~~~~~~~~~~~~~~~~ An administrative user can create data stores for a variety of databases. This section assumes you do not yet have a MySQL data store, and shows you how to create a MySQL data store and populate it with a MySQL 5.5 data store version. **To create a data store** #. **Create a trove image** Create an image for the type of database you want to use, for example, MySQL, MongoDB, Cassandra. 
This image must have the trove guest agent installed, and it must have the ``trove-guestagent.conf`` file configured to connect to your OpenStack environment. To configure ``trove-guestagent.conf``, add the following lines to ``trove-guestagent.conf`` on the guest instance you are using to build your image: .. code-block:: ini rabbit_host = controller rabbit_password = RABBIT_PASS trove_auth_url = http://controller:35357/v2.0 This example assumes you have created a MySQL 5.5 image called ``mysql-5.5.qcow2``. .. important:: If you have a guest image that was created with an OpenStack version before Kilo, modify the guest agent init script for the guest image to read the configuration files from the directory ``/etc/trove/conf.d``. For a backwards compatibility with pre-Kilo guest instances, set the database service configuration options ``injected_config_location`` to ``/etc/trove`` and ``guest_info`` to ``/etc/guest_info``. #. **Register image with Image service** You need to register your guest image with the Image service. In this example, you use the :command:`openstack image create` command to register a ``mysql-5.5.qcow2`` image. .. code-block:: console $ openstack image create mysql-5.5 --disk-format qcow2 --container-format bare --public < mysql-5.5.qcow2 +------------------+------------------------------------------------------+ | Field | Value | +------------------+------------------------------------------------------+ | checksum | 133eae9fb1c98f45894a4e60d8736619 | | container_format | bare | | created_at | 2016-12-21T12:10:02Z | | disk_format | qcow2 | | file | /v2/images/d1afb4f0-2360-4400-8d97-846b1ab6af52/file | | id | d1afb4f0-2360-4400-8d97-846b1ab6af52 | | min_disk | 0 | | min_ram | 0 | | name | mysql-5.5 | | owner | 5669caad86a04256994cdf755df4d3c1 | | protected | False | | schema | /v2/schemas/image | | size | 13200896 | | status | active | | tags | | | updated_at | 2016-12-21T12:10:03Z | | virtual_size | None | | visibility | public | +------------------+------------------------------------------------------+ #. **Create the data store** Create the data store that will house the new image. To do this, use the :command:`trove-manage` :command:`datastore_update` command. This example uses the following arguments: .. list-table:: :header-rows: 1 :widths: 20 20 20 * - Argument - Description - In this example: * - config file - The configuration file to use. - ``--config-file=/etc/trove/trove.conf`` * - name - Name you want to use for this data store. - ``mysql`` * - default version - You can attach multiple versions/images to a data store. For example, you might have a MySQL 5.5 version and a MySQL 5.6 version. You can designate one version as the default, which the system uses if a user does not explicitly request a specific version. - ``""`` At this point, you do not yet have a default version, so pass in an empty string. | Example: .. code-block:: console $ trove-manage --config-file=/etc/trove/trove.conf datastore_update mysql "" #. **Add a version to the new data store** Now that you have a MySQL data store, you can add a version to it, using the :command:`trove-manage` :command:`datastore_version_update` command. The version indicates which guest image to use. This example uses the following arguments: .. list-table:: :header-rows: 1 :widths: 20 20 20 * - Argument - Description - In this example: * - config file - The configuration file to use. 
- ``--config-file=/etc/trove/trove.conf`` * - data store - The name of the data store you just created via ``trove-manage`` :command:`datastore_update`. - ``mysql`` * - version name - The name of the version you are adding to the data store. - ``mysql-5.5`` * - data store manager - Which data store manager to use for this version. Typically, the data store manager is identified by one of the following strings, depending on the database: * cassandra * couchbase * couchdb * db2 * mariadb * mongodb * mysql * percona * postgresql * pxc * redis * vertica - ``mysql`` * - glance ID - The ID of the guest image you just added to the Image service. You can get this ID by using the glance :command:`image-show` IMAGE_NAME command. - bb75f870-0c33-4907-8467-1367f8cb15b6 * - packages - If you want to put additional packages on each guest that you create with this data store version, you can list the package names here. - ``""`` In this example, the guest image already contains all the required packages, so leave this argument empty. * - active - Set this to either 1 or 0: * ``1`` = active * ``0`` = disabled - 1 | Example: .. code-block:: console $ trove-manage --config-file=/etc/trove/trove.conf datastore_version_update mysql mysql-5.5 mysql GLANCE_ID "" 1 **Optional.** Set your new version as the default version. To do this, use the :command:`trove-manage` :command:`datastore_update` command again, this time specifying the version you just created. .. code-block:: console $ trove-manage --config-file=/etc/trove/trove.conf datastore_update mysql mysql-5.5 #. **Load validation rules for configuration groups** .. note:: **Applies only to MySQL and Percona data stores** * If you just created a MySQL or Percona data store, then you need to load the appropriate validation rules, as described in this step. * If you just created a different data store, skip this step. **Background.** You can manage database configuration tasks by using configuration groups. Configuration groups let you set configuration parameters, in bulk, on one or more databases. When you set up a configuration group using the trove :command:`configuration-create` command, this command compares the configuration values you are setting against a list of valid configuration values that are stored in the ``validation-rules.json`` file. .. list-table:: :header-rows: 1 :widths: 20 20 20 * - Operating System - Location of :file:`validation-rules.json` - Notes * - Ubuntu 14.04 - :file:`/usr/lib/python2.7/dist-packages/trove/templates/DATASTORE_NAME` - DATASTORE_NAME is the name of either the MySQL data store or the Percona data store. This is typically either ``mysql`` or ``percona``. * - RHEL 7, CentOS 7, Fedora 20, and Fedora 21 - :file:`/usr/lib/python2.7/site-packages/trove/templates/DATASTORE_NAME` - DATASTORE_NAME is the name of either the MySQL data store or the Percona data store. This is typically either ``mysql`` or ``percona``. | Therefore, as part of creating a data store, you need to load the ``validation-rules.json`` file, using the :command:`trove-manage` :command:`db_load_datastore_config_parameters` command. This command takes the following arguments: * Data store name * Data store version * Full path to the ``validation-rules.json`` file | This example loads the ``validation-rules.json`` file for a MySQL database on Ubuntu 14.04: .. code-block:: console $ trove-manage db_load_datastore_config_parameters mysql mysql-5.5 /usr/lib/python2.7/dist-packages/trove/templates/mysql/validation-rules.json #. 
**Validate data store** To validate your new data store and version, start by listing the data stores on your system: .. code-block:: console $ trove datastore-list +--------------------------------------+--------------+ | id | name | +--------------------------------------+--------------+ | 10000000-0000-0000-0000-000000000001 | Legacy MySQL | | e5dc1da3-f080-4589-a4c2-eff7928f969a | mysql | +--------------------------------------+--------------+ Take the ID of the MySQL data store and pass it in with the :command:`datastore-version-list` command: .. code-block:: console $ trove datastore-version-list DATASTORE_ID +--------------------------------------+-----------+ | id | name | +--------------------------------------+-----------+ | 36a6306b-efd8-4d83-9b75-8b30dd756381 | mysql-5.5 | +--------------------------------------+-----------+ Data store classifications -------------------------- The Database service supports a variety of both relational and non-relational database engines, but to a varying degree of support for each *data store*. The Database service project has defined several classifications that indicate the quality of support for each data store. Data stores also implement different extensions. An extension is called a *strategy* and is classified similar to data stores. Valid classifications for a data store and a strategy are: * Experimental * Technical preview * Stable Each classification builds on the previous one. This means that a data store that meets the ``technical preview`` requirements must also meet all the requirements for ``experimental``, and a data store that meets the ``stable`` requirements must also meet all the requirements for ``technical preview``. **Requirements** * Experimental A data store is considered to be ``experimental`` if it meets these criteria: * It implements a basic subset of the Database service API including ``create`` and ``delete``. * It has guest agent elements that allow guest agent creation. * It has a definition of supported operating systems. * It meets the other `Documented Technical Requirements `_. A strategy is considered ``experimental`` if: * It meets the `Documented Technical Requirements `_. * Technical preview A data store is considered to be a ``technical preview`` if it meets the requirements of ``experimental`` and further: * It implements APIs required to plant and start the capabilities of the data store as defined in the `Datastore Compatibility Matrix `_. .. note:: It is not required that the data store implements all features like resize, backup, replication, or clustering to meet this classification. * It provides a mechanism for building a guest image that allows you to exercise its capabilities. * It meets the other `Documented Technical Requirements `_. .. important:: A strategy is not normally considered to be ``technical preview``. * Stable A data store or a strategy is considered ``stable`` if: * It meets the requirements of ``technical preview``. * It meets the other `Documented Technical Requirements `_. **Initial Classifications** The following table shows the current classification assignments for the different data stores. .. 
list-table::
   :header-rows: 1
   :widths: 30 30

   * - Classification
     - Data store
   * - Stable
     - MySQL
   * - Technical Preview
     - Cassandra, MongoDB
   * - Experimental
     - All others

Redis data store replication
----------------------------

Replication strategies are available for Redis, with several commands
located in the Redis data store manager:

- :command:`create`
- :command:`detach-replica`
- :command:`eject-replica-source`
- :command:`promote-to-replica-source`

Additional arguments for the :command:`create` command include
:command:`--replica_of` and :command:`--replica_count`.

Redis integration and unit tests
--------------------------------

Unit tests and integration tests are also available for Redis.

#. Install trovestack:

   .. code-block:: console

      $ ./trovestack install

   .. note::

      Trovestack is a development script used for integration testing and
      Database service development installations. Do not use trovestack in
      a production environment. For more information, see
      `the Database service developer docs `_

#. Start Redis:

   .. code-block:: console

      $ ./trovestack kick-start redis

#. Run integration tests:

   .. code-block:: console

      $ ./trovestack int-tests --group=replication

   You can run :command:`--group=redis_supported` instead of
   :command:`--group=replication` if needed.

Configure a cluster
~~~~~~~~~~~~~~~~~~~

An administrative user can configure various characteristics of a MongoDB
cluster.

**Query routers and config servers**

**Background.** Each cluster includes at least one query router and one
config server. Query routers and config servers count against your quota.
When you delete a cluster, the system deletes the associated query
router(s) and config server(s).

**Configuration.** By default, the system creates one query router and one
config server per cluster. You can change this by editing the
``/etc/trove/trove.conf`` file. These settings are in the ``mongodb``
section of the file:

.. list-table::
   :header-rows: 1
   :widths: 30 30

   * - Setting
     - Valid values are:
   * - num_config_servers_per_cluster
     - 1 or 3
   * - num_query_routers_per_cluster
     - 1 or 3
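For example, a deployment that wants three config servers and three query
routers per cluster could use a snippet like the following (illustrative
values, chosen from the valid ranges listed above):

.. code-block:: ini

   [mongodb]
   num_config_servers_per_cluster = 3
   num_query_routers_per_cluster = 3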
--- trove-12.1.0.dev92/doc/source/admin/index.rst ---

=======================
 Administrator's Guide
=======================

.. toctree::
   :maxdepth: 2

   run_trove_in_production
   datastore
   building_guest_images
   secure_oslo_messaging
   database_module_usage

--- trove-12.1.0.dev92/doc/source/admin/run_trove_in_production.rst ---

..
    Copyright (c) 2020 Catalyst Cloud

    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.

===========================
Running Trove in production
===========================

This document is not a definitive guide for deploying Trove in every
production environment. There are many ways to deploy Trove depending on
the specifics and limitations of your situation. We hope this document
provides the cloud operator or distribution creator with a basic
understanding of how the Trove components fit together practically.
Through this, it should become more obvious how components of Trove can be
divided or duplicated across physical hardware in a production cloud
environment to aid in achieving scalability and resiliency for the
database as a service software.

In the interest of keeping this guide somewhat high-level and avoiding
obsolescence or operator/distribution-specific environment assumptions by
specifying exact commands that should be run to accomplish the tasks
below, we will instead just describe what needs to be done and leave it to
the cloud operator or distribution creator to "do the right thing" to
accomplish the task for their environment. If you need guidance on
specific commands to run to accomplish the tasks described below, we
recommend reading through the ``plugin.sh`` script in the devstack
subdirectory of this project. The devstack plugin exercises all the
essential components of Trove in the right order, and this guide is mostly
an elaboration of that process.

Environment Assumptions
-----------------------

The scope of this guide is to provide a basic overview of setting up all
the components of Trove in a production environment, assuming that the
default in-tree drivers and components are going to be used.

For the purposes of this guide, we will therefore assume the following
core components have already been set up for your production OpenStack
environment:

* RabbitMQ
* MySQL
* Keystone
* Nova
* Cinder
* Neutron
* Glance
* Swift

Production Deployment Walkthrough
---------------------------------

Create Trove Service User
~~~~~~~~~~~~~~~~~~~~~~~~~

By default Trove will use the 'trove' user with the 'admin' role in the
'service' tenant for both Keystone authentication and interactions with
all other services.

Service Tenant Deployment
~~~~~~~~~~~~~~~~~~~~~~~~~

In production, almost all the cloud resources (except the Swift objects
for backup data) created for a Trove instance should be visible only to
the Trove service user. DBaaS users should only see a Trove instance after
creating it, and know nothing about the Nova VM, Cinder volume, Neutron
management network, and security groups under the hood. The only way to
operate a Trove instance is to interact with the `Trove API `_.

Service tenant deployment is the default configuration in Trove since the
Ussuri release.

Install Trove Controller Software
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Trove controller services should be put somewhere that has access to the
database, the oslo messaging system, and other OpenStack services. Trove
uses the standard Python setuptools, so installation of the software
itself should be straightforward.

Running multiple instances of the individual Trove controller components
on separate physical hosts is recommended in order to provide scalability
and availability of the controller software.

Management Network
~~~~~~~~~~~~~~~~~~

Trove makes exclusive use of a "Management Network" that the controller
uses to talk to the guest agent running inside a Trove instance, and vice
versa. All the instances that Trove deploys will have interfaces on this
network. Therefore, it's important that the subnet deployed on this
network be sufficiently large to allow for the maximum number of instances
and controllers likely to be deployed throughout the lifespan of the cloud
installation.
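As an illustration only, a management network matching the defaults used
by the devstack plugin (network ``trove-mgmt``, subnet
``192.168.254.0/24``) could be created along the following lines; adapt
the names, ranges, and any provider options to your environment:

.. code-block:: console

   $ openstack network create trove-mgmt
   $ openstack subnet create trove-mgmt-subnet \
       --network trove-mgmt \
       --subnet-range 192.168.254.0/24 \
       --allocation-pool start=192.168.254.2,end=192.168.254.200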
Usually, after a Trove instance is created, there are two NICs attached to
the instance VM: one for the database traffic on the user-defined network,
and one for management purposes. Trove will check if the user's subnet
conflicts with the management network.

You can also create a management Neutron security group that will be
applied to the management port. Basically, nothing needs to be allowed to
access the management port; most of the network communication within the
Trove instance is egress traffic (e.g. the guest agent initiates the
connection with RabbitMQ). However, it can be helpful to allow SSH access
to the Trove instance from the controller for troubleshooting purposes
(i.e. TCP port 22), though this is not strictly necessary in production
environments. In order to SSH into the Trove instance (as mentioned above,
helpful but not necessary), the cloud administrators need to create and
configure a Nova keypair.

Finally, you need to add routing or interfaces to this network so that the
Trove guest agent running inside the instance is able to connect with
RabbitMQ.

RabbitMQ Considerations
~~~~~~~~~~~~~~~~~~~~~~~

Both trove-taskmanager and trove-conductor talk to the guest agent inside
a Trove instance via the messaging system, i.e. RabbitMQ. Once the guest
agent is up and running, it listens on a message queue named
``guestagent.<guest_id>`` set up specifically for that particular
instance, receiving requests from trove-taskmanager for operations like
setting up the database software, creating databases and users, and
restarting the database service. Meanwhile, trove-guestagent periodically
sends status updates to trove-conductor through the messaging system.

With all that said, a proper RabbitMQ user name and password need to be
configured in the trove-guestagent config file, which may raise a security
concern for cloud deployers. If the guest instance is compromised, then
the guest credentials are compromised, which means the messaging system is
compromised. As part of the solution, Trove introduced a `security
enhancement `_ in the Ocata release, using encryption keys to protect the
messages between the control plane and the guest instances, which
guarantees that one compromised guest instance doesn't affect other
instances or other cloud users.

Configuring Trove
~~~~~~~~~~~~~~~~~

The default Trove configuration file location is
``/etc/trove/trove.conf``. The typical config options (not a full list)
are:

DEFAULT group

enable_secure_rpc_messaging
    Should RPC messaging traffic be secured by encryption.

taskmanager_rpc_encr_key
    The key (OpenSSL aes_cbc) used to encrypt RPC messages sent to
    trove-taskmanager, used by trove-api.

instance_rpc_encr_key
    The key (OpenSSL aes_cbc) used to encrypt RPC messages sent to the
    guest instance from trove-taskmanager, and the messages sent from the
    guest instance to trove-conductor. This key is generated by
    trove-taskmanager automatically and is injected into the guest
    instance at creation time.

inst_rpc_key_encr_key
    The database encryption key used to encrypt the per-instance RPC
    encryption key before storing it in the Trove database.

management_networks
    The management network; currently only one management network is
    allowed.

management_security_groups
    List of the management security groups that are applied to the
    management port of the database instance.

cinder_volume_type
    Cinder volume type used to create the volume that is attached to a
    Trove instance.

nova_keypair
    Name of a Nova keypair to inject into a database instance to enable
    SSH access.
default_datastore The default datastore id or name to use if one is not provided by the user. If the default value is None, the field becomes required in the instance create request. max_accepted_volume_size The default maximum volume size (in GB) for an instance. max_instances_per_tenant Default maximum number of instances per tenant. max_backups_per_tenant Default maximum number of backups per tenant. transport_url The messaging server connection URL, e.g. ``rabbit://stackrabbit:password@10.0.119.251:5672/`` control_exchange The Trove exchange name for the messaging service, could be overridden by an exchange name specified in the transport_url option. reboot_time_out Maximum time (in seconds) to wait for a server reboot. usage_timeout Maximum time (in seconds) to wait for Trove instance to become ACTIVE for creation. restore_usage_timeout Maximum time (in seconds) to wait for Trove instance to become ACTIVE for restore. agent_call_high_timeout Maximum time (in seconds) to wait for Guest Agent 'slow' requests (such as restarting the instance server) to complete. keystone_authtoken group Like most of other OpenStack services, Trove uses `Keystone Authentication Middleware `_ for authentication and authorization. service_credentials group Options in this section are pretty much like the options in ``keystone_authtoken``, but you can config another service user for Trove to communicate with other OpenStack services like Nova, Neutron, Cinder, etc. * auth_url * region_name * project_name * username * password * project_domain_name * user_domain_name database group connection The SQLAlchemy connection string to use to connect to the database, e.g. ``mysql+pymysql://root:password@127.0.0.1/trove?charset=utf8`` The cloud administrator also needs to provide a policy file ``/etc/trove/policy.json`` if the default API access policies don't satisfy the requirement. To generate a sample policy file with all the default policies, run ``tox -egenpolicy`` in the repo folder and the new file will be located in ``etc/trove/policy.yaml.sample``. Initialize Trove Database ~~~~~~~~~~~~~~~~~~~~~~~~~ This is controlled through `sqlalchemy-migrate `_ scripts under the trove/db/sqlalchemy/migrate_repo/versions directory in this repository. The script ``trove-manage`` (which should be installed together with Trove controller software) could be used to aid in the initialization of the Trove database. Note that this tool looks at the ``/etc/trove/trove.conf`` file for its database credentials, so initializing the database must happen after Trove is configured. Launching the Trove Controller ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We recommend using upstart / systemd scripts to ensure the components of the Trove controller are all started and kept running. Preparing the Guest Images ~~~~~~~~~~~~~~~~~~~~~~~~~~ Now that the Trove system is installed, the next step is to build the images that we will use for the DBaaS to function properly. This is possibly the most important step as this will be the gold standard that Trove will use for a particular data store. .. note:: For the sake of simplicity and especially for testing, we can use the prebuilt images that are available from OpenStack itself. These images should strictly be used for testing and development use and should not be used in a production environment. The images are available for download and are located at http://tarballs.openstack.org/trove/images/. 
For use with production systems, it is recommended to create and maintain
your own images in order to conform to standards set by your company's
security team.

The Trove community uses `Disk Image Builder (DIB) `_ to create Trove
images; all the elements are located in the
``integration/scripts/files/elements`` folder in the repo. Trove provides
a script named ``trovestack`` to help build the image; refer to `Build
images using trovestack `_ for more information. Make sure to use
``dev_mode=false`` for a production environment.

After the image is created successfully, the cloud administrator needs to
upload the image to Glance and make it accessible only to service users.

Preparing the Datastore
~~~~~~~~~~~~~~~~~~~~~~~

After the image is uploaded, the cloud administrator should create
datastores, datastore versions, and the configuration parameters for each
particular version. It's recommended to configure a default version for
each datastore.

Quota Management
~~~~~~~~~~~~~~~~

The amount of resources that can be created by each OpenStack project is
controlled by quota. The default resource quota for each project is set in
the Trove config file as follows, unless changed by the cloud
administrator via the `Quota API `_.

.. code-block:: ini

   [DEFAULT]
   max_instances_per_tenant = 10
   max_backups_per_tenant = 50

Trove Deployment Verification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If all of the above instructions have been followed, it should now be
possible to deploy Trove instances using the OpenStack CLI, communicating
with the Trove V1 API. Refer to `Create and access a database `_ for
detailed steps.

--- trove-12.1.0.dev92/doc/source/admin/secure_oslo_messaging.rst ---

.. _secure_rpc_messaging:

======================
 Secure RPC messaging
======================

Background
----------

Trove uses oslo_messaging.rpc for communication amongst the various
control plane components and the guest agents. For secure operation of the
system, these RPC calls can be fully encrypted. A control plane encryption
key is used for communications between the API service and the
taskmanager, and system-generated per-instance keys are used for
communication between the control plane and guest instances. This document
provides some useful tips on how to use this mechanism.

The default system behavior
---------------------------

By default, the system will attempt to encrypt all RPC communication. This
behavior is controlled by the following configuration parameters:

- enable_secure_rpc_messaging
  boolean that determines whether RPC messages will be secured by
  encryption. The default value is True.

- taskmanager_rpc_encr_key
  the key used for encrypting messages sent to the taskmanager. A default
  value is provided for this and it is important that deployers change it.

- inst_rpc_key_encr_key
  the key used for encrypting the per-instance keys when they are stored
  in the trove infrastructure database (catalog). A default is provided
  for this and it is important that deployers change it.

Interoperability and Upgrade
----------------------------

Consider the system as shown below, which runs a version of the code prior
to the introduction of this oslo_messaging.rpc security.
Observe, for example that the instances table in the system catalog does not include the per-instance encrypted key column:: mysql> describe instances; +----------------------+--------------+------+-----+---------+-------+ | Field | Type | Null | Key | Default | Extra | +----------------------+--------------+------+-----+---------+-------+ | id | varchar(36) | NO | PRI | NULL | | | created | datetime | YES | | NULL | | | updated | datetime | YES | | NULL | | | name | varchar(255) | YES | | NULL | | | hostname | varchar(255) | YES | | NULL | | | compute_instance_id | varchar(36) | YES | | NULL | | | task_id | int(11) | YES | | NULL | | | task_description | varchar(255) | YES | | NULL | | | task_start_time | datetime | YES | | NULL | | | volume_id | varchar(36) | YES | | NULL | | | flavor_id | varchar(255) | YES | | NULL | | | volume_size | int(11) | YES | | NULL | | | tenant_id | varchar(36) | YES | MUL | NULL | | | server_status | varchar(64) | YES | | NULL | | | deleted | tinyint(1) | YES | MUL | NULL | | | deleted_at | datetime | YES | | NULL | | | datastore_version_id | varchar(36) | NO | MUL | NULL | | | configuration_id | varchar(36) | YES | MUL | NULL | | | slave_of_id | varchar(36) | YES | MUL | NULL | | | cluster_id | varchar(36) | YES | MUL | NULL | | | shard_id | varchar(36) | YES | | NULL | | | type | varchar(64) | YES | | NULL | | | region_id | varchar(255) | YES | | NULL | | +----------------------+--------------+------+-----+---------+-------+ 23 rows in set (0.00 sec) We launch an instance of MySQL using this version of the software:: amrith@amrith-work:/opt/stack/trove/integration/scripts$ openstack network list +--------------------------------------+-------------+--------------------------------------+ | ID | Name | Subnets | +--------------------------------------+-------------+--------------------------------------+ [...] | 4bab02e7-87bb-4cc0-8c07-2f282c777c85 | public | e620c4f5-749c-4212-b1d1-4a6e2c0a3f16 | [...] 
+--------------------------------------+-------------+--------------------------------------+ amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove create m2 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2017-01-09T18:17:13 | | datastore | mysql | | datastore_version | 5.6 | | flavor | 25 | | id | bb0c9213-31f8-4427-8898-c644254b3642 | | name | m2 | | region | RegionOne | | server_id | None | | status | BUILD | | updated | 2017-01-09T18:17:13 | | volume | 3 | | volume_id | None | +-------------------+--------------------------------------+ amrith@amrith-work:/opt/stack/trove/integration/scripts$ nova list +--------------------------------------+------+--------+------------+-------------+-------------------+ | ID | Name | Status | Task State | Power State | Networks | +--------------------------------------+------+--------+------------+-------------+-------------------+ | a4769ce2-4e22-4134-b958-6db6c23cb221 | m2 | BUILD | spawning | NOSTATE | public=172.24.4.4 | +--------------------------------------+------+--------+------------+-------------+-------------------+ And on that machine, the configuration file looks like this:: amrith@m2:~$ cat /etc/trove/conf.d/guest_info.conf [DEFAULT] guest_id=bb0c9213-31f8-4427-8898-c644254b3642 datastore_manager=mysql tenant_id=56cca8484d3e48869126ada4f355c284 The instance goes online:: amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove show m2 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2017-01-09T18:17:13 | | datastore | mysql | | datastore_version | 5.6 | | flavor | 25 | | id | bb0c9213-31f8-4427-8898-c644254b3642 | | name | m2 | | region | RegionOne | | server_id | a4769ce2-4e22-4134-b958-6db6c23cb221 | | status | ACTIVE | | updated | 2017-01-09T18:17:17 | | volume | 3 | | volume_id | 16e57e3f-b462-4db2-968b-3c284aa2751c | | volume_used | 0.11 | +-------------------+--------------------------------------+ For testing later, we launch a few more instances:: amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove create m3 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85 amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove create m4 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85 amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove list +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ | ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region | +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ | 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 9ceebd62-e13d-43c5-953a-c0f24f08757e | m3 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ In this condition, we take down the control plane and upgrade the software running on it. This will result in a catalog upgrade. 
Since this system is based on devstack, here's what that looks like:: amrith@amrith-work:/opt/stack/trove$ git branch * master review/amrith/bp/secure-oslo-messaging-messages amrith@amrith-work:/opt/stack/trove$ git checkout review/amrith/bp/secure-oslo-messaging-messages Switched to branch 'review/amrith/bp/secure-oslo-messaging-messages' Your branch is ahead of 'gerrit/master' by 1 commit. (use "git push" to publish your local commits) amrith@amrith-work:/opt/stack/trove$ find . -name '*.pyc' -delete amrith@amrith-work:/opt/stack/trove$ amrith@amrith-work:/opt/stack/trove$ trove-manage db_sync [...] 2017-01-09 13:24:25.251 DEBUG migrate.versioning.repository [-] Config: OrderedDict([('db_settings', OrderedDict([('__name__', 'db_settings'), ('repository_id', 'Trove Migrations'), ('version_table', 'migrate_version'), ('required_dbs', "['mysql','postgres','sqlite']")]))]) from (pid=96180) __init__ /usr/local/lib/python2.7/dist-packages/migrate/versioning/repository.py:83 2017-01-09 13:24:25.260 INFO migrate.versioning.api [-] 40 -> 41... 2017-01-09 13:24:25.328 INFO migrate.versioning.api [-] done 2017-01-09 13:24:25.329 DEBUG migrate.versioning.util [-] Disposing SQLAlchemy engine Engine(mysql+pymysql://root:***@127.0.0.1/trove?charset=utf8) from (pid=96180) with_engine /usr/local/lib/python2.7/dist-packages/migrate/versioning/util/__init__.py:163 [...] We observe that the new table in the system has the encrypted_key column:: mysql> describe instances; +----------------------+--------------+------+-----+---------+-------+ | Field | Type | Null | Key | Default | Extra | +----------------------+--------------+------+-----+---------+-------+ | id | varchar(36) | NO | PRI | NULL | | | created | datetime | YES | | NULL | | | updated | datetime | YES | | NULL | | | name | varchar(255) | YES | | NULL | | | hostname | varchar(255) | YES | | NULL | | | compute_instance_id | varchar(36) | YES | | NULL | | | task_id | int(11) | YES | | NULL | | | task_description | varchar(255) | YES | | NULL | | | task_start_time | datetime | YES | | NULL | | | volume_id | varchar(36) | YES | | NULL | | | flavor_id | varchar(255) | YES | | NULL | | | volume_size | int(11) | YES | | NULL | | | tenant_id | varchar(36) | YES | MUL | NULL | | | server_status | varchar(64) | YES | | NULL | | | deleted | tinyint(1) | YES | MUL | NULL | | | deleted_at | datetime | YES | | NULL | | | datastore_version_id | varchar(36) | NO | MUL | NULL | | | configuration_id | varchar(36) | YES | MUL | NULL | | | slave_of_id | varchar(36) | YES | MUL | NULL | | | cluster_id | varchar(36) | YES | MUL | NULL | | | shard_id | varchar(36) | YES | | NULL | | | type | varchar(64) | YES | | NULL | | | region_id | varchar(255) | YES | | NULL | | | encrypted_key | varchar(255) | YES | | NULL | | +----------------------+--------------+------+-----+---------+-------+ mysql> select id, encrypted_key from instances; +--------------------------------------+---------------+ | id | encrypted_key | +--------------------------------------+---------------+ | 13a787f2-b699-4867-a727-b3f4d8040a12 | NULL | +--------------------------------------+---------------+ 1 row in set (0.00 sec) amrith@amrith-work:/opt/stack/trove$ sudo python setup.py install -f [...] 
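For reference, the options described in `The default system behavior`_ are
set in the ``[DEFAULT]`` section of ``trove.conf``. A minimal sketch
follows; the key values below are illustrative placeholders, not keys to
deploy.

.. code-block:: ini

    [DEFAULT]
    # Encrypt RPC messages (the default is True)
    enable_secure_rpc_messaging = True
    # Site-specific key for API service <-> taskmanager traffic
    taskmanager_rpc_encr_key = REPLACE_WITH_UNIQUE_KEY
    # Site-specific key protecting stored per-instance keys
    inst_rpc_key_encr_key = REPLACE_WITH_ANOTHER_UNIQUE_KEY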
We can now relaunch the control plane software but before we do that, we inspect the configuration parameters and disable secure RPC messaging by adding this line into the configuration files:: amrith@amrith-work:/etc/trove$ grep enable_secure_rpc_messaging *.conf trove.conf:enable_secure_rpc_messaging = False The first thing we observe is that heartbeat messages from the existing instance are still properly handled by the conductor and the instance remains active:: 2017-01-09 13:26:57.742 DEBUG oslo_messaging._drivers.amqpdriver [-] received message with unique_id: eafe22c08bae485e9346ce0fbdaa4d6c from (pid=96551) __call__ /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:196 2017-01-09 13:26:57.744 DEBUG trove.conductor.manager [-] Instance ID: bb0c9213-31f8-4427-8898-c644254b3642, Payload: {u'service_status': u'running'} from (pid=96551) heartbeat /opt/stack/trove/trove/conductor/manager.py:88 2017-01-09 13:26:57.748 DEBUG trove.conductor.manager [-] Instance bb0c9213-31f8-4427-8898-c644254b3642 sent heartbeat at 1483986416.52 from (pid=96551) _message_too_old /opt/stack/trove/trove/conductor/manager.py:54 2017-01-09 13:26:57.750 DEBUG trove.conductor.manager [-] [Instance bb0c9213-31f8-4427-8898-c644254b3642] Rec'd message is younger than last seen. Updating. from (pid=96551) _message_too_old /opt/stack/trove/trove/conductor/manager.py:76 2017-01-09 13:27:01.197 DEBUG oslo_messaging._drivers.amqpdriver [-] received message with unique_id: df62b76523004338876bc7b08f8b7711 from (pid=96552) __call__ /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:196 2017-01-09 13:27:01.200 DEBUG trove.conductor.manager [-] Instance ID: 9ceebd62-e13d-43c5-953a-c0f24f08757e, Payload: {u'service_status': u'running'} from (pid=96552) heartbeat /opt/stack/trove/trove/conductor/manager.py:88 2017-01-09 13:27:01.219 DEBUG oslo_db.sqlalchemy.engines [-] Parent process 96542 forked (96552) with an open database connection, which is being discarded and recreated. from (pid=96552) checkout /usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/engines.py:362 2017-01-09 13:27:01.225 DEBUG trove.conductor.manager [-] Instance 9ceebd62-e13d-43c5-953a-c0f24f08757e sent heartbeat at 1483986419.99 from (pid=96552) _message_too_old /opt/stack/trove/trove/conductor/manager.py:54 2017-01-09 13:27:01.231 DEBUG trove.conductor.manager [-] [Instance 9ceebd62-e13d-43c5-953a-c0f24f08757e] Rec'd message is younger than last seen. Updating. 
from (pid=96552) _message_too_old /opt/stack/trove/trove/conductor/manager.py:76 amrith@amrith-work:/etc/trove$ trove list +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ | ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region | +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ | 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 9ceebd62-e13d-43c5-953a-c0f24f08757e | m3 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ amrith@amrith-work:/etc/trove$ trove show m2 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2017-01-09T18:17:13 | | datastore | mysql | | datastore_version | 5.6 | | flavor | 25 | | id | bb0c9213-31f8-4427-8898-c644254b3642 | | name | m2 | | region | RegionOne | | server_id | a4769ce2-4e22-4134-b958-6db6c23cb221 | | status | ACTIVE | | updated | 2017-01-09T18:17:17 | | volume | 3 | | volume_id | 16e57e3f-b462-4db2-968b-3c284aa2751c | | volume_used | 0.11 | +-------------------+--------------------------------------+ We now launch a new instance, recall that secure_rpc_messaging is disabled:: amrith@amrith-work:/etc/trove$ trove create m10 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2017-01-09T18:28:56 | | datastore | mysql | | datastore_version | 5.6 | | flavor | 25 | | id | 514ef051-0bf7-48a5-adcf-071d4a6625fb | | name | m10 | | region | RegionOne | | server_id | None | | status | BUILD | | updated | 2017-01-09T18:28:56 | | volume | 3 | | volume_id | None | +-------------------+--------------------------------------+ Observe that the task manager does not create a password for the instance:: 2017-01-09 13:29:00.111 INFO trove.instance.models [-] Resetting task status to NONE on instance 514ef051-0bf7-48a5-adcf-071d4a6625fb. 
2017-01-09 13:29:00.115 DEBUG trove.db.models [-] Saving DBInstance: {u'region_id': u'RegionOne', u'cluster_id': None, u'shard_id': None, u'deleted_at': None, u'id': u'514ef051-0bf7-48a5-adcf-071d4a6625fb', u'datastore_version_id': u'4a881cb5-9e48-4cb2-a209-4283ed44eb01', 'errors': {}, u'hostname': None, u'server_status': None, u'task_description': u'No tasks for the instance.', u'volume_size': 3, u'type': None, u'updated': datetime.datetime(2017, 1, 9, 18, 29, 0, 114971), '_sa_instance_state': , u'encrypted_key': None, u'deleted': 0, u'configuration_id': None, u'volume_id': u'cee2e17b-80fa-48e5-a488-da8b7809373a', u'slave_of_id': None, u'task_start_time': None, u'name': u'm10', u'task_id': 1, u'created': datetime.datetime(2017, 1, 9, 18, 28, 56), u'tenant_id': u'56cca8484d3e48869126ada4f355c284', u'compute_instance_id': u'2452263e-3d33-48ec-8f24-2851fe74db28', u'flavor_id': u'25'} from (pid=96635) save /opt/stack/trove/trove/db/models.py:64 The configuration file for this instance is:: amrith@m10:~$ cat /etc/trove/conf.d/guest_info.conf [DEFAULT] guest_id=514ef051-0bf7-48a5-adcf-071d4a6625fb datastore_manager=mysql tenant_id=56cca8484d3e48869126ada4f355c284 We can now shutdown the control plane again and enable the secure RPC capability. Observe that we've just commented out the lines (below):: trove.conf:# enable_secure_rpc_messaging = False And create another database instance:: amrith@amrith-work:/etc/trove$ trove create m20 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2017-01-09T18:31:48 | | datastore | mysql | | datastore_version | 5.6 | | flavor | 25 | | id | 792fa220-2a40-4831-85af-cfb0ded8033c | | name | m20 | | region | RegionOne | | server_id | None | | status | BUILD | | updated | 2017-01-09T18:31:48 | | volume | 3 | | volume_id | None | +-------------------+--------------------------------------+ Observe that a unique per-instance encryption key was created for this instance:: 2017-01-09 13:31:52.474 DEBUG trove.db.models [-] Saving DBInstance: {u'region_id': u'RegionOne', u'cluster_id': None, u'shard_id': None, u'deleted_at': None, u'id': u'792fa220-2a40-4831-85af-cfb0ded8033c', u'datastore_version_id': u'4a881cb5-9e48-4cb2-a209-4283ed44eb01', 'errors': {}, u'hostname': None, u'server_status': None, u'task_description': u'No tasks for the instance.', u'volume_size': 3, u'type': None, u'updated': datetime.datetime(2017, 1, 9, 18, 31, 52, 473552), '_sa_instance_state': , u'encrypted_key': u'fVpHrkUIjVsXe7Fj7Lm4u2xnJUsWX2rMC9GL0AppILJINBZxLvkowY8FOa+asKS+8pWb4iNyukQQ4AQoLEUHUQ==', u'deleted': 0, u'configuration_id': None, u'volume_id': u'4cd563dc-fe08-477b-828f-120facf4351b', u'slave_of_id': None, u'task_start_time': None, u'name': u'm20', u'task_id': 1, u'created': datetime.datetime(2017, 1, 9, 18, 31, 49), u'tenant_id': u'56cca8484d3e48869126ada4f355c284', u'compute_instance_id': u'1e62a192-83d3-43fd-b32e-b5ee2fa4e24b', u'flavor_id': u'25'} from (pid=97562) save /opt/stack/trove/trove/db/models.py:64 And the configuration file on that instance includes an encryption key:: amrith@m20:~$ cat /etc/trove/conf.d/guest_info.conf [DEFAULT] guest_id=792fa220-2a40-4831-85af-cfb0ded8033c datastore_manager=mysql tenant_id=56cca8484d3e48869126ada4f355c284 instance_rpc_encr_key=eRz43LwE6eaxIbBlA2pNukzPjSdcQkVi amrith@amrith-work:/etc/trove$ trove list 
+--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ | ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region | +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ | 514ef051-0bf7-48a5-adcf-071d4a6625fb | m10 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 792fa220-2a40-4831-85af-cfb0ded8033c | m20 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 9ceebd62-e13d-43c5-953a-c0f24f08757e | m3 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ At this point communication between API service and Task Manager, and between the control plane and instance m20 is encrypted but communication between control plane and all other instances is not encrypted. In this condition we can attempt some operations on the various instances. First with the legacy instances created on software that predated the secure RPC mechanism:: amrith@amrith-work:/etc/trove$ trove database-list m2 +------+ | Name | +------+ +------+ amrith@amrith-work:/etc/trove$ trove database-create m2 foo2 amrith@amrith-work:/etc/trove$ trove database-list m2 +------+ | Name | +------+ | foo2 | +------+ And at the same time with the instance m10 which is created with the current software but without RPC encryption:: amrith@amrith-work:/etc/trove$ trove database-list m10 +------+ | Name | +------+ +------+ amrith@amrith-work:/etc/trove$ trove database-create m10 foo10 amrith@amrith-work:/etc/trove$ trove database-list m10 +-------+ | Name | +-------+ | foo10 | +-------+ amrith@amrith-work:/etc/trove$ And finally with an instance that uses encrypted RPC communications:: amrith@amrith-work:/etc/trove$ trove database-list m20 +------+ | Name | +------+ +------+ amrith@amrith-work:/etc/trove$ trove database-create m20 foo20 amrith@amrith-work:/etc/trove$ trove database-list m20 +-------+ | Name | +-------+ | foo20 | +-------+ Finally, we can upgrade an instance that has no encryption to have rpc encryption:: amrith@amrith-work:/etc/trove$ trove datastore-list +--------------------------------------+------------------+ | ID | Name | +--------------------------------------+------------------+ | 8e052edb-5f14-4aec-9149-0a80a30cf5e4 | mysql | +--------------------------------------+------------------+ amrith@amrith-work:/etc/trove$ trove datastore-version-list mysql +--------------------------------------+------------------+ | ID | Name | +--------------------------------------+------------------+ | 4a881cb5-9e48-4cb2-a209-4283ed44eb01 | 5.6 | +--------------------------------------+------------------+ Let's look at instance m2:: mysql> select id, name, encrypted_key from instances where id = 'bb0c9213-31f8-4427-8898-c644254b3642'; +--------------------------------------+------+---------------+ | id | name | encrypted_key | +--------------------------------------+------+---------------+ | bb0c9213-31f8-4427-8898-c644254b3642 | m2 | NULL | +--------------------------------------+------+---------------+ 1 row in set (0.00 sec) amrith@amrith-work:/etc/trove$ trove upgrade m2 4a881cb5-9e48-4cb2-a209-4283ed44eb01 amrith@amrith-work:/etc/trove$ trove list 
+--------------------------------------+------+-----------+-------------------+---------+-----------+------+-----------+ | ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region | +--------------------------------------+------+-----------+-------------------+---------+-----------+------+-----------+ | 514ef051-0bf7-48a5-adcf-071d4a6625fb | m10 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 792fa220-2a40-4831-85af-cfb0ded8033c | m20 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 9ceebd62-e13d-43c5-953a-c0f24f08757e | m3 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | UPGRADE | 25 | 3 | RegionOne | +--------------------------------------+------+-----------+-------------------+---------+-----------+------+-----------+ amrith@amrith-work:/etc/trove$ nova list +--------------------------------------+------+---------+------------+-------------+--------------------+ | ID | Name | Status | Task State | Power State | Networks | +--------------------------------------+------+---------+------------+-------------+--------------------+ [...] | a4769ce2-4e22-4134-b958-6db6c23cb221 | m2 | REBUILD | rebuilding | Running | public=172.24.4.4 | [...] +--------------------------------------+------+---------+------------+-------------+--------------------+ 2017-01-09 13:47:24.337 DEBUG trove.db.models [-] Saving DBInstance: {u'region_id': u'RegionOne', u'cluster_id': None, u'shard_id': None, u'deleted_at': None, u'id': u'bb0c9213-31f8-4427-8898-c644254b3642', u'datastore_version_id': u'4a881cb5-9e48-4cb2-a209-4283ed44eb01', 'errors': {}, u'hostname': None, u'server_status': None, u'task_description': u'Upgrading the instance.', u'volume_size': 3, u'type': None, u'updated': datetime.datetime(2017, 1, 9, 18, 47, 24, 337400), '_sa_instance_state': , u'encrypted_key': u'gMrlHkEVxKgEFMTabzZr2TLJ6r5+wgfJfhohs7K/BzutWxs1wXfBswyV5Bgw4qeD212msmgSdOUCFov5otgzyg==', u'deleted': 0, u'configuration_id': None, u'volume_id': u'16e57e3f-b462-4db2-968b-3c284aa2751c', u'slave_of_id': None, u'task_start_time': None, u'name': u'm2', u'task_id': 89, u'created': datetime.datetime(2017, 1, 9, 18, 17, 13), u'tenant_id': u'56cca8484d3e48869126ada4f355c284', u'compute_instance_id': u'a4769ce2-4e22-4134-b958-6db6c23cb221', u'flavor_id': u'25'} from (pid=97562) save /opt/stack/trove/trove/db/models.py:64 2017-01-09 13:47:24.347 DEBUG trove.taskmanager.models [-] Generated unique RPC encryption key for instance = bb0c9213-31f8-4427-8898-c644254b3642, key = gMrlHkEVxKgEFMTabzZr2TLJ6r5+wgfJfhohs7K/BzutWxs1wXfBswyV5Bgw4qeD212msmgSdOUCFov5otgzyg== from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1440 2017-01-09 13:47:24.350 DEBUG trove.taskmanager.models [-] Rebuilding instance m2(bb0c9213-31f8-4427-8898-c644254b3642) with image ea05cba7-2f70-4745-abea-136d7bcc16c7. 
from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1445 The instance now has an encryption key in its configuration:: amrith@m2:~$ cat /etc/trove/conf.d/guest_info.conf [DEFAULT] guest_id=bb0c9213-31f8-4427-8898-c644254b3642 datastore_manager=mysql tenant_id=56cca8484d3e48869126ada4f355c284 instance_rpc_encr_key=pN2hHEl171ngyD0mPvyV1xKJF2im01Gv amrith@amrith-work:/etc/trove$ trove list +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ | ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region | +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ [...] | bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | [...] +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ amrith@amrith-work:/etc/trove$ trove show m2 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2017-01-09T18:17:13 | | datastore | mysql | | datastore_version | 5.6 | | flavor | 25 | | id | bb0c9213-31f8-4427-8898-c644254b3642 | | name | m2 | | region | RegionOne | | server_id | a4769ce2-4e22-4134-b958-6db6c23cb221 | | status | ACTIVE | | updated | 2017-01-09T18:50:07 | | volume | 3 | | volume_id | 16e57e3f-b462-4db2-968b-3c284aa2751c | | volume_used | 0.13 | +-------------------+--------------------------------------+ amrith@amrith-work:/etc/trove$ trove database-list m2 +------+ | Name | +------+ | foo2 | +------+ We can similarly upgrade m4:: 2017-01-09 13:51:43.078 DEBUG trove.instance.models [-] Instance 6d55ab3a-267f-4b95-8ada-33fc98fd1767 service status is running. from (pid=97562) load_instance /opt/stack/trove/trove/instance/models.py:534 2017-01-09 13:51:43.083 DEBUG trove.taskmanager.models [-] Upgrading instance m4(6d55ab3a-267f-4b95-8ada-33fc98fd1767) to new datastore version 5.6(4a881cb5-9e48-4cb2-a209-4283ed44eb01) from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1410 2017-01-09 13:51:43.087 DEBUG trove.guestagent.api [-] Sending the call to prepare the guest for upgrade. from (pid=97562) pre_upgrade /opt/stack/trove/trove/guestagent/api.py:351 2017-01-09 13:51:43.087 DEBUG trove.guestagent.api [-] Calling pre_upgrade with timeout 600 from (pid=97562) _call /opt/stack/trove/trove/guestagent/api.py:86 2017-01-09 13:51:43.088 DEBUG oslo_messaging._drivers.amqpdriver [-] CALL msg_id: 41dbb7fff3dc4f8fa69d8b5f219809e0 exchange 'trove' topic 'guestagent.6d55ab3a-267f-4b95-8ada-33fc98fd1767' from (pid=97562) _send /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:442 2017-01-09 13:51:45.452 DEBUG oslo_messaging._drivers.amqpdriver [-] received reply msg_id: 41dbb7fff3dc4f8fa69d8b5f219809e0 from (pid=97562) __call__ /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:299 2017-01-09 13:51:45.452 DEBUG trove.guestagent.api [-] Result is {u'mount_point': u'/var/lib/mysql', u'save_etc_dir': u'/var/lib/mysql/etc', u'home_save': u'/var/lib/mysql/trove_user', u'save_dir': u'/var/lib/mysql/etc_mysql'}. 
from (pid=97562) _call /opt/stack/trove/trove/guestagent/api.py:91 2017-01-09 13:51:45.544 DEBUG trove.db.models [-] Saving DBInstance: {u'region_id': u'RegionOne', u'cluster_id': None, u'shard_id': None, u'deleted_at': None, u'id': u'6d55ab3a-267f-4b95-8ada-33fc98fd1767', u'datastore_version_id': u'4a881cb5-9e48-4cb2-a209-4283ed44eb01', 'errors': {}, u'hostname': None, u'server_status': None, u'task_description': u'Upgrading the instance.', u'volume_size': 3, u'type': None, u'updated': datetime.datetime(2017, 1, 9, 18, 51, 45, 544496), '_sa_instance_state': , u'encrypted_key': u'0gBkJl5Aqb4kFIPeJDMTNIymEUuUUB8NBksecTiYyQl+Ibrfi7ME8Bi58q2n61AxbG2coOqp97ETjHRyN7mYTg==', u'deleted': 0, u'configuration_id': None, u'volume_id': u'b7dc17b5-d0a8-47bb-aef4-ef9432c269e9', u'slave_of_id': None, u'task_start_time': None, u'name': u'm4', u'task_id': 89, u'created': datetime.datetime(2017, 1, 9, 18, 20, 58), u'tenant_id': u'56cca8484d3e48869126ada4f355c284', u'compute_instance_id': u'f43bba63-3be6-4993-b2d0-4ddfb7818d27', u'flavor_id': u'25'} from (pid=97562) save /opt/stack/trove/trove/db/models.py:64 2017-01-09 13:51:45.557 DEBUG trove.taskmanager.models [-] Generated unique RPC encryption key for instance = 6d55ab3a-267f-4b95-8ada-33fc98fd1767, key = 0gBkJl5Aqb4kFIPeJDMTNIymEUuUUB8NBksecTiYyQl+Ibrfi7ME8Bi58q2n61AxbG2coOqp97ETjHRyN7mYTg== from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1440 2017-01-09 13:51:45.560 DEBUG trove.taskmanager.models [-] Rebuilding instance m4(6d55ab3a-267f-4b95-8ada-33fc98fd1767) with image ea05cba7-2f70-4745-abea-136d7bcc16c7. from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1445 amrith@amrith-work:/etc/trove$ nova list +--------------------------------------+------+---------+------------+-------------+--------------------+ | ID | Name | Status | Task State | Power State | Networks | +--------------------------------------+------+---------+------------+-------------+--------------------+ [...] | f43bba63-3be6-4993-b2d0-4ddfb7818d27 | m4 | REBUILD | rebuilding | Running | public=172.24.4.11 | [...] +--------------------------------------+------+---------+------------+-------------+--------------------+ 2017-01-09 13:53:26.581 DEBUG trove.guestagent.api [-] Recover the guest after upgrading the guest's image. from (pid=97562) post_upgrade /opt/stack/trove/trove/guestagent/api.py:359 2017-01-09 13:53:26.581 DEBUG trove.guestagent.api [-] Recycling the client ... 
from (pid=97562) post_upgrade /opt/stack/trove/trove/guestagent/api.py:361 2017-01-09 13:53:26.581 DEBUG trove.guestagent.api [-] Calling post_upgrade with timeout 600 from (pid=97562) _call /opt/stack/trove/trove/guestagent/api.py:86 2017-01-09 13:53:26.583 DEBUG oslo_messaging._drivers.amqpdriver [-] CALL msg_id: 2e9ccc88715b4b98848a017e19b2938d exchange 'trove' topic 'guestagent.6d55ab3a-267f-4b95-8ada-33fc98fd1767' from (pid=97562) _send /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:442 mysql> select id, name, encrypted_key from instances where name in ('m2', 'm4', 'm10', 'm20'); +--------------------------------------+------+------------------------------------------------------------------------------------------+ | id | name | encrypted_key | +--------------------------------------+------+------------------------------------------------------------------------------------------+ | 514ef051-0bf7-48a5-adcf-071d4a6625fb | m10 | NULL | | 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | 0gBkJl5Aqb4kFIPeJDMTNIymEUuUUB8NBksecTiYyQl+Ibrfi7ME8Bi58q2n61AxbG2coOqp97ETjHRyN7mYTg== | | 792fa220-2a40-4831-85af-cfb0ded8033c | m20 | fVpHrkUIjVsXe7Fj7Lm4u2xnJUsWX2rMC9GL0AppILJINBZxLvkowY8FOa+asKS+8pWb4iNyukQQ4AQoLEUHUQ== | | bb0c9213-31f8-4427-8898-c644254b3642 | m2 | gMrlHkEVxKgEFMTabzZr2TLJ6r5+wgfJfhohs7K/BzutWxs1wXfBswyV5Bgw4qeD212msmgSdOUCFov5otgzyg== | +--------------------------------------+------+------------------------------------------------------------------------------------------+ amrith@amrith-work:/etc/trove$ trove list +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ | ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region | +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ | 514ef051-0bf7-48a5-adcf-071d4a6625fb | m10 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | 792fa220-2a40-4831-85af-cfb0ded8033c | m20 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | | bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne | +--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+ Inspecting which instances are using secure RPC communications -------------------------------------------------------------- An additional field is returned in the trove show command output to indicate whether any given instance is using secure RPC communication or not. .. note:: This field is only returned if the user is an 'admin'. Non admin users do not see the field. 
:: amrith@amrith-work:/opt/stack/trove$ trove show m20 +-------------------------+--------------------------------------+ | Property | Value | +-------------------------+--------------------------------------+ | created | 2017-01-09T18:31:49 | | datastore | mysql | | datastore_version | 5.6 | | encrypted_rpc_messaging | True | | flavor | 25 | | id | 792fa220-2a40-4831-85af-cfb0ded8033c | | name | m20 | | region | RegionOne | | server_id | 1e62a192-83d3-43fd-b32e-b5ee2fa4e24b | | status | ACTIVE | | updated | 2017-01-09T18:31:52 | | volume | 3 | | volume_id | 4cd563dc-fe08-477b-828f-120facf4351b | | volume_used | 0.11 | +-------------------------+--------------------------------------+ amrith@amrith-work:/opt/stack/trove$ trove show m10 +-------------------------+--------------------------------------+ | Property | Value | +-------------------------+--------------------------------------+ | created | 2017-01-09T18:28:56 | | datastore | mysql | | datastore_version | 5.6 | | encrypted_rpc_messaging | False | | flavor | 25 | | id | 514ef051-0bf7-48a5-adcf-071d4a6625fb | | name | m10 | | region | RegionOne | | server_id | 2452263e-3d33-48ec-8f24-2851fe74db28 | | status | ACTIVE | | updated | 2017-01-09T18:29:00 | | volume | 3 | | volume_id | cee2e17b-80fa-48e5-a488-da8b7809373a | | volume_used | 0.11 | +-------------------------+--------------------------------------+ amrith@amrith-work:/opt/stack/trove$ trove show m2 +-------------------------+--------------------------------------+ | Property | Value | +-------------------------+--------------------------------------+ | created | 2017-01-09T18:17:13 | | datastore | mysql | | datastore_version | 5.6 | | encrypted_rpc_messaging | True | | flavor | 25 | | id | bb0c9213-31f8-4427-8898-c644254b3642 | | name | m2 | | region | RegionOne | | server_id | a4769ce2-4e22-4134-b958-6db6c23cb221 | | status | ACTIVE | | updated | 2017-01-09T18:50:07 | | volume | 3 | | volume_id | 16e57e3f-b462-4db2-968b-3c284aa2751c | | volume_used | 0.13 | +-------------------------+--------------------------------------+ amrith@amrith-work:/opt/stack/trove$ trove show m4 +-------------------------+--------------------------------------+ | Property | Value | +-------------------------+--------------------------------------+ | created | 2017-01-09T18:20:58 | | datastore | mysql | | datastore_version | 5.6 | | encrypted_rpc_messaging | True | | flavor | 25 | | id | 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | | name | m4 | | region | RegionOne | | server_id | f43bba63-3be6-4993-b2d0-4ddfb7818d27 | | status | ACTIVE | | updated | 2017-01-09T18:54:30 | | volume | 3 | | volume_id | b7dc17b5-d0a8-47bb-aef4-ef9432c269e9 | | volume_used | 0.13 | +-------------------------+--------------------------------------+ amrith@amrith-work:/opt/stack/trove$ In the API response, note that the additional key "encrypted_rpc_messaging" has been added (as below). .. note:: This field is only returned if the user is an 'admin'. Non admin users do not see the field. 
:: RESP BODY: {"instance": {"status": "ACTIVE", "updated": "2017-01-09T18:29:00", "name": "m10", "links": [{"href": "https://192.168.126.130:8779/v1.0/56cca8484d3e48869126ada4f355c284/instances/514ef051-0bf7-48a5-adcf-071d4a6625fb", "rel": "self"}, {"href": "https://192.168.126.130:8779/instances/514ef051-0bf7-48a5-adcf-071d4a6625fb", "rel": "bookmark"}], "created": "2017-01-09T18:28:56", "region": "RegionOne", "server_id": "2452263e-3d33-48ec-8f24-2851fe74db28", "id": "514ef051-0bf7-48a5-adcf-071d4a6625fb", "volume": {"used": 0.11, "size": 3}, "volume_id": "cee2e17b-80fa-48e5-a488-da8b7809373a", "flavor": {"id": "25", "links": [{"href": "https://192.168.126.130:8779/v1.0/56cca8484d3e48869126ada4f355c284/flavors/25", "rel": "self"}, {"href": "https://192.168.126.130:8779/flavors/25", "rel": "bookmark"}]}, "datastore": {"version": "5.6", "type": "mysql"}, "encrypted_rpc_messaging": false}} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.680109 trove-12.1.0.dev92/doc/source/cli/0000755000175000017500000000000000000000000016763 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/cli/index.rst0000644000175000017500000000021700000000000020624 0ustar00coreycorey00000000000000========================== Trove Command Line Tools ========================== .. toctree:: :maxdepth: 1 trove-manage trove-status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/cli/trove-manage.rst0000644000175000017500000002234600000000000022111 0ustar00coreycorey00000000000000.. This file is manually generated, unlike many of the other chapters. ================================ trove-manage command-line client ================================ The :command:`trove-manage` client is the command-line interface (CLI) for the Database Management Utility API and its extensions. This chapter documents :command:`trove-manage` version ``5.0.1``. For help on a specific :command:`trove-manage` command, enter: .. code-block:: console $ trove-manage COMMAND --help .. _trove-manage_command_usage: trove-manage usage ~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage [-h] [--config-dir DIR] [--config-file PATH] [--debug] [--log-config-append PATH] [--log-date-format DATE_FORMAT] [--log-dir LOG_DIR] [--log-file PATH] [--nodebug] [--nouse-syslog] [--noverbose] [--nowatch-log-file] [--syslog-log-facility SYSLOG_LOG_FACILITY] [--use-syslog] [--verbose] [--version] [--watch-log-file] {db_sync,db_upgrade,db_downgrade,datastore_update, datastore_version_update,db_recreate, db_load_datastore_config_parameters, datastore_version_flavor_add, datastore_version_flavor_delete} ... .. _trove-manage_command_options: trove-manage optional arguments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``-h, --help`` show this help message and exit ``--config-dir DIR`` Path to a config directory to pull ``*.conf`` files from. This file set is sorted, so as to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file, arguments hence over-ridden options in the directory take precedence. ``--config-file PATH`` Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. The default files used are: ``None``. 
``--debug, -d`` Print debugging output (set logging level to ``DEBUG`` instead of default ``INFO`` level). ``--log-config-append PATH, --log_config PATH`` The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, ``logging_context_format_string``). ``--log-date-format DATE_FORMAT`` Format string for ``%(asctime)s`` in log records. Default: ``None``. This option is ignored if ``log_config_append`` is set. ``--log-dir LOG_DIR, --logdir LOG_DIR`` (Optional) The base directory used for relative ``--log-file`` paths. This option is ignored if ``log_config_append`` is set. ``--log-file PATH, --logfile PATH`` (Optional) Name of log file to send logging output to. If no default is set, logging will go to stderr as defined by ``use_stderr``. This option is ignored if ``log_config_append`` is set. ``--nodebug`` The inverse of --debug ``--nouse-syslog`` The inverse of --use-syslog ``--nouse-syslog-rfc-format`` The inverse of --use-syslog-rfc-format ``--noverbose`` The inverse of --verbose ``--syslog-log-facility SYSLOG_LOG_FACILITY`` Syslog facility to receive log lines. This option is ignored if ``log_config_append`` is set. ``--use-syslog`` Use syslog for logging. Existing syslog format is **DEPRECATED** and will be changed later to honor RFC5424. This option is ignored if ``log_config_append`` is set. ``--verbose, -v`` If set to false, the logging level will be set to ``WARNING`` instead of the default ``INFO`` level. ``--version`` show program's version number and exit ``--watch-log-file`` Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if ``log_file`` option is specified and Linux platform is used. This option is ignored if ``log_config_append`` is set. trove-manage datastore_update ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage datastore_update [-h] datastore_name default_version Add or update a datastore. If the datastore already exists, the default version will be updated. **positional arguments:** ``datastore_name`` Name of the datastore. ``default_version`` Name or ID of an existing datastore version to set as the default. When adding a new datastore, use an empty string. **optional arguments:** ``-h, --help`` show this help message and exit trove-manage datastore_version_flavor_add ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage datastore_version_flavor_add [-h] datastore_name datastore_version_name flavor_ids **positional arguments:** ``datastore_name`` Name of the datastore. ``datastore_version_name`` Name of the datastore version. ``flavor_ids`` Comma separated list of flavor ids. **optional arguments:** ``-h, --help`` show this help message and exit trove-manage datastore_version_flavor_delete ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage datastore_version_flavor_delete [-h] datastore_name datastore_version_name flavor_id **positional arguments:** ``datastore_name`` Name of the datastore. ``datastore_version_name`` Name of the datastore version. ``flavor_id`` The flavor to be deleted for a given datastore and datastore version. 
**optional arguments:** ``-h, --help`` show this help message and exit trove-manage datastore_version_update ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage datastore_version_update [-h] datastore version_name manager image_id packages active Add or update a datastore version. If the datastore version already exists, all values except the datastore name and version will be updated. **positional arguments:** ``datastore`` Name of the datastore. ``version_name`` Name of the datastore version. ``manager`` Name of the manager that will administer the datastore version. ``image_id`` ID of the image used to create an instance of the datastore version. ``packages`` Packages required by the datastore version that are installed on the guest image. ``active`` Whether the datastore version is active or not. Accepted values are ``0`` and ``1``. **optional arguments:** ``-h, --help`` show this help message and exit trove-manage db_downgrade ~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage db_downgrade [-h] [--repo_path REPO_PATH] version Downgrade the database to the specified version. **positional arguments:** ``version`` Target version. **optional arguments:** ``-h, --help`` show this help message and exit ``--repo_path REPO_PATH`` SQLAlchemy Migrate repository path. trove-manage db_load_datastore_config_parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage db_load_datastore_config_parameters [-h] datastore datastore_version config_file_location Loads configuration group parameter validation rules for a datastore version into the database. **positional arguments:** ``datastore`` Name of the datastore. ``datastore_version`` Name of the datastore version. ``config_file_location`` Fully qualified file path to the configuration group parameter validation rules. **optional arguments:** ``-h, --help`` show this help message and exit trove-manage db_recreate ~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage db_recreate [-h] [--repo_path REPO_PATH] Drop the database and recreate it. **optional arguments:** ``-h, --help`` show this help message and exit ``--repo_path REPO_PATH`` SQLAlchemy Migrate repository path. trove-manage db_sync ~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage db_sync [-h] [--repo_path REPO_PATH] Populate the database structure **optional arguments:** ``-h, --help`` show this help message and exit ``--repo_path REPO_PATH`` SQLAlchemy Migrate repository path. trove-manage db_upgrade ~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: trove-manage db_upgrade [-h] [--version VERSION] [--repo_path REPO_PATH] Upgrade the database to the specified version. **optional arguments:** ``-h, --help`` show this help message and exit ``--version VERSION`` Target version. Defaults to the latest version. ``--repo_path REPO_PATH`` SQLAlchemy Migrate repository path. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/cli/trove-status.rst0000644000175000017500000000362600000000000022204 0ustar00coreycorey00000000000000============ trove-status ============ Synopsis ======== :: trove-status [] Description =========== :program:`trove-status` is a tool that provides routines for checking the status of a Trove deployment. 
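For example, the upgrade readiness checks described below can be invoked
directly. This is a sketch of the invocation only; the tabular results
come from the common oslo.upgradecheck framework, and their exact form may
vary by release.

.. code-block:: console

    $ trove-status upgrade check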
Options ======= The standard pattern for executing a :program:`trove-status` command is:: trove-status [] Run without arguments to see a list of available command categories:: trove-status Categories are: * ``upgrade`` Detailed descriptions are below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: trove-status upgrade These sections describe the available categories and arguments for :program:`trove-status`. Upgrade ~~~~~~~ .. _trove-status-checks: ``trove-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. This command expects to have complete configuration and access to databases and services. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **11.0.0 (Stein)** * Checks if any Trove instance with assigned task is running. The assigned tasks may fail during the upgrade process due to transient unavailability of Trove control plane. Upgrade should be postponed until all instances are in the Active state. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/conf.py0000644000175000017500000002173400000000000017522 0ustar00coreycorey00000000000000# -*- coding: utf-8 -*- # # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'openstackdocstheme', 'stevedore.sphinxext'] # openstackdocstheme options repository_name = 'openstack/trove' bug_project = 'trove' bug_tag = '' html_theme = 'openstackdocs' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Trove' copyright = u'2013, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['trove.'] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['_static'] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = '%sdoc' % project # -- Options for LaTeX output ------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]) latex_documents = [ ( 'index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual' ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). # man_pages = [ # ( # 'index', # '%s' % project, # u'%s Documentation' % project, # u'OpenStack Foundation', # 1 # ), # ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( 'index', '%s' % project, u'%s Documentation' % project, u'OpenStack Foundation', '%s' % project, 'Database as a service.', 'Miscellaneous' 'manual' ), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'%s' % project epub_author = u'OpenStack Foundation' epub_publisher = u'OpenStack Foundation' epub_copyright = u'2013, OpenStack Foundation' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. #epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Fix unsupported image types using the PIL. #epub_fix_images = False # Scale large images. 
#epub_max_image_width = 0

# If 'no', URL addresses will not be shown.
#epub_show_urls = 'inline'

# If false, no index is generated.
#epub_use_index = True
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.680109 trove-12.1.0.dev92/doc/source/contributor/0000755000175000017500000000000000000000000020566 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/contributor/contributing.rst0000644000175000017500000000663100000000000024035 0ustar00coreycorey00000000000000
============================
So You Want to Contribute...
============================

For general information on contributing to OpenStack, please check out the
`contributor guide `_ to get started. It covers all the basics that are
common to all OpenStack projects: the accounts you need, the basics of
interacting with our Gerrit review system, how we communicate as a
community, etc.

The sections below cover the more project-specific information you need to
get started with Trove.

Communication
~~~~~~~~~~~~~

.. This would be a good place to put the channel you chat in as a project;
   when/where your meeting is, the tags you prepend to your ML threads,
   etc.

- IRC channel: #openstack-trove
- Mailing list's prefix: [trove]
- Currently, we don't have a team meeting, given the small group of core
  reviewers and their time zones; the situation may change in the future.

Contacting the Core Team
~~~~~~~~~~~~~~~~~~~~~~~~

.. This section should list the core team, their irc nicks, emails,
   timezones etc. If all this info is maintained elsewhere (i.e. a wiki),
   you can link to that instead of enumerating everyone here.

The list of current Trove core reviewers is available on `gerrit `_.

New Feature Planning
~~~~~~~~~~~~~~~~~~~~

.. This section is for talking about the process to get a new feature in.
   Some projects use blueprints, some want specs, some want both! Some
   projects stick to a strict schedule when selecting what new features
   will be reviewed for a release.

#. Talk to the team via IRC (meeting) or ML (with the [trove] prefix)
   about the feature requested. We will discuss whether a spec is needed
   based on the implementation complexity and the
   installation/configuration/upgrade/user-facing impacts.
#. If a spec is needed, a patch needs to be submitted to the `trove-specs
   repo `_ before the code is reviewed.
#. Code implementation and review.

Task Tracking
~~~~~~~~~~~~~

.. This section is about where you track tasks- launchpad? storyboard? is
   there more than one launchpad project? what's the name of the project
   group in storyboard?

We track our tasks in `Storyboard `_.

If you're looking for a smaller, easier work item to pick up and get
started on, search for the 'low-hanging-fruit' tag.

Reporting a Bug
~~~~~~~~~~~~~~~

.. Pretty self explanatory section, link directly to where people should
   report bugs for your project.

You found an issue and want to make sure we are aware of it? You can do so
on `Storyboard `_.

Getting Your Patch Merged
~~~~~~~~~~~~~~~~~~~~~~~~~

.. This section should have info about what it takes to get something
   merged. Do you require one or two +2's before +W? Do some of your repos
   require unit test changes with all patches? etc.

Due to the small number of core reviewers of the Trove project, we only
need one +2 before ``Workflow +1``.

Project Team Lead Duties
------------------------

..
this section is where you can put PTL specific duties not already listed in the common PTL guide (linked below) or, if you already have them written up elsewhere, you can link to that doc here. All common PTL duties are enumerated in the `PTL guide <https://docs.openstack.org/project-team-guide/ptl.html>`_.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/contributor/design.rst0000644000175000017500000001177400000000000022601 0ustar00coreycorey00000000000000.. _design: ============ Trove Design ============ High Level description ====================== Trove is designed to support a single-tenant database within a Nova instance. There will be no restrictions on how Nova is configured, since Trove interacts with other OpenStack components purely through the API. Trove-api ========= The trove-api service provides a RESTful API that supports JSON and XML to provision and manage Trove instances. * A RESTful component * Entry point - trove/bin/trove-api * Uses a WSGI launcher configured by etc/trove/api-paste.ini * Defines the pipeline of filters: authtoken, ratelimit, etc. * Defines the app_factory for the troveapp as trove.common.api:app_factory * The API class (a wsgi Router) wires the REST paths to the appropriate Controllers * Implementations of the Controllers are under the relevant module (versions/instance/flavor/limits), in the service.py module * Controllers usually redirect implementation to a class in the models.py module * At this point, an api module of another component (TaskManager, GuestAgent, etc.) is used to send the request onwards through RabbitMQ Trove-taskmanager ================= The trove-taskmanager service does the heavy lifting: provisioning instances, managing the instance lifecycle, and performing operations on the database instance. * A service that listens on a RabbitMQ topic * Entry point - trove/bin/trove-taskmanager * Runs as an RpcService configured by etc/trove/trove.conf.sample which defines trove.taskmanager.manager.Manager as the manager - basically this is the entry point for requests arriving through the queue * As described above, requests for this component are pushed to MQ from another component using the TaskManager's api module, using _cast() (asynchronous) or _call() (synchronous) and passing the method's name as a parameter * In module oslo.messaging, oslo_messaging/rpc/dispatcher.py - RpcDispatcher.dispatch() invokes the proper method in the Manager by some equivalent to reflection * The Manager then redirects the handling to an object from the models.py module. It loads an object from the relevant class with the context and instance_id * Actual handling is usually done in the models.py module Trove-guestagent ================ The guestagent is a service that runs within the guest instance, responsible for managing and performing operations on the Database itself. The Guest Agent listens for RPC messages through the message bus and performs the requested operation.
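As a rough illustration of the cast/call flow described in the bullet points below, here is a minimal oslo.messaging sketch of how one component can push a method invocation onto a topic. The topic, context, and method arguments here are illustrative assumptions; Trove's real api modules wrap this machinery in their own _cast()/_call() helpers rather than using RPCClient directly.

.. code-block:: python

    import oslo_messaging as messaging
    from oslo_config import cfg

    # Load the RPC transport (RabbitMQ by default) from the service config.
    transport = messaging.get_rpc_transport(cfg.CONF)

    # Hypothetical topic; e.g. a guestagent topic is derived from the
    # instance id.
    target = messaging.Target(topic='guestagent.some-instance-id')
    client = messaging.RPCClient(transport, target)

    # Fire-and-forget request, the pattern behind _cast().
    client.cast({}, 'create_database', databases=[{'name': 'db1'}])

    # Blocking request that waits for a result, the pattern behind _call().
    users = client.call({}, 'list_users')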
* Similar to TaskManager in the sense of running as a service that listens on a RabbitMQ topic * GuestAgent runs on every DB instance, and a dedicated MQ topic is used (identified as the instance's id) * Entry point - trove/bin/trove-guestagent * Runs as an RpcService configured by etc/trove/trove-guestagent.conf.sample which defines trove.guestagent.datastore.manager.Manager as the manager - basically this is the entry point for requests arriving through the queue * As described above, requests for this component are pushed to MQ from another component using the GuestAgent's api module, using _cast() (asynchronous) or _call() (synchronous) and passing the method's name as a parameter * In module oslo.messaging, oslo_messaging/rpc/dispatcher.py - RpcDispatcher.dispatch() invokes the proper method in the Manager by some equivalent to reflection * The Manager then redirects the handling to an object (usually) from the dbaas.py module. * Actual handling is usually done in the dbaas.py module Trove-conductor =============== Conductor is a service that runs on the host, responsible for receiving messages from guest instances to update information on the host, for example, instance statuses and the current status of a backup. With conductor, guest instances do not need a direct connection to the host's database. Conductor listens for RPC messages through the message bus and performs the relevant operation. * Similar to guest-agent in that it is a service that listens to a RabbitMQ topic. The difference is conductor lives on the host, not the guest. * Guest agents communicate with conductor by putting messages on the topic defined in cfg as conductor_queue. By default this is "trove-conductor". * Entry point - trove/bin/trove-conductor * Runs as an RpcService configured by etc/trove/trove.conf.sample which defines trove.conductor.manager.Manager as the manager. This is the entry point for requests arriving on the queue. * As with the guest agent above, requests are pushed to MQ from another component using _cast() (asynchronous), generally of the form {"method": "<method_name>", "args": {}} * Actual database update work is done by trove/conductor/manager.py * The "heartbeat" method updates the status of an instance. This is used to report that the instance has changed from NEW to BUILDING to ACTIVE and so on. * The "update_backup" method changes the details of a backup, including its current status, size of the backup, type, and checksum. .. Trove - Database as a Service: https://wiki.openstack.org/wiki/Trove ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/contributor/index.rst0000644000175000017500000000040200000000000022423 0ustar00coreycorey00000000000000======================= Contributor Resources ======================= For those wishing to develop Trove itself, or to extend Trove's functionality, the following resources are provided. .. toctree:: :maxdepth: 1 contributing design testing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/contributor/testing.rst0000644000175000017500000001243200000000000022777 0ustar00coreycorey00000000000000.. _testing: ========================= Notes on Trove Unit Tests ========================= Mock Object Library ------------------- Trove unit tests make frequent use of the Python Mock library.
This library lets the caller replace (*"mock"*) parts of the system under test with mock objects and make assertions about how they have been used. [1]_ The Problem of Dangling Mocks ----------------------------- Often one needs to mock global functions in shared system modules. The caller must restore the original state of the module once the mock is no longer required. Dangling mock objects in global modules (mocked members of imported modules that never get restored) have been causing various transient failures in the unit test suite. The main issues posed by dangling mock objects include: - Such object references propagate across the entire test suite. Any caller may be hit by a non-functional - or worse - crippled module member because some other (potentially totally unrelated) test case failed to restore it. - Dangling mock references shared across different test modules may lead to unexpected results/behavior in multi-threaded environments. One example could be a test case failing because a mock got called multiple times from unrelated modules. Such issues are likely to exhibit transient random behavior depending on the runtime environment, making them difficult to debug. There are several possible strategies available for dealing with dangling mock objects (see the section on recommended patterns). Further information is available in [1]_, [2]_, [3]_. Dangling Mock Detector ---------------------- All Trove unit tests should extend 'trove_testtools.TestCase'. It is a subclass of 'testtools.TestCase' which automatically checks for dangling mock objects at each test class teardown. It marks the tests as failed and reports the leaked reference if it finds any. Recommended Mocking Patterns ---------------------------- Mocking a class or object shared across multiple test cases. Use the patcher pattern in conjunction with the setUp() method [see section 26.4.3.5 of [1]_]. .. code-block:: python def setUp(self): super(CouchbaseBackupTests, self).setUp() self.exe_timeout_patch = patch.object(utils, 'execute_with_timeout') # The patcher is stopped automatically on teardown; each test case # starts it explicitly when the mock is needed. self.addCleanup(self.exe_timeout_patch.stop) def test_case(self): mock_exe_timeout = self.exe_timeout_patch.start() If the mock object is required in the majority of test cases the following pattern may be more efficient. .. code-block:: python def setUp(self): super(CouchbaseBackupTests, self).setUp() self.exe_timeout_patch = patch.object(utils, 'execute_with_timeout') self.addCleanup(self.exe_timeout_patch.stop) self.mock_exe_timeout = self.exe_timeout_patch.start() def test_case(self): # All test cases can now reference 'self.mock_exe_timeout'. - Note also ``patch.stopall()``: this method stops all active patches that were started with ``start()``. Mocking a class or object for a single entire test case. Use the decorator pattern. .. code-block:: python @patch.object(utils, 'execute_with_timeout') @patch.object(os, 'popen') def test_case(self, popen_mock, execute_with_timeout_mock): pass @patch.multiple(utils, execute_with_timeout=DEFAULT, generate_random_password=MagicMock(return_value=1)) def test_case(self, execute_with_timeout): # Only the attributes patched with DEFAULT are passed in as # keyword arguments; 'generate_random_password' is replaced # but not injected. pass Mocking a class or object for a smaller scope within one test case. Use the context manager pattern. .. code-block:: python def test_case(self): # Some code using real implementation of 'generate_random_password'. with patch.object(utils, 'generate_random_password') as pwd_mock: # Using the mocked implementation of 'generate_random_password'. # Again code using the actual implementation of the method.
def test_case(self): with patch.multiple(utils, execute_with_timeout=DEFAULT, generate_random_password=MagicMock( return_value=1)) as mocks: password_mock = mocks['generate_random_password'] execute_mock = mocks['execute_with_timeout'] Mocking global configuration properties. Use the 'patch_conf_property' method from 'trove_testtools.TestCase'. .. code-block:: python def test_case(self): self.patch_conf_property('max_accepted_volume_size', 10) Datastore-specific configuration properties can be mocked by passing an optional 'section' argument to the above call. .. code-block:: python def test_case(self): self.patch_conf_property('cluster_support', False, section='redis') - Note also 'patch_datastore_manager()': the 'datastore_manager' name has to be set properly when testing datastore-specific code to ensure the correct configuration options get loaded. This is a convenience method for mocking the 'datastore_manager' name. .. code-block:: python def test_case(self): self.patch_datastore_manager('cassandra') References ---------- .. [1] Mock Guide: https://docs.python.org/3/library/unittest.mock.html .. [2] Python Mock Gotchas: http://alexmarandon.com/articles/python_mock_gotchas/ .. [3] Mocking Mistakes: http://engineroom.trackmaven.com/blog/mocking-mistakes/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/index.rst0000644000175000017500000000260600000000000020061 0ustar00coreycorey00000000000000================================= Welcome to Trove's documentation! ================================= Trove is Database as a Service for OpenStack. It's designed to run entirely on OpenStack, with the goal of allowing users to quickly and easily utilize the features of a relational database without the burden of handling complex administrative tasks. Cloud users and database administrators can provision and manage multiple database instances as needed. Initially, the service will focus on providing resource isolation at high performance while automating complex administrative tasks including deployment, configuration, patching, backups, restores, and monitoring. For an in-depth look at the project's design and structure, see the :doc:`contributor/design` page. .. toctree:: :maxdepth: 2 install/index user/index admin/index cli/index contributor/index reference/index * Source Code Repositories - `Trove`_ - `Trove Client`_ * `Trove API Documentation`_ on docs.openstack.org * `Trove storyboard`_ on storyboard.openstack.org Search Trove Documentation ========================== * :ref:`search` .. _Trove: https://opendev.org/openstack/trove .. _Trove Client: https://opendev.org/openstack/python-troveclient .. _Trove API Documentation: https://docs.openstack.org/api-ref/database/ .. _Trove storyboard: https://storyboard.openstack.org/#!/project/openstack/trove ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.680109 trove-12.1.0.dev92/doc/source/install/0000755000175000017500000000000000000000000017662 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/apache-mod-wsgi.rst0000644000175000017500000000272400000000000023366 0ustar00coreycorey00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License.
You can view the license at: https://creativecommons.org/licenses/by/3.0/ Installing API behind mod_wsgi ============================== #. Install the Apache Service:: RHEL7/CentOS7: sudo yum install httpd mod_wsgi RHEL8/CentOS8: sudo dnf install httpd python3-mod_wsgi Fedora: sudo dnf install httpd mod_wsgi Debian/Ubuntu: sudo apt-get install apache2 libapache2-mod-wsgi-py3 #. Copy ``etc/apache2/trove`` under the apache sites:: Fedora/RHEL/CentOS: sudo cp etc/apache2/trove /etc/httpd/conf.d/trove-api.conf Debian/Ubuntu: sudo cp etc/apache2/trove /etc/apache2/sites-available/trove-api.conf #. Edit the copied ``trove-api.conf`` according to your installation and environment. * Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and ``group`` values to an appropriate user on your server. * Modify the ``WSGIScriptAlias`` directive to point to the trove/api/app.wsgi script. * Modify the ``Directory`` directive to set the path to the Trove API code. * Modify the ``ErrorLog`` and ``CustomLog`` directives to redirect the logs to the right directory. #. Enable the Apache trove site and reload:: Fedora/RHEL7/CentOS7: sudo systemctl reload httpd Debian/Ubuntu: sudo a2ensite trove-api sudo service apache2 reload ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/common_configure.txt0000644000175000017500000000577600000000000023763 0ustar00coreycorey000000000000002. In the ``/etc/trove`` directory, edit the ``trove.conf`` file. Here is an example: .. code-block:: ini [DEFAULT] network_driver = trove.network.neutron.NeutronDriver management_networks = ef7541ad-9599-4285-878a-e0ab62032b03 management_security_groups = d0d797f7-11d4-436e-89a3-ac8bca829f81 cinder_volume_type = lvmdriver-1 nova_keypair = trove-mgmt default_datastore = mysql taskmanager_manager = trove.taskmanager.manager.Manager trove_api_workers = 5 transport_url = rabbit://stackrabbit:password@192.168.1.34:5672/ control_exchange = trove rpc_backend = rabbit reboot_time_out = 300 usage_timeout = 900 agent_call_high_timeout = 1200 use_syslog = False debug = True [keystone_authtoken] memcached_servers = localhost:11211 cafile = /devstack/stack/data/ca-bundle.pem project_domain_name = Default project_name = service user_domain_name = Default password = password username = trove auth_url = http://192.168.1.34/identity auth_type = password [service_credentials] auth_url = http://192.168.1.34/identity/v3 region_name = RegionOne project_name = service password = password project_domain_name = Default user_domain_name = Default username = trove [database] connection = mysql+pymysql://root:password@127.0.0.1/trove?charset=utf8 [mariadb] tcp_ports = 3306,4444,4567,4568 [mysql] tcp_ports = 3306 [postgresql] tcp_ports = 5432 3. Verify that the ``api-paste.ini`` file is present in ``/etc/trove``. If the file is not present, you can get it from this `location <https://opendev.org/openstack/trove/raw/branch/master/etc/trove/api-paste.ini>`__. 4. Edit the ``/etc/trove/trove-guestagent.conf`` file so that future trove guests can connect to your OpenStack environment. Here is an example: ..
code-block:: ini [DEFAULT] log_file = trove-guestagent.log log_dir = /var/log/trove/ ignore_users = os_admin control_exchange = trove transport_url = rabbit://stackrabbit:password@172.24.5.1:5672/ rpc_backend = rabbit command_process_timeout = 60 use_syslog = False debug = True [service_credentials] auth_url = http://192.168.1.34/identity/v3 region_name = RegionOne project_name = service password = password project_domain_name = Default user_domain_name = Default username = trove 5. Populate the trove database you created earlier in this procedure: .. code-block:: console # su -s /bin/sh -c "trove-manage db_sync" trove ... 2016-04-06 22:00:17.771 10706 INFO trove.db.sqlalchemy.migration [-] Upgrading mysql+pymysql://trove:dbaasdb@controller/trove to version latest .. note:: Ignore any deprecation messages in this output. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/install/common_prerequisites.txt0000644000175000017500000001322700000000000024704 0ustar00coreycorey00000000000000Prerequisites ------------- Before you install and configure the Database service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p * Create the ``trove`` database: .. code-block:: console CREATE DATABASE trove; * Grant proper access to the ``trove`` database: .. code-block:: console GRANT ALL PRIVILEGES ON trove.* TO 'trove'@'localhost' \ IDENTIFIED BY 'TROVE_DBPASS'; GRANT ALL PRIVILEGES ON trove.* TO 'trove'@'%' \ IDENTIFIED BY 'TROVE_DBPASS'; Replace ``TROVE_DBPASS`` with a suitable password. * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: * Create the ``trove`` user: .. code-block:: console $ openstack user create --domain default --password-prompt trove User Password: Repeat User Password: +-----------+-----------------------------------+ | Field | Value | +-----------+-----------------------------------+ | domain_id | default | | enabled | True | | id | ca2e175b851943349be29a328cc5e360 | | name | trove | +-----------+-----------------------------------+ * Add the ``admin`` role to the ``trove`` user: .. code-block:: console $ openstack role add --project service --user trove admin .. note:: This command provides no output. * Create the ``trove`` service entity: .. code-block:: console $ openstack service create --name trove \ --description "Database" database +-------------+-----------------------------------+ | Field | Value | +-------------+-----------------------------------+ | description | Database | | enabled | True | | id | 727841c6f5df4773baa4e8a5ae7d72eb | | name | trove | | type | database | +-------------+-----------------------------------+ #. Create the Database service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ database public http://controller:8779/v1.0/%\(tenant_id\)s +--------------+----------------------------------------------+ | Field | Value | +--------------+----------------------------------------------+ | enabled | True | | id | 3f4dab34624e4be7b000265f25049609 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 727841c6f5df4773baa4e8a5ae7d72eb | | service_name | trove | | service_type | database | | url | http://controller:8779/v1.0/%\(tenant_id\)s | +--------------+----------------------------------------------+ $ openstack endpoint create --region RegionOne \ database internal http://controller:8779/v1.0/%\(tenant_id\)s +--------------+----------------------------------------------+ | Field | Value | +--------------+----------------------------------------------+ | enabled | True | | id | 9489f78e958e45cc85570fec7e836d98 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 727841c6f5df4773baa4e8a5ae7d72eb | | service_name | trove | | service_type | database | | url | http://controller:8779/v1.0/%\(tenant_id\)s | +--------------+----------------------------------------------+ $ openstack endpoint create --region RegionOne \ database admin http://controller:8779/v1.0/%\(tenant_id\)s +--------------+----------------------------------------------+ | Field | Value | +--------------+----------------------------------------------+ | enabled | True | | id | 76091559514b40c6b7b38dde790efe99 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 727841c6f5df4773baa4e8a5ae7d72eb | | service_name | trove | | service_type | database | | url | http://controller:8779/v1.0/%\(tenant_id\)s | +--------------+----------------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/install/dashboard.rst0000644000175000017500000000134200000000000022343 0ustar00coreycorey00000000000000 .. _dashboard: Install and configure the Trove dashboard ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Installation of the Trove dashboard for Horizon is straightforward. It is best to install it via pip. .. code-block:: console # pip install trove-dashboard * The command above will install the latest version, which is appropriate if you are running the latest Trove. If you are running an earlier version of Trove, you may need to specify a compatible version of trove-dashboard. * After pip installs it, locate the trove-dashboard directory and copy the contents of the ``enabled/`` directory to your Horizon ``openstack_dashboard/local/enabled/`` directory. * Reload Apache to pick up the changes to Horizon. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/get_started.rst0000644000175000017500000000620100000000000022720 0ustar00coreycorey00000000000000========================= Database service overview ========================= The Database service provides scalable and reliable cloud provisioning functionality for both relational and non-relational database engines. Users can quickly and easily use database features without the burden of handling complex administrative tasks. Cloud users and database administrators can provision and manage multiple database instances as needed.
The Database service provides resource isolation at high performance levels and automates complex administrative tasks such as deployment, configuration, patching, backups, restores, and monitoring. **Process flow example** This example is a high-level process flow for using Database services: #. The OpenStack Administrator configures the basic infrastructure using the following steps: #. Install the Database service. #. Create an image for each type of database. For example, one for MySQL and one for MongoDB. #. Use the :command:`trove-manage` command to import images and offer them to tenants. #. The OpenStack end user deploys the Database service using the following steps: #. Create a Database service instance using the ``openstack database instance create`` command. #. Use the :command:`openstack database instance list` command to get the ID of the instance, followed by the :command:`openstack database instance show` command to get its IP address. #. Access the Database service instance using typical database access commands. For example, with MySQL: .. code-block:: console $ mysql -u myuser -p -h TROVE_IP_ADDRESS mydb **Components** The Database service includes the following components: ``python-troveclient`` command-line client A CLI that communicates with the ``trove-api`` component. ``trove-api`` component This component is responsible for providing the RESTful API. It talks to the task manager for complex tasks, but it can also talk to the guest agent directly to perform simple tasks, such as retrieving databases or users from a trove instance. ``trove-conductor`` service The conductor component is responsible for updating the Trove backend database with the information that the guest agent sends regarding the instances. It eliminates the need for direct database access by all the guest agents for updating information. ``trove-taskmanager`` service The task manager is the engine responsible for doing the majority of the work. It is responsible for provisioning instances, managing the life cycle, and performing different operations. The task manager normally sends common commands to the trove guest agent, which are of an abstract nature; it is the responsibility of the guest agent to read them and issue database-specific commands in order to execute them. ``trove-guestagent`` service The guest agent runs inside the Nova instances that are used to run the database engines. The agent listens to the messaging bus for the topic and is responsible for actually translating and executing the commands that are sent to it by the task manager component for the particular datastore. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/index.rst0000644000175000017500000000067000000000000021526 0ustar00coreycorey00000000000000================ Database service ================ The Database service (Trove) provides scalable and reliable Cloud Database as a Service provisioning functionality for both relational and non-relational database engines. .. toctree:: get_started.rst apache-mod-wsgi.rst install-devstack.rst install-manual.rst install-ubuntu.rst install-redhat.rst install-suse.rst dashboard.rst verify.rst next-steps.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/install-devstack.rst0000644000175000017500000000765400000000000023674 0ustar00coreycorey00000000000000..
Copyright 2019 Catalyst Cloud All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Install Trove in DevStack ========================= This page describes how to set up a working development environment that can be used to deploy Trove on the latest releases of Ubuntu. Following these instructions will give you a fully functional Trove environment using DevStack on Ubuntu 16.04 or 18.04. Configure DevStack with Trove ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Trove can be enabled in devstack by using the plug-in based interface it offers. .. note:: The following steps have been fully verified both on Ubuntu 16.04 and 18.04. Start by cloning the devstack repository using a non-root user (the default user is ``ubuntu``) and change to the devstack directory: .. code-block:: console git clone https://opendev.org/openstack/devstack cd devstack/ Create the ``local.conf`` file with the following minimal devstack configuration; change ``HOST_IP`` to your own devstack host IP address: .. code-block:: ini [[local|localrc]] RECLONE=False HOST_IP=<your-devstack-host-ip> enable_plugin trove https://opendev.org/openstack/trove enable_plugin trove-dashboard https://opendev.org/openstack/trove-dashboard LIBS_FROM_GIT+=,python-troveclient DATABASE_PASSWORD=password ADMIN_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password RABBIT_PASSWORD=password LOGFILE=$DEST/logs/stack.sh.log VERBOSE=True LOG_COLOR=False LOGDAYS=1 IPV4_ADDRS_SAFE_TO_USE=10.111.0.0/26 FIXED_RANGE=10.111.0.0/26 NETWORK_GATEWAY=10.111.0.1 FLOATING_RANGE=172.30.5.0/24 PUBLIC_NETWORK_GATEWAY=172.30.5.1 # Pre-requisites ENABLED_SERVICES=rabbit,mysql,key # Horizon enable_service horizon # Nova enable_service n-api enable_service n-cpu enable_service n-cond enable_service n-sch enable_service n-api-meta enable_service placement-api enable_service placement-client # Glance enable_service g-api enable_service g-reg # Cinder enable_service cinder enable_service c-api enable_service c-vol enable_service c-sch # Neutron enable_service q-svc enable_service q-agt enable_service q-dhcp enable_service q-l3 enable_service q-meta # enable DVR Q_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan Q_DVR_MODE=legacy # Swift ENABLED_SERVICES+=,swift SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 SWIFT_REPLICAS=1 Take a look at the options you could use to customize the Trove installation. Running devstack ~~~~~~~~~~~~~~~~ Run the ``stack.sh`` script: .. code-block:: console ./stack.sh After it completes, you can see that there is a MySQL datastore available for creating Trove instances: .. code-block:: console $ openstack datastore version list mysql +--------------------------------------+------------------+ | ID | Name | +--------------------------------------+------------------+ | 9726354d-f989-4a68-9c5f-6e37b1bccc74 | 5.7 | | f81a8448-2f6e-4746-8d97-866ab7dcccee | inactive_version | +--------------------------------------+------------------+ Create your first Trove instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Refer to `Create and access a database <https://docs.openstack.org/trove/latest/user/create-db.html>`_ for the detailed steps.
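As a quick sanity check after ``stack.sh`` finishes, you can try booting a database instance against the datastore listed above. This is only a sketch: the instance name is arbitrary, and the flavor ID and network ID shown as angle-bracket placeholders will differ in your environment.

.. code-block:: console

    $ openstack database instance create test-instance <flavor-id> \
        --size 1 \
        --nic net-id=<private-network-id> \
        --datastore mysql --datastore_version 5.7 \
        --databases test --users userA:password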
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/install-manual.rst0000644000175000017500000001560000000000000023337 0ustar00coreycorey00000000000000.. _install-manual: Manual Trove Installation ========================= Objectives ~~~~~~~~~~ This document provides a step-by-step guide for manual installation of Trove with an existing OpenStack environment for development purposes. This document will not cover OpenStack setup for other services. Requirements ~~~~~~~~~~~~ A running OpenStack environment installed on Ubuntu 16.04 or 18.04 LTS is required, including the following components: - Compute (Nova) - Image Service (Glance) - Identity (Keystone) - Network (Neutron) - If you want to provision databases on block-storage volumes, you also need Block Storage (Cinder) - If you want to do backup/restore or replication, you also need Object Storage (Swift) - AMQP service (RabbitMQ or QPID) - MySQL (SQLite, PostgreSQL) database Networking requirements ----------------------- Trove makes use of a "Management Network" that the controller uses to talk to trove instances and vice versa. All the trove instances that Trove deploys will have interfaces and IP addresses on this network. Therefore, it’s important that the subnet deployed on this network be sufficiently large to allow for the maximum number of trove instances and controllers likely to be deployed throughout the lifespan of the cloud installation. You must also create a Neutron security group which will be applied to the trove instance ports created on the management network. The cloud admin has full control of the security group, e.g. it can be helpful to allow SSH access to the trove instance from the controller for troubleshooting purposes (i.e. TCP port 22), though this is not strictly necessary in production environments. Finally, you need to add routing or interfaces to this network so that the Trove controller is able to communicate with Nova servers on this network. Trove Installation ~~~~~~~~~~~~~~~~~~ Required packages for Trove --------------------------- List of packages to be installed: .. code-block:: bash $ sudo apt-get install -y build-essential python-dev libpython-dev \ python-setuptools libffi-dev libxslt1-dev libxml2-dev libyaml-dev \ libssl-dev zlib1g-dev mysql-client python-pymysql libmysqlclient-dev git Python settings --------------- Install pip: .. code-block:: bash curl -SO# https://bootstrap.pypa.io/get-pip.py && sudo python get-pip.py pip==9.0.3 && rm -f get-pip.py Install virtualenv, create the Trove environment, and activate it: .. code-block:: bash pip install virtualenv --user virtualenv --system-site-packages trove_env source trove_env/bin/activate Get Trove --------- Obtain the Trove source components from OpenStack repositories: .. code-block:: bash cd ~ git clone https://opendev.org/openstack/trove.git git clone https://opendev.org/openstack/python-troveclient.git Install Trove ------------- First, install the requirements: .. code-block:: bash cd ~/trove sudo pip install -r requirements.txt -r test-requirements.txt Then, install Trove: .. code-block:: bash sudo pip install -e . Finally, install the Trove client: .. code-block:: bash cd ~/python-troveclient sudo pip install -e . cd ~ Other required OpenStack clients (python-novaclient, python-keystoneclient, etc.) should already be installed as part of the Trove requirements. Prepare Trove for OpenStack --------------------------- ..
note:: You need to run the following commands using OpenStack admin credentials. #. Create the Trove service user with the admin role in the ``service`` project. .. code-block:: bash openstack user create trove --project service --password-prompt openstack role add --user trove --project service admin #. Register Trove in Keystone. .. code-block:: bash openstack service create --name trove --description "Database" database openstack endpoint create --region RegionOne database public 'http://<service_ip>:8779/v1.0/$(tenant_id)s' openstack endpoint create --region RegionOne database admin 'http://<service_ip>:8779/v1.0/$(tenant_id)s' openstack endpoint create --region RegionOne database internal 'http://<service_ip>:8779/v1.0/$(tenant_id)s' Where ``<service_ip>`` is the IP address of the server where Trove was installed. This IP should be reachable from any hosts that will be used to communicate with Trove. Trove configuration ~~~~~~~~~~~~~~~~~~~ There are several configuration files for Trove; you can find samples of them in ``etc/trove/`` of the Trove repo: - api-paste.ini — For the trove-api service - trove.conf — For the trove-api, trove-taskmanager, trove-conductor services - trove-guestagent.conf — For the trove-guestagent service - ``<datastore_manager>.cloudinit`` — Userdata for the trove instance during provisioning Options in trove.conf --------------------- #. Service tenant credentials; change the values according to your own environment. .. code-block:: ini [service_credentials] auth_url = <keystone_auth_url> username = admin password = password user_domain_name = default project_name = admin project_domain_name = default region_name = RegionOne #. Management config options. management_networks Trove management network ID list. Cloud admin needs to create the networks. management_security_groups Security group IDs that are applied to the management port in the trove instance. Cloud admin needs to create the security groups. nova_keypair The Nova keypair used to create trove instances. Cloud admin needs to create the keypair. cinder_volume_type The Cinder volume type name used to create the volume that is attached to the trove instance; otherwise, users need to provide the volume type when creating the instance. Prepare Trove database ~~~~~~~~~~~~~~~~~~~~~~ Create the Trove database schema: - Connect to the storage backend (MySQL, PostgreSQL) - Create a database called `trove` (this database will be used for storing Trove ORM) - Compose the connection string. Example: ``mysql+pymysql://<user>:<password>@<host>:<port>/<database>`` Initialize the database ----------------------- Once the database for Trove is created, its structure needs to be populated. .. code-block:: bash $ trove-manage db_sync Create and register Trove guest image ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To build the Trove guest image, refer to `Build guest agent image <https://docs.openstack.org/trove/latest/admin/building_guest_images.html>`_. Run Trove ~~~~~~~~~ Starting Trove services ----------------------- Run trove-api: .. code-block:: bash $ trove-api --config-file=${TROVE_CONF_DIR}/trove.conf & Run trove-taskmanager: .. code-block:: bash $ trove-taskmanager --config-file=${TROVE_CONF_DIR}/trove.conf & Run trove-conductor: .. code-block:: bash $ trove-conductor --config-file=${TROVE_CONF_DIR}/trove.conf & ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/install-redhat.rst0000644000175000017500000000174300000000000023334 0ustar00coreycorey00000000000000.. _install-rdo: Install and configure for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ..
warning:: This guide is not tested since stable/train. This section describes how to install and configure the Database service for Red Hat Enterprise Linux 7 and CentOS 7. .. include:: common_prerequisites.txt Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # yum install openstack-trove python-troveclient .. include:: common_configure.txt Finalize installation --------------------- Start the Database services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-trove-api.service \ openstack-trove-taskmanager.service \ openstack-trove-conductor.service # systemctl start openstack-trove-api.service \ openstack-trove-taskmanager.service \ openstack-trove-conductor.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/install-suse.rst0000644000175000017500000000231200000000000023035 0ustar00coreycorey00000000000000.. _install-obs: Install and configure for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. warning:: This guide is not tested since stable/train. This section describes how to install and configure the Database service for openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 SP2. .. include:: common_prerequisites.txt Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # zypper --quiet --non-interactive install python-oslo.db \ python-MySQL-python # zypper --quiet --non-interactive install openstack-trove-api \ openstack-trove-taskmanager openstack-trove-conductor \ openstack-trove-guestagent .. include:: common_configure.txt Finalize installation --------------------- Start the Database services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-trove-api.service \ openstack-trove-taskmanager.service \ openstack-trove-conductor.service # systemctl start openstack-trove-api.service \ openstack-trove-taskmanager.service \ openstack-trove-conductor.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/install-ubuntu.rst0000644000175000017500000000142200000000000023401 0ustar00coreycorey00000000000000.. _install-ubuntu: Install and configure for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Database service for Ubuntu 14.04 (LTS). .. include:: common_prerequisites.txt Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # apt-get update # apt-get install python-trove python-troveclient \ python-glanceclient trove-common trove-api trove-taskmanager \ trove-conductor .. include:: common_configure.txt Finalize installation --------------------- 1. Restart the Database services: .. code-block:: console # service trove-api restart # service trove-taskmanager restart # service trove-conductor restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/install/next-steps.rst0000644000175000017500000000034300000000000022526 0ustar00coreycorey00000000000000.. _trove-next-steps: Next steps ~~~~~~~~~~ Your OpenStack environment now includes Database services. 
To add more services, see the `additional OpenStack install documentation <https://docs.openstack.org/install-guide/>`_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/install/verify.rst0000644000175000017500000000217600000000000021726 0ustar00coreycorey00000000000000.. _trove-verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Database service. .. note:: Perform these commands on the node where you installed trove. #. Source the ``admin`` tenant credentials: .. code-block:: console $ . admin-openrc #. Check that the ``openstack database instance list`` command works. .. code-block:: console $ openstack database instance list #. Add a datastore to trove: * `Create and upload trove guest image <https://docs.openstack.org/trove/latest/admin/building_guest_images.html>`_. Create an image for the type of database you want to use, for example, MySQL, MariaDB, etc. * Create a datastore. You need to create a separate datastore for each type of database you want to use, for example, MySQL, MongoDB, Cassandra. This example shows you how to create a datastore for a MySQL database: .. code-block:: console $ trove-manage datastore_update mysql "" $ trove-manage datastore_version_update mysql 5.7 mysql $imageid "" 1 #. Create a database `instance <https://docs.openstack.org/trove/latest/user/create-db.html>`_. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6841092 trove-12.1.0.dev92/doc/source/reference/0000755000175000017500000000000000000000000020152 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/reference/index.rst0000644000175000017500000000020300000000000022006 0ustar00coreycorey00000000000000================== Reference Guides ================== .. toctree:: :maxdepth: 1 notifier.rst trove_api_extensions.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/reference/notifier.rst0000644000175000017500000000022000000000000022515 0ustar00coreycorey00000000000000========================== Available Notifier Drivers ========================== .. list-plugins:: oslo.messaging.notify.drivers :detailed: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/reference/trove_api_extensions.rst0000644000175000017500000000016500000000000025155 0ustar00coreycorey00000000000000==================== Trove API Extensions ==================== .. list-plugins:: trove.api.extensions :detailed: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6841092 trove-12.1.0.dev92/doc/source/user/0000755000175000017500000000000000000000000017172 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/backup-db-incremental.rst0000644000175000017500000001376000000000000024062 0ustar00coreycorey00000000000000======================= Use incremental backups ======================= Incremental backups let you chain together a series of backups. You start with a regular backup. Then, when you want to create a subsequent incremental backup, you specify the parent backup. Restoring a database instance from an incremental backup is the same as creating a database instance from a regular backup—the Database service handles the complexities of applying the chain of incremental backups.
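In outline, each incremental backup names its parent, and a restore only ever needs the ID of the newest backup in the chain. The IDs below are purely illustrative; the sections that follow walk through the real commands and their output:

.. code-block:: console

    $ openstack database backup create INSTANCE_ID backup1
    $ openstack database backup create INSTANCE_ID backup1.1 --parent BACKUP1_ID
    $ openstack database backup create INSTANCE_ID backup1.2 --parent BACKUP1_1_ID
    $ openstack database instance create guest2 10 --size 1 --backup BACKUP1_2_ID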
This example shows you how to use incremental backups with a MySQL database. **Assumptions.** Assume that you have created a regular backup for the following database instance: - Instance name: ``guest1`` - ID of the instance (``INSTANCE_ID``): ``792a6a56-278f-4a01-9997-d997fa126370`` - ID of the regular backup artifact (``BACKUP_ID``): ``6dc3a9b7-1f3e-4954-8582-3f2e4942cddd`` Create and use incremental backups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. **Create your first incremental backup** Use the :command:`openstack database backup create` command and specify: - The ``INSTANCE_ID`` of the database instance you are doing the incremental backup for (in this example, ``792a6a56-278f-4a01-9997-d997fa126370``) - The name of the incremental backup you are creating: ``backup1.1`` - The ``BACKUP_ID`` of the parent backup. In this case, the parent is the regular backup, with an ID of ``6dc3a9b7-1f3e-4954-8582-3f2e4942cddd`` .. code-block:: console $ openstack database backup create INSTANCE_ID backup1.1 --parent BACKUP_ID +-------------+--------------------------------------+ | Property | Value | +-------------+--------------------------------------+ | created | 2014-03-19T14:09:13 | | description | None | | id | 1d474981-a006-4f62-b25f-43d7b8a7097e | | instance_id | 792a6a56-278f-4a01-9997-d997fa126370 | | locationRef | None | | name | backup1.1 | | parent_id | 6dc3a9b7-1f3e-4954-8582-3f2e4942cddd | | size | None | | status | NEW | | updated | 2014-03-19T14:09:13 | +-------------+--------------------------------------+ Note that this command returns both the ID of the database instance you are incrementally backing up (``instance_id``) and a new ID for the new incremental backup artifact you just created (``id``). #. **Create your second incremental backup** The name of your second incremental backup is ``backup1.2``. This time, when you specify the parent, pass in the ID of the incremental backup you just created in the previous step (``backup1.1``). In this example, it is ``1d474981-a006-4f62-b25f-43d7b8a7097e``. .. code-block:: console $ openstack database backup create INSTANCE_ID backup1.2 --parent BACKUP_ID +-------------+--------------------------------------+ | Property | Value | +-------------+--------------------------------------+ | created | 2014-03-19T14:09:13 | | description | None | | id | bb84a240-668e-49b5-861e-6a98b67e7a1f | | instance_id | 792a6a56-278f-4a01-9997-d997fa126370 | | locationRef | None | | name | backup1.2 | | parent_id | 1d474981-a006-4f62-b25f-43d7b8a7097e | | size | None | | status | NEW | | updated | 2014-03-19T14:09:13 | +-------------+--------------------------------------+ #. **Restore using incremental backups** Now assume that your ``guest1`` database instance is damaged and you need to restore it from your incremental backups. In this example, you use the :command:`openstack database instance create` command to create a new database instance called ``guest2``. To incorporate your incremental backups, you simply use the ``--backup`` parameter to pass in the ``BACKUP_ID`` of your most recent incremental backup. The Database service handles the complexities of applying the chain of all previous incremental backups. ..
code-block:: console $ openstack database instance create guest2 10 --size 1 --backup BACKUP_ID +-------------------+-----------------------------------------------------------+ | Property | Value | +-------------------+-----------------------------------------------------------+ | created | 2014-03-19T14:10:56 | | datastore | {u'version': u'mysql-5.5', u'type': u'mysql'} | | datastore_version | mysql-5.5 | | flavor | {u'id': u'10', u'links': | | | [{u'href': u'https://10.125.1.135:8779/v1.0/ | | | 626734041baa4254ae316de52a20b390/flavors/10', u'rel': | | | u'self'}, {u'href': u'https://10.125.1.135:8779/ | | | flavors/10', u'rel': u'bookmark'}]} | | id | a3680953-eea9-4cf2-918b-5b8e49d7e1b3 | | name | guest2 | | status | BUILD | | updated | 2014-03-19T14:10:56 | | volume | {u'size': 1} | +-------------------+-----------------------------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/backup-db.rst0000644000175000017500000002477300000000000021565 0ustar00coreycorey00000000000000============================= Backup and restore a database ============================= You can use Database services to back up a database and store the backup artifact in the Object Storage service. Later on, if the original database is damaged, you can use the backup artifact to restore the database. The restore process creates a database instance. This example shows you how to back up and restore a MySQL database. #. **Back up the database instance** As background, assume that you have created a database instance with the following characteristics: - Name of the database instance: ``guest1`` - Flavor ID: ``10`` - Root volume size: ``2`` - Databases: ``db1`` and ``db2`` - Users: The ``user1`` user with the ``password`` password First, get the ID of the ``guest1`` database instance by using the :command:`openstack database instance list` command: .. code-block:: console $ openstack database instance list +--------------------------------------+--------+-----------+-------------------+--------+-----------+------+ | id | name | datastore | datastore_version | status | flavor_id | size | +--------------------------------------+--------+-----------+-------------------+--------+-----------+------+ | 97b4b853-80f6-414f-ba6f-c6f455a79ae6 | guest1 | mysql | mysql-5.5 | ACTIVE | 10 | 2 | +--------------------------------------+--------+-----------+-------------------+--------+-----------+------+ Back up the database instance by using the :command:`openstack database backup create` command. In this example, the backup is called ``backup1``; replace ``INSTANCE_ID`` with ``97b4b853-80f6-414f-ba6f-c6f455a79ae6``: .. note:: This command syntax pertains only to python-troveclient version 1.0.6 and later. Earlier versions require you to pass in the backup name as the first argument. ..
code-block:: console $ openstack database backup create INSTANCE_ID backup1 +-------------+--------------------------------------+ | Property | Value | +-------------+--------------------------------------+ | created | 2014-03-18T17:09:07 | | description | None | | id | 8af30763-61fd-4aab-8fe8-57d528911138 | | instance_id | 97b4b853-80f6-414f-ba6f-c6f455a79ae6 | | locationRef | None | | name | backup1 | | parent_id | None | | size | None | | status | NEW | | updated | 2014-03-18T17:09:07 | +-------------+--------------------------------------+ Note that the command returns both the ID of the original instance (``instance_id``) and the ID of the backup artifact (``id``). Later on, use the :command:`openstack database backup list` command to get this information: .. code-block:: console $ openstack database backup list +--------------------------------------+--------------------------------------+---------+-----------+-----------+---------------------+ | id | instance_id | name | status | parent_id | updated | +--------------------------------------+--------------------------------------+---------+-----------+-----------+---------------------+ | 8af30763-61fd-4aab-8fe8-57d528911138 | 97b4b853-80f6-414f-ba6f-c6f455a79ae6 | backup1 | COMPLETED | None | 2014-03-18T17:09:11 | +--------------------------------------+--------------------------------------+---------+-----------+-----------+---------------------+ You can get additional information about the backup by using the :command:`openstack database backup show` command and passing in the ``BACKUP_ID``, which is ``8af30763-61fd-4aab-8fe8-57d528911138``. .. code-block:: console $ openstack database backup show BACKUP_ID +-------------+----------------------------------------------------+ | Property | Value | +-------------+----------------------------------------------------+ | created | 2014-03-18T17:09:07 | | description | None | | id | 8af...138 | | instance_id | 97b...ae6 | | locationRef | http://10.0.0.1:.../.../8af...138.xbstream.gz.enc | | name | backup1 | | parent_id | None | | size | 0.17 | | status | COMPLETED | | updated | 2014-03-18T17:09:11 | +-------------+----------------------------------------------------+ #. **Restore a database instance** Now assume that your ``guest1`` database instance is damaged and you need to restore it. In this example, you use the :command:`openstack database instance create` command to create a new database instance called ``guest2``. - You specify that the new ``guest2`` instance has the same flavor (``10``) and the same root volume size (``2``) as the original ``guest1`` instance. - You use the ``--backup`` argument to indicate that this new instance is based on the backup artifact identified by ``BACKUP_ID``. In this example, replace ``BACKUP_ID`` with ``8af30763-61fd-4aab-8fe8-57d528911138``. .. code-block:: console $ openstack database instance create guest2 10 --size 2 --backup BACKUP_ID +-------------------+----------------------------------------------+ | Property | Value | +-------------------+----------------------------------------------+ | created | 2014-03-18T17:12:03 | | datastore | {u'version': u'mysql-5.5', u'type': u'mysql'}| |datastore_version | mysql-5.5 | | flavor | {u'id': u'10', u'links': [{u'href': ...]} | | id | ac7a2b35-a9b4-4ff6-beac-a1bcee86d04b | | name | guest2 | | status | BUILD | | updated | 2014-03-18T17:12:03 | | volume | {u'size': 2} | +-------------------+----------------------------------------------+ #. 
**Verify backup** Now check that the new ``guest2`` instance has the same characteristics as the original ``guest1`` instance. Start by getting the ID of the new ``guest2`` instance. .. code-block:: console $ openstack database instance list +-----------+--------+-----------+-------------------+--------+-----------+------+ | id | name | datastore | datastore_version | status | flavor_id | size | +-----------+--------+-----------+-------------------+--------+-----------+------+ | 97b...ae6 | guest1 | mysql | mysql-5.5 | ACTIVE | 10 | 2 | | ac7...04b | guest2 | mysql | mysql-5.5 | ACTIVE | 10 | 2 | +-----------+--------+-----------+-------------------+--------+-----------+------+ Use the :command:`openstack database instance show` command to display information about the new ``guest2`` instance. Pass in ``guest2``'s ``INSTANCE_ID``, which is ``ac7a2b35-a9b4-4ff6-beac-a1bcee86d04b``. .. code-block:: console $ openstack database instance show INSTANCE_ID +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2014-03-18T17:12:03 | | datastore | mysql | | datastore_version | mysql-5.5 | | flavor | 10 | | id | ac7a2b35-a9b4-4ff6-beac-a1bcee86d04b | | ip | 10.0.0.3 | | name | guest2 | | status | ACTIVE | | updated | 2014-03-18T17:12:06 | | volume | 2 | | volume_used | 0.18 | +-------------------+--------------------------------------+ Note that the data store, flavor ID, and volume size have the same values as in the original ``guest1`` instance. Use the :command:`openstack database db list` command to check that the original databases (``db1`` and ``db2``) are present on the restored instance. .. code-block:: console $ openstack database db list INSTANCE_ID +--------------------+ | name | +--------------------+ | db1 | | db2 | | performance_schema | | test | +--------------------+ Use the :command:`openstack database user list` command to check that the original user (``user1``) is present on the restored instance. .. code-block:: console $ openstack database user list INSTANCE_ID +--------+------+-----------+ | name | host | databases | +--------+------+-----------+ | user1 | % | db1, db2 | +--------+------+-----------+ #. **Notify users** Tell the users who were accessing the now-disabled ``guest1`` database instance that they can now access ``guest2``. Provide them with ``guest2``'s name, IP address, and any other information they might need. (You can get this information by using the :command:`openstack database instance show` command.) #. **Clean up** At this point, you might want to delete the disabled ``guest1`` instance by using the :command:`openstack database instance delete` command. .. code-block:: console $ openstack database instance delete INSTANCE_ID ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/create-db.rst0000644000175000017500000001662700000000000021563 0ustar00coreycorey00000000000000.. _create_db: ============================ Create and access a database ============================ Assume that you have installed the Database service and populated your data store with images for the types and versions of databases that you want. This example shows you how to create and access a MySQL 5.7 database. Create and access a database ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #.
**Determine which flavor to use for your database**

   When you create a database instance, you must specify a nova flavor. The
   flavor indicates various characteristics of the instance, such as RAM and
   root volume size. You need to create or obtain nova flavors that meet the
   minimum requirements for databases.

   The first step is to list flavors by using the
   :command:`openstack flavor list` command.

   .. code-block:: console

      $ openstack flavor list

   Now take a look at the minimum requirements for various database instances:

   +--------------------+--------------------+--------------------+--------------------+
   | Database           | RAM (MB)           | Disk (GB)          | VCPUs              |
   +====================+====================+====================+====================+
   | MySQL              | 512                | 5                  | 1                  |
   +--------------------+--------------------+--------------------+--------------------+
   | Cassandra          | 2048               | 5                  | 1                  |
   +--------------------+--------------------+--------------------+--------------------+
   | MongoDB            | 1024               | 5                  | 1                  |
   +--------------------+--------------------+--------------------+--------------------+
   | Redis              | 512                | 5                  | 1                  |
   +--------------------+--------------------+--------------------+--------------------+

   - If you have a custom flavor that meets the needs of the database that
     you want to create, proceed to :ref:`Step 2 <create-database-instance>`
     and use that flavor.

   - If your environment does not have a suitable flavor, an administrative
     user must create a custom flavor by using the
     :command:`openstack flavor create` command.

   **MySQL example.** This example creates a flavor that you can use with a
   MySQL database. This example has the following attributes:

   - Flavor name: ``mysql-minimum``
   - Flavor ID: You must use an ID that is not already in use. In this
     example, IDs 1 through 5 are in use, so use ID ``6``.
   - RAM: ``512``
   - Root volume size in GB: ``5``
   - Virtual CPUs: ``1``

   .. code-block:: console

      $ openstack flavor create mysql-minimum --id 6 --ram 512 --disk 5 --vcpus 1
      +----------------------------+---------------+
      | Field                      | Value         |
      +----------------------------+---------------+
      | OS-FLV-DISABLED:disabled   | False         |
      | OS-FLV-EXT-DATA:ephemeral  | 0             |
      | disk                       | 5             |
      | id                         | 6             |
      | name                       | mysql-minimum |
      | os-flavor-access:is_public | True          |
      | properties                 |               |
      | ram                        | 512           |
      | rxtx_factor                | 1.0           |
      | swap                       |               |
      | vcpus                      | 1             |
      +----------------------------+---------------+

.. _create-database-instance:

#. **Create a database instance**

   This example creates a database instance with the following parameters:

   - Name of the instance: ``mysql_instance_1``
   - Database flavor: ``6``
   - A volume size of ``5`` (5 GB)
   - A database named ``test``
   - The database is based on the ``mysql`` data store and the ``5.7``
     datastore version
   - A user named ``userA`` with the password ``password``.
   - A Neutron network ``8799cf10-01ef-40e2-b04e-06da7cfa5668`` to allocate
     the database IP address (for internal access).
   - Expose the instance to the public via ``--is-public`` (for external
     access). Omit this parameter if you don't want to expose the database
     service to the public internet.
   - Only the IP addresses coming from ``202.37.199.1/24`` or ``10.1.0.1/24``
     are allowed to access the database service.

   ..
code-block:: console $ openstack database instance create mysql_instance_1 \ 6 \ --size 5 \ --nic net-id=8799cf10-01ef-40e2-b04e-06da7cfa5668 \ --databases test --users userA:password \ --datastore mysql --datastore_version 5.7 \ --is-public \ --allowed-cidr 10.1.0.1/24 \ --allowed-cidr 202.37.199.1/24 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | created | 2019-09-11T09:19:18 | | datastore | mysql | | datastore_version | 5.7 | | flavor | 6 | | id | 4bca2f27-f986-419e-ab4a-df1db399d590 | | name | mysql_instance_1 | | region | RegionOne | | status | BUILD | | updated | 2019-09-11T09:19:18 | | volume | 5 | +-------------------+--------------------------------------+ #. **Get the IP address of the database instance** Both internal and external IP addresses can be shown by running: .. code-block:: console $ openstack database instance show 4bca2f27-f986-419e-ab4a-df1db399d590 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | created | 2019-09-11T07:14:37 | | datastore | mysql | | datastore_version | 5.7 | | flavor | 6 | | id | 4bca2f27-f986-419e-ab4a-df1db399d590 | | ip | 10.1.0.14, 172.24.5.15 | | name | mysql_instance_1 | | region | RegionOne | | status | ACTIVE | | updated | 2019-09-11T07:14:47 | | volume | 5 | | volume_used | 0.12 | +-------------------+--------------------------------------+ #. **Access the new database** You can now access the new database you just created by using typical database access commands. In this MySQL example, replace ``IP_ADDRESS`` with either 10.1.0.14 or 172.24.5.15 according to where the command is running. Make sure your IP address is in the allowed CIDRs specified in the above command. .. code-block:: console $ mysql -h IP_ADDRESS -uuserA -ppassword ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/index.rst0000644000175000017500000000115000000000000021030 0ustar00coreycorey00000000000000=========================== Database Service User Guide =========================== The Database service provides scalable and reliable cloud provisioning functionality for both relational and non-relational database engines. Users can quickly and easily use database features without the burden of handling complex administrative tasks. .. toctree:: :maxdepth: 1 instance-status.rst create-db.rst manage-db-and-users.rst backup-db.rst backup-db-incremental.rst manage-db-config.rst set-up-replication.rst set-up-clustering.rst upgrade-datastore.rst upgrade-cluster-datastore.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/instance-status.rst0000644000175000017500000000162500000000000023055 0ustar00coreycorey00000000000000======================== Database instance status ======================== HEALTHY The database service is functional, e.g. table is accessible. RUNNING The database service is alive, but maybe not functional yet. SHUTDOWN The database service is stopped. NEW The database service creation request is just received by Trove. BUILD The database service is being installed. BLOCKED The database service process exists but service is not accessible for some reason. PROMOTE Trove is replicating data between a replication group in order to promote a new master instance. 
EJECT
    The master election is happening within a replication group.

RESTART_REQUIRED
    The database service needs to restart, e.g. due to a configuration change.

FAILED
    The database service failed to spawn.

ERROR
    There are errors in the running database service.

DELETED
    The database service is deleted.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/manage-db-and-users.rst0000644000175000017500000000653300000000000023445 0ustar00coreycorey00000000000000
=============================================
Manage databases and users on Trove instances
=============================================

Assume that you have installed the Trove service and uploaded guest images
for the datastore of your choice. This section shows how to manage users and
databases in a MySQL 5.7 instance.

Add new database and user to an existing Trove instance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Trove provides an API to manage users and databases on datastores, both
relational (e.g. MySQL, PostgreSQL) and non-relational (e.g. Redis,
Cassandra). Once a Trove instance with the datastore of your choice is
active, you can use the Trove API to create new databases and users.

.. code-block:: console

   $ openstack database user list db-instance
   +------+------+-----------+
   | Name | Host | Databases |
   +------+------+-----------+
   | test | %    | testdb    |
   +------+------+-----------+
   $ openstack database user create db-instance newuser userpass --databases testdb
   $ openstack database user list db-instance
   +---------+------+-----------+
   | Name    | Host | Databases |
   +---------+------+-----------+
   | newuser | %    | testdb    |
   | test    | %    | testdb    |
   +---------+------+-----------+
   $ mysql -h 172.24.4.199 -u newuser -p testdb
   Enter password:
   mysql> show databases;
   +--------------------+
   | Database           |
   +--------------------+
   | information_schema |
   | testdb             |
   +--------------------+
   2 rows in set (0.00 sec)
   $ openstack database db create db-instance newdb
   $ openstack database db list db-instance
   +--------+
   | Name   |
   +--------+
   | newdb  |
   | sys    |
   | testdb |
   +--------+
   $ mysql -h 172.24.4.199 -u newuser -p newdb
   Enter password:
   ERROR 1044 (42000): Access denied for user 'newuser'@'%' to database 'newdb'

Manage access to databases
~~~~~~~~~~~~~~~~~~~~~~~~~~

With the Trove API you can grant and revoke database access rights for
existing users.

.. code-block:: console

   $ openstack database user grant access db-instance newuser newdb
   $ openstack database user show access db-instance newuser
   +--------+
   | Name   |
   +--------+
   | newdb  |
   | testdb |
   +--------+
   $ mysql -h IP_ADDRESS -u newuser -p newdb
   Enter password:
   $ openstack database user show access db-instance test
   +--------+
   | Name   |
   +--------+
   | testdb |
   +--------+
   $ mysql -h IP_ADDRESS -u test -p newdb
   Enter password:
   ERROR 1044 (42000): Access denied for user 'test'@'%' to database 'newdb'
   $ openstack database user revoke access db-instance newuser newdb
   $ mysql -h IP_ADDRESS -u newuser -p newdb
   Enter password:
   ERROR 1044 (42000): Access denied for user 'newuser'@'%' to database 'newdb'

Delete databases
~~~~~~~~~~~~~~~~

Lastly, Trove provides an API for deleting databases.

..
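Deleting a database is not reversible, so before removing anything it can be worth double-checking, from the database side, which databases a given user can still see. This is a minimal sketch using the stock ``mysql`` client with its ``-e`` option and the connection values from the examples above; the exact output depends on the grants currently in place:

.. code-block:: console

   $ mysql -h 172.24.4.199 -u newuser -p -e "SHOW DATABASES"
   Enter password:
   +--------------------+
   | Database           |
   +--------------------+
   | information_schema |
   | testdb             |
   +--------------------+

The CLI-based flow for listing and deleting databases follows.

..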
code-block:: console $ openstack database db list db-instance +--------+ | Name | +--------+ | newdb | | sys | | testdb | +--------+ $ openstack database db delete db-instance testdb $ openstack database db list db-instance +--------+ | Name | +--------+ | newdb | | sys | +--------+ $ mysql -h IP_ADDRESS -u test -p testdb Enter password: ERROR 1049 (42000): Unknown database 'testdb'././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/manage-db-config.rst0000644000175000017500000002636300000000000023014 0ustar00coreycorey00000000000000============================= Manage database configuration ============================= You can manage database configuration tasks by using configuration groups. Configuration groups let you set configuration options, in bulk, on one or more databases. This example assumes you have created a MySQL database and shows you how to use a configuration group to configure it. Although this example sets just one option on one database, you can use these same procedures to set multiple options on multiple database instances throughout your environment. This can provide significant time savings in managing your cloud. Bulk-configure a database or databases ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. **List available options** First, determine which configuration options you can set. Different data store versions have different configuration options. List the names and IDs of all available versions of the ``mysql`` data store: .. code-block:: console $ openstack datastore version list mysql +--------------------------------------+-----------+ | id | name | +--------------------------------------+-----------+ | eeb574ce-f49a-48b6-820d-b2959fcd38bb | mysql-5.5 | +--------------------------------------+-----------+ Pass in the data store version ID with the :command:`openstack database configuration parameter list` command to get the available options: .. 
code-block:: console $ openstack database configuration parameter list DATASTORE_VERSION_ID +--------------------------------+---------+---------+----------------------+------------------+ | name | type | min | max | restart_required | +--------------------------------+---------+---------+----------------------+------------------+ | auto_increment_increment | integer | 1 | 65535 | False | | auto_increment_offset | integer | 1 | 65535 | False | | autocommit | integer | 0 | 1 | False | | bulk_insert_buffer_size | integer | 0 | 18446744073709547520 | False | | character_set_client | string | | | False | | character_set_connection | string | | | False | | character_set_database | string | | | False | | character_set_filesystem | string | | | False | | character_set_results | string | | | False | | character_set_server | string | | | False | | collation_connection | string | | | False | | collation_database | string | | | False | | collation_server | string | | | False | | connect_timeout | integer | 1 | 65535 | False | | expire_logs_days | integer | 1 | 65535 | False | | innodb_buffer_pool_size | integer | 0 | 68719476736 | True | | innodb_file_per_table | integer | 0 | 1 | True | | innodb_flush_log_at_trx_commit | integer | 0 | 2 | False | | innodb_log_buffer_size | integer | 1048576 | 4294967296 | True | | innodb_open_files | integer | 10 | 4294967296 | True | | innodb_thread_concurrency | integer | 0 | 1000 | False | | interactive_timeout | integer | 1 | 65535 | False | | join_buffer_size | integer | 0 | 4294967296 | False | | key_buffer_size | integer | 0 | 4294967296 | False | | local_infile | integer | 0 | 1 | False | | max_allowed_packet | integer | 1024 | 1073741824 | False | | max_connect_errors | integer | 1 | 18446744073709547520 | False | | max_connections | integer | 1 | 65535 | False | | max_user_connections | integer | 1 | 100000 | False | | myisam_sort_buffer_size | integer | 4 | 18446744073709547520 | False | | server_id | integer | 1 | 100000 | True | | sort_buffer_size | integer | 32768 | 18446744073709547520 | False | | sync_binlog | integer | 0 | 18446744073709547520 | False | | wait_timeout | integer | 1 | 31536000 | False | +--------------------------------+---------+---------+----------------------+------------------+ In this example, the :command:`openstack database configuration parameter list` command returns a list of options that work with MySQL 5.5. #. **Create a configuration group** A configuration group contains a comma-separated list of key-value pairs. Each pair consists of a configuration option and its value. You can create a configuration group by using the :command:`openstack database configuration create` command. The general syntax for this command is: .. code-block:: console $ openstack database configuration create NAME VALUES --datastore DATASTORE_NAME - *NAME*. The name you want to use for this group. - *VALUES*. The list of key-value pairs. - *DATASTORE_NAME*. The name of the associated data store. Set *VALUES* as a JSON dictionary, for example: .. code-block:: json {"myFirstKey" : "someString", "mySecondKey" : 1} This example creates a configuration group called ``group1``. ``group1`` contains just one key and value pair, and this pair sets the ``sync_binlog`` option to ``1``. .. 
code-block:: console $ openstack database configuration create group1 '{"sync_binlog" : 1}' --datastore mysql +----------------------+--------------------------------------+ | Property | Value | +----------------------+--------------------------------------+ | datastore_version_id | eeb574ce-f49a-48b6-820d-b2959fcd38bb | | description | None | | id | 9a9ef3bc-079b-476a-9cbf-85aa64f898a5 | | name | group1 | | values | {"sync_binlog": 1} | +----------------------+--------------------------------------+ #. **Examine your existing configuration** Before you use the newly-created configuration group, look at how the ``sync_binlog`` option is configured on your database. Replace the following sample connection values with values that connect to your database: .. code-block:: console $ mysql -u user7 -ppassword -h 172.16.200.2 myDB7 Welcome to the MySQL monitor. Commands end with ; or \g. ... mysql> show variables like 'sync_binlog'; +---------------+-------+ | Variable_name | Value | +---------------+-------+ | sync_binlog | 0 | +---------------+-------+ As you can see, the ``sync_binlog`` option is currently set to ``0`` for the ``myDB7`` database. #. **Change the database configuration using a configuration group** You can change a database's configuration by attaching a configuration group to a database instance. You do this by using the :command:`openstack database configuration attach` command and passing in the ID of the database instance and the ID of the configuration group. Get the ID of the database instance: .. code-block:: console $ openstack database instance list +-------------+------------------+-----------+-------------------+--------+-----------+------+ | id | name | datastore | datastore_version | status | flavor_id | size | +-------------+------------------+-----------+-------------------+--------+-----------+------+ | 26a265dd... | mysql_instance_7 | mysql | mysql-5.5 | ACTIVE | 6 | 5 | +-------------+------------------+-----------+-------------------+--------+-----------+------+ Get the ID of the configuration group: .. code-block:: console $ openstack database configuration list +-------------+--------+-------------+---------------------+ | id | name | description |datastore_version_id | +-------------+--------+-------------+---------------------+ | 9a9ef3bc... | group1 | None | eeb574ce... | +-------------+--------+-------------+---------------------+ Attach the configuration group to the database instance: .. note:: This command syntax pertains only to python-troveclient version 1.0.6 and later. Earlier versions require you to pass in the configuration group ID as the first argument. .. code-block:: console $ openstack database configuration attach DB_INSTANCE_ID CONFIG_GROUP_ID #. **Re-examine the database configuration** Display the ``sync_binlog`` setting again: .. code-block:: console mysql> show variables like 'sync_binlog'; +---------------+-------+ | Variable_name | Value | +---------------+-------+ | sync_binlog | 1 | +---------------+-------+ As you can see, the ``sync_binlog`` option is now set to ``1``, as specified in the ``group1`` configuration group. **Conclusion.** Using a configuration group to set a single option on a single database is obviously a trivial example. However, configuration groups can provide major efficiencies when you consider that: - A configuration group can specify a large number of option values. - You can apply a configuration group to hundreds or thousands of database instances in your environment. 
Used in this way, configuration groups let you modify your database cloud configuration, on the fly, on a massive scale. **Maintenance.** There are also a number of useful maintenance features for working with configuration groups. You can: - Disassociate a configuration group from a database instance, using the :command:`openstack database configuration detach` command. - Modify a configuration group on the fly, using the :command:`trove configuration-patch` command. - Find out what instances are using a configuration group, using the :command:`openstack database configuration instances` command. - Delete a configuration group, using the :command:`openstack database configuration delete` command. You might want to do this if no instances use a group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/set-up-clustering.rst0000644000175000017500000001652300000000000023325 0ustar00coreycorey00000000000000========================== Set up database clustering ========================== You can store data across multiple machines by setting up MongoDB sharded clusters. Each cluster includes: - One or more *shards*. Each shard consists of a three member replica set (three instances organized as a replica set). - One or more *query routers*. A query router is the machine that your application actually connects to. This machine is responsible for communicating with the config server to figure out where the requested data is stored. It then accesses and returns the data from the appropriate shard(s). - One or more *config servers*. Config servers store the metadata that links requested data with the shard that contains it. This example shows you how to set up a MongoDB sharded cluster. .. note:: **Before you begin.** Make sure that: - The administrative user has registered a MongoDB datastore type and version. - The administrative user has created an appropriate :ref:`flavor that meets the MongoDB minimum requirements `. Set up clustering ~~~~~~~~~~~~~~~~~ #. **Create a cluster** Create a cluster by using the :command:`openstack database cluster create` command. This command creates a one-shard cluster. Pass in: - The name of the cluster. - The name and version of the datastore you want to use. - The three instances you want to include in the replication set for the first shard. Specify each instance by using the ``--instance`` argument and the associated flavor ID and volume size. Use the same flavor ID and volume size for each instance. In this example, flavor ``7`` is a custom flavor that meets the MongoDB minimum requirements. .. code-block:: console $ openstack database cluster create cluster1 mongodb "2.4" \ --instance flavor=7,volume=2 --instance flavor=7,volume=2 \ --instance flavor=7,volume=2 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2014-08-16T01:46:51 | | datastore | mongodb | | datastore_version | 2.4 | | id | aa6ef0f5-dbef-48cd-8952-573ad881e717 | | name | cluster1 | | task_description | Building the initial cluster. | | task_name | BUILDING | | updated | 2014-08-16T01:46:51 | +-------------------+--------------------------------------+ #. **Display cluster information** Display information about a cluster by using the :command:`openstack database cluster show` command. Pass in the ID of the cluster. The cluster ID displays when you first create a cluster. 
(If you need to find it later on, use the :command:`openstack database cluster list` command to list the names and IDs of all the clusters in your system.) .. code-block:: console $ openstack database cluster show CLUSTER_ID +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2014-08-16T01:46:51 | | datastore | mongodb | | datastore_version | 2.4 | | id | aa6ef0f5-dbef-48cd-8952-573ad881e717 | | ip | 10.0.0.2 | | name | cluster1 | | task_description | No tasks for the cluster. | | task_name | NONE | | updated | 2014-08-16T01:59:33 | +-------------------+--------------------------------------+ .. note:: **Your application connects to this IP address.** The :command:`openstack database cluster show` command displays the IP address of the query router. This is the IP address your application uses to retrieve data from the database. #. **List cluster instances** List the instances in a cluster by using the :command:`openstack database cluster list instances` command. .. code-block:: console $ openstack database cluster list instances CLUSTER_ID +--------------------------------------+----------------+-----------+------+ | ID | Name | Flavor ID | Size | +--------------------------------------+----------------+-----------+------+ | 45532fc4-661c-4030-8ca4-18f02aa2b337 | cluster1-rs1-1 | 7 | 2 | | 7458a98d-6f89-4dfd-bb61-5cf1dd65c121 | cluster1-rs1-2 | 7 | 2 | | b37634fb-e33c-4846-8fe8-cf2b2c95e731 | cluster1-rs1-3 | 7 | 2 | +--------------------------------------+----------------+-----------+------+ **Naming conventions for replication sets and instances.** Note that the ``Name`` column displays an instance name that includes the replication set name. The replication set names and instance names are automatically generated, following these rules: - **Replication set name.** This name consists of the cluster name, followed by the string -rs\ *n*, where *n* is 1 for the first replication set you create, 2 for the second replication set, and so on. In this example, the cluster name is ``cluster1``, and there is only one replication set, so the replication set name is ``cluster1-rs1``. - **Instance name.** This name consists of the replication set name followed by the string -*n*, where *n* is 1 for the first instance in a replication set, 2 for the second instance, and so on. In this example, the instance names are ``cluster1-rs1-1``, ``cluster1-rs1-2``, and ``cluster1-rs1-3``. #. **List clusters** List all the clusters in your system, using the :command:`openstack database cluster list` command. .. code-block:: console $ openstack database cluster list +--------------------------------------+----------+-----------+-------------------+-----------+ | ID | Name | Datastore | Datastore Version | Task Name | +--------------------------------------+----------+-----------+-------------------+-----------+ | aa6ef0f5-dbef-48cd-8952-573ad881e717 | cluster1 | mongodb | 2.4 | NONE | | b8829c2a-b03a-49d3-a5b1-21ec974223ee | cluster2 | mongodb | 2.4 | BUILDING | +--------------------------------------+----------+-----------+-------------------+-----------+ #. **Delete a cluster** Delete a cluster, using the :command:`openstack database cluster delete` command. .. code-block:: console $ openstack database cluster delete CLUSTER_ID Query routers and config servers -------------------------------- Each cluster includes at least one query router and one config server. 
Query routers and config servers count against your quota. When you delete a cluster, the system deletes the associated query router(s) and config server(s). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/set-up-replication.rst0000644000175000017500000001177000000000000023456 0ustar00coreycorey00000000000000=========================== Set up database replication =========================== You can create a replica of an existing database instance. When you make subsequent changes to the original instance, the system automatically applies those changes to the replica. - Replicas are read-only. - When you create a replica, do not specify the ``--users`` or ``--databases`` options. - You can choose a smaller volume or flavor for a replica than for the original, but the replica's volume must be big enough to hold the data snapshot from the original. This example shows you how to replicate a MySQL database instance. Set up replication ~~~~~~~~~~~~~~~~~~ #. **Get the instance ID** Get the ID of the original instance you want to replicate: .. code-block:: console $ openstack database instance list +-----------+------------+-----------+-------------------+--------+-----------+------+ | id | name | datastore | datastore_version | status | flavor_id | size | +-----------+------------+-----------+-------------------+--------+-----------+------+ | 97b...ae6 | base_1 | mysql | mysql-5.5 | ACTIVE | 10 | 2 | +-----------+------------+-----------+-------------------+--------+-----------+------+ #. **Create the replica** Create a new instance that will be a replica of the original instance. You do this by passing in the ``--replica_of`` option with the :command:`openstack database instance create` command. This example creates a replica called ``replica_1``. ``replica_1`` is a replica of the original instance, ``base_1``: .. code-block:: console $ openstack database instance create replica_1 6 --size=5 \ --datastore_version mysql-5.5 \ --datastore mysql --replica_of ID_OF_ORIGINAL_INSTANCE #. **Verify replication status** Pass in ``replica_1``'s instance ID with the :command:`openstack database instance show` command to verify that the newly created ``replica_1`` instance is a replica of the original ``base_1``. Note that the ``replica_of`` property is set to the ID of ``base_1``. .. code-block:: console $ openstack database instance show INSTANCE_ID_OF_REPLICA_1 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | created | 2014-09-16T11:16:49 | | datastore | mysql | | datastore_version | mysql-5.5 | | flavor | 6 | | id | 49c6eff6-ef91-4eff-91c0-efbda7e83c38 | | name | replica_1 | | replica_of | 97b4b853-80f6-414f-ba6f-c6f455a79ae6 | | status | BUILD | | updated | 2014-09-16T11:16:49 | | volume | 5 | +-------------------+--------------------------------------+ Now pass in ``base_1``'s instance ID with the :command:`openstack database instance show` command to list the replica(s) associated with the original instance. Note that the ``replicas`` property is set to the ID of ``replica_1``. If there are multiple replicas, they appear as a comma-separated list. .. 
code-block:: console

      $ openstack database instance show INSTANCE_ID_OF_BASE_1
      +-------------------+--------------------------------------+
      | Property          | Value                                |
      +-------------------+--------------------------------------+
      | created           | 2014-09-16T11:04:56                  |
      | datastore         | mysql                                |
      | datastore_version | mysql-5.5                            |
      | flavor            | 6                                    |
      | id                | 97b4b853-80f6-414f-ba6f-c6f455a79ae6 |
      | ip                | 172.16.200.2                         |
      | name              | base_1                               |
      | replicas          | 49c6eff6-ef91-4eff-91c0-efbda7e83c38 |
      | status            | ACTIVE                               |
      | updated           | 2014-09-16T11:05:06                  |
      | volume            | 5                                    |
      | volume_used       | 0.11                                 |
      +-------------------+--------------------------------------+

#. **Detach the replica**

   If the original instance goes down, you can detach the replica. The
   replica becomes a standalone database instance. You can then take the new
   standalone instance and create a new replica of that instance.

   You detach a replica using the
   :command:`openstack database instance detach replica` command:

   .. code-block:: console

      $ openstack database instance detach replica INSTANCE_ID_OF_REPLICA

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/doc/source/user/upgrade-cluster-datastore.rst0000644000175000017500000000620300000000000025017 0ustar00coreycorey00000000000000
=========================
Upgrade cluster datastore
=========================

Upgrading the datastore for a cluster is very similar to upgrading a single
instance. Trove tries to perform a rolling upgrade so that there is no
downtime. However, this is not always possible; during a Redis upgrade, for
example, some of its slots may be temporarily unavailable. The upgrade
strategy upgrades every instance in the cluster one by one, and the upgrade
is finished once all instances have been upgraded.

Please check the datastore upgrade guide for prerequisites.

This example shows you how to upgrade the Redis datastore (version 3.2.6)
for a cluster.

Upgrading cluster
~~~~~~~~~~~~~~~~~

#. **Check cluster task**

   Use :command:`openstack database cluster list` to check whether the task
   of your cluster is NONE.

   .. code-block:: console

      $ openstack database cluster list
      +--------------------------------------+---------------+-----------+-------------------+-----------+
      | ID                                   | Name          | Datastore | Datastore Version | Task Name |
      +--------------------------------------+---------------+-----------+-------------------+-----------+
      | 05f2e7b7-8dac-453f-ad5d-38195cd5718f | redis_cluster | redis     | 3.2.6             | NONE      |
      +--------------------------------------+---------------+-----------+-------------------+-----------+

#. **Check if target version is available**

   Use :command:`openstack datastore version list` to list all available
   versions of your datastore.

   .. code-block:: console

      $ openstack datastore version list redis
      +--------------------------------------+-------+
      | ID                                   | Name  |
      +--------------------------------------+-------+
      | 483debec-b7c3-4167-ab1d-1765795ed7eb | 3.2.6 |
      | 507f666e-193c-4194-9d9d-da8342dcb4f1 | 3.2.7 |
      +--------------------------------------+-------+

#. **Run cluster-upgrade**

   Use the :command:`openstack database cluster upgrade` command to upgrade
   the datastore of the selected cluster.

   .. code-block:: console

      $ openstack database cluster upgrade 05f2e7b7-8dac-453f-ad5d-38195cd5718f 3.2.7

#. **Wait until task changes from UPGRADING_CLUSTER to NONE**

   You can use :command:`openstack database cluster list` to check the
   current task.

   ..
code-block:: console

      $ openstack database cluster list
      +--------------------------------------+---------------+-----------+-------------------+-----------+
      | ID                                   | Name          | Datastore | Datastore Version | Task Name |
      +--------------------------------------+---------------+-----------+-------------------+-----------+
      | 05f2e7b7-8dac-453f-ad5d-38195cd5718f | redis_cluster | redis     | 3.2.7             | NONE      |
      +--------------------------------------+---------------+-----------+-------------------+-----------+

Other clusters
~~~~~~~~~~~~~~

Upgrade for other clusters works in the same way. Currently Trove supports
upgrades for the following cluster datastores:

- MySQL.
- MariaDB.
- Redis.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/doc/source/user/upgrade-datastore.rst0000644000175000017500000001076400000000000023347 0ustar00coreycorey00000000000000
=================
Upgrade datastore
=================

You can upgrade the datastore version of a database instance. When you
perform an upgrade, the system automatically manages the data and
configuration files of your database.

To perform a datastore upgrade, you need:

- A guest image with the target datastore version.
- A Trove database instance to be upgraded.

This example shows you how to upgrade the Redis datastore (version 3.2.6)
for a single-instance database.

.. note::

   **Before** upgrading, make sure that:

   - Your target datastore is binary compatible with the current datastore.
     Each database provider has its own compatibility policy. Usually there
     shouldn't be any problem when performing an upgrade within minor
     versions.
   - You **do not** downgrade your datastore.
   - The target version is supported by Trove. For instance, Trove doesn't
     support Cassandra >=2.2 at this moment, so you shouldn't perform an
     upgrade from 2.1 to 2.2.

Upgrading datastore
~~~~~~~~~~~~~~~~~~~

#. **Check instance status**

   Make sure the instance status is HEALTHY before upgrading.

   .. code-block:: console

      $ openstack database instance list
      +--------------------------------------+------------+-----------+-------------------+---------+-----------+-----------+------+-----------+
      | ID                                   | Name       | Datastore | Datastore Version | Status  | Addresses | Flavor ID | Size | Region    |
      +--------------------------------------+------------+-----------+-------------------+---------+-----------+-----------+------+-----------+
      | 55411e95-1670-497f-8d92-0179f3b4fdd4 | redis_test | redis     | 3.2.6             | HEALTHY | 10.1.0.25 | 6         | 1    | RegionOne |
      +--------------------------------------+------------+-----------+-------------------+---------+-----------+-----------+------+-----------+

#. **Check if target version is available**

   Use the :command:`openstack datastore version list` command to list all
   available versions of your datastore.

   .. code-block:: console

      $ openstack datastore version list redis
      +--------------------------------------+-------+
      | ID                                   | Name  |
      +--------------------------------------+-------+
      | 483debec-b7c3-4167-ab1d-1765795ed7eb | 3.2.6 |
      | 507f666e-193c-4194-9d9d-da8342dcb4f1 | 3.2.7 |
      +--------------------------------------+-------+

#. **Run upgrade**

   Use the :command:`openstack database instance upgrade` command to upgrade
   the datastore of the instance.

   .. code-block:: console

      $ openstack database instance upgrade 55411e95-1670-497f-8d92-0179f3b4fdd4 3.2.7

#. **Wait until status changes from UPGRADE to HEALTHY**

   Use :command:`openstack database instance list` to check the current
   status.

   ..
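The upgrade runs asynchronously, so the status transition can take some time. Rather than re-running the list command by hand, you can poll for the transition with standard shell utilities and the standard openstackclient output options (``-f value -c status``). This is a minimal sketch, assuming the instance ID from this example; adjust the polling interval to taste:

.. code-block:: console

   $ until [ "$(openstack database instance show 55411e95-1670-497f-8d92-0179f3b4fdd4 -f value -c status)" = "HEALTHY" ]; do
   >     sleep 30
   > done

..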
code-block:: console $ openstack database instance list +--------------------------------------+------------+-----------+-------------------+---------+-----------+-----------+------+-----------+ | ID | Name | Datastore | Datastore Version | Status | Addresses | Flavor ID | Size | Region | +--------------------------------------+------------+-----------+-------------------+---------+-----------+-----------+------+-----------+ | 55411e95-1670-497f-8d92-0179f3b4fdd4 | redis_test | redis | 3.2.7 | UPGRADE | 10.1.0.25 | 6 | 5 | RegionOne | +--------------------------------------+------------+-----------+-------------------+---------+-----------+-----------+------+-----------+ $ openstack database instance list +--------------------------------------+------------+-----------+-------------------+---------+-----------+-----------+------+-----------+ | ID | Name | Datastore | Datastore Version | Status | Addresses | Flavor ID | Size | Region | +--------------------------------------+------------+-----------+-------------------+---------+-----------+-----------+------+-----------+ | 55411e95-1670-497f-8d92-0179f3b4fdd4 | redis_test | redis | 3.2.7 | HEALTHY | 10.1.0.25 | 6 | 5 | RegionOne | +--------------------------------------+------------+-----------+-------------------+---------+-----------+-----------+------+-----------+ Other datastores ~~~~~~~~~~~~~~~~ Upgrade for other datastores works in the same way. Currently Trove supports upgrades for the following datastores: - MySQL - MariaDB - Redis ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6481085 trove-12.1.0.dev92/etc/0000755000175000017500000000000000000000000014722 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6841092 trove-12.1.0.dev92/etc/apache2/0000755000175000017500000000000000000000000016225 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/etc/apache2/trove0000644000175000017500000000232100000000000017305 0ustar00coreycorey00000000000000# Copyright 2017 Amrith Kumar. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This is an example Apache2 configuration file for using Trove API # through mod_wsgi Listen 8779 WSGIDaemonProcess trove-api user=stack group=stack processes=2 threads=2 display-name=%{GROUP} WSGIScriptAlias / /opt/stack/trove/trove/cmd/app.wsgi WSGIProcessGroup trove-api ErrorLog /var/log/httpd/trove_error.log LogLevel info CustomLog /var/log/httpd/trove_access.log combined WSGIProcessGroup trove-api WSGIApplicationGroup %{GLOBAL} AllowOverride All Require all granted ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6841092 trove-12.1.0.dev92/etc/tests/0000755000175000017500000000000000000000000016064 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/etc/tests/core.test.conf0000644000175000017500000000226400000000000020645 0ustar00coreycorey00000000000000{ "report_directory":"rdli-test-report", "test_mgmt":false, "use_local_ovz":false, "use_venv":false, "glance_code_root":"/opt/stack/glance", "glance_api_conf":"/vagrant/conf/glance-api.conf", "glance_reg_conf":"/vagrant/conf/glance-reg.conf", "glance_images_directory": "/glance_images", "glance_image": "fakey_fakerson.tar.gz", "instance_flavor_name":"m1.tiny", "instance_bigger_flavor_name":"m1.rd-smaller", "nova_code_root":"/opt/stack/nova", "nova_conf":"/home/vagrant/nova.conf", "keystone_code_root":"/opt/stack/keystone", "keystone_conf":"/etc/keystone/keystone.conf", "trove_code_root":"/opt/stack/trove", "trove_conf":"/tmp/trove.conf", "trove_version":"v1.0", "trove_api_updated":"2012-08-01T00:00:00Z", "trove_must_have_volume":false, "trove_can_have_volume":true, "trove_main_instance_has_volume": true, "trove_max_accepted_volume_size": 25, "trove_max_instances_per_tenant": 55, "trove_max_volumes_per_tenant": 100, "use_reaper":false, "root_removed_from_instance_api": true, "root_timestamp_disabled": false, "openvz_disabled": false, "management_api_disabled": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/tests/localhost.test.conf0000644000175000017500000000617500000000000021712 0ustar00coreycorey00000000000000{ "include-files":["core.test.conf"], "fake_mode": true, "dbaas_url":"http://localhost:8779/v1.0", "version_url":"http://localhost:8779", "nova_auth_url":"http://localhost:8779/v1.0/auth", "trove_auth_url":"http://localhost:8779/v1.0/auth", "trove_client_insecure":false, "auth_strategy":"fake", "trove_version":"v1.0", "trove_api_updated":"2012-08-01T00:00:00Z", "trove_dns_support":true, "trove_dns_checker":"trove.tests.fakes.dns.FakeDnsChecker", "trove_ip_support":false, "nova_client": null, "shared_network": "b19b5da0-d2f6-11e9-9382-00224d6b7bc1", "users": [ { "auth_user":"admin", "auth_key":"password", "tenant":"admin-1000", "tenant_id":"admin-1000", "requirements": { "is_admin":true, "services": ["trove"] } }, { "auth_user":"jsmith", "auth_key":"password", "tenant":"2500", "tenant_id":"2500", "requirements": { "is_admin":false, "services": ["trove"] } }, { "auth_user":"hub_cap", "auth_key":"password", "tenant":"3000", "tenant_id":"3000", "requirements": { "is_admin":false, "services": ["trove"] } } ], "flavors": [ { "id": 1, "name": "m1.tiny", "ram": 512 }, { "id": 2, "name": "m1.small", "ram": 2048 }, { "id": 3, "name": "m1.medium", "ram": 4096 }, { "id": 4, "name": "m1.large", "ram": 8192 }, { "id": 5, "name": "m1.xlarge", "ram": 16384 }, { "id": 6, "name": "m1.nano", 
"ram": 64 }, { "id": 7, "name": "m1.micro", "ram": 128 }, { "id": 8, "name": "m1.rd-smaller", "ram": 768 }, { "id": 9, "name": "tinier", "ram": 506 }, { "id": 10, "name": "m1.rd-tiny", "ram": 512 }, { "id": 11, "name": "eph.rd-tiny", "ram": 512, "local_storage": 1 }, { "id": 12, "name": "eph.rd-smaller", "ram": 768, "local_storage": 2 }, { "id": "custom", "name": "custom.small", "ram": 512, "local_storage": 1 } ], "examples": { "directory":"api-ref/source/samples", "normal_user_name":"hub_cap", "normal_user_tenant":"3000", "admin_user_name":"admin", "admin_user_tenant":"admin-1000", "replace_host":"https://troveapi.org", "replace_dns_hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.troveexampledb.com" }, "sentinel": null } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6841092 trove-12.1.0.dev92/etc/trove/0000755000175000017500000000000000000000000016061 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/README-policy.generated.md0000644000175000017500000000120600000000000022571 0ustar00coreycorey00000000000000Generate Trove policies sample ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Trove policies sample are no longer provided, instead it could be generated by running the following command from the top of the trove directory: tox -egenpolicy Use customized policy file ~~~~~~~~~~~~~~~~~~~~~~~~~~ As Trove uses policy in code now, it's not necessary to add a policy file for Trove components to run. But when a customized policy is needed, Trove will take ``/etc/trove/policy.json`` by default. The location of the policy file can also be overridden by adding following lines in Trove config file: [oslo_policy] policy_file = /path/to/policy/file ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/api-paste.ini0000644000175000017500000000244700000000000020454 0ustar00coreycorey00000000000000[composite:trove] use = call:trove.common.wsgi:versioned_urlmap /: versions /v1.0: troveapi [app:versions] paste.app_factory = trove.versions:app_factory [pipeline:troveapi] pipeline = cors http_proxy_to_wsgi faultwrapper osprofiler authtoken authorization contextwrapper ratelimit extensions troveapp #pipeline = debug extensions troveapp [filter:extensions] paste.filter_factory = trove.common.extensions:factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:authorization] paste.filter_factory = trove.common.auth:AuthorizationMiddleware.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = trove [filter:contextwrapper] paste.filter_factory = trove.common.wsgi:ContextMiddleware.factory [filter:faultwrapper] paste.filter_factory = trove.common.wsgi:FaultWrapper.factory [filter:ratelimit] paste.filter_factory = trove.common.limits:RateLimitingMiddleware.factory [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory [app:troveapp] paste.app_factory = trove.common.api:app_factory #Add this filter to log request and response for debugging [filter:debug] paste.filter_factory = trove.common.wsgi:Debug [filter:http_proxy_to_wsgi] use = egg:oslo.middleware#http_proxy_to_wsgi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 
trove-12.1.0.dev92/etc/trove/api-paste.ini.test0000644000175000017500000000247200000000000021430 0ustar00coreycorey00000000000000[composite:trove] use = call:trove.common.wsgi:versioned_urlmap /: versions /v1.0: troveapi [app:versions] paste.app_factory = trove.versions:app_factory [pipeline:troveapi] pipeline = faultwrapper http_proxy_to_wsgi authtoken authorization contextwrapper extensions ratelimit troveapp #pipeline = debug extensions troveapp [filter:extensions] paste.filter_factory = trove.common.extensions:factory [filter:authtoken] paste.filter_factory = trove.tests.fakes.keystone:filter_factory service_protocol = http service_host = 127.0.0.1 service_port = 5000 auth_host = 127.0.0.1 auth_port = 35357 auth_protocol = http www_authenticate_uri = http://127.0.0.1/identity/ signing_dir = /tmp/keystone-signing-trove [filter:authorization] paste.filter_factory = trove.common.auth:AuthorizationMiddleware.factory [filter:contextwrapper] paste.filter_factory = trove.common.wsgi:ContextMiddleware.factory [filter:faultwrapper] paste.filter_factory = trove.common.wsgi:FaultWrapper.factory [filter:ratelimit] paste.filter_factory = trove.common.limits:RateLimitingMiddleware.factory [app:troveapp] paste.app_factory = trove.common.api:app_factory #Add this filter to log request and response for debugging [filter:debug] paste.filter_factory = trove.common.wsgi:Debug [filter:http_proxy_to_wsgi] use = egg:oslo.middleware#http_proxy_to_wsgi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6841092 trove-12.1.0.dev92/etc/trove/cloudinit/0000755000175000017500000000000000000000000020053 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/cloudinit/README0000644000175000017500000000022600000000000020733 0ustar00coreycorey00000000000000These cloudinit scripts will used as userdata on instance create File names should match pattern: service_type.cloudinit For example: mysql.cloudinit ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6841092 trove-12.1.0.dev92/etc/trove/conf.d/0000755000175000017500000000000000000000000017230 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/conf.d/README0000644000175000017500000000035400000000000020112 0ustar00coreycorey00000000000000These conf files are read and used by the guest to provide extra information to the guest. The first example of this is the guest_info.conf which will have the uuid of the instance so that the guest can report back things to the infra. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/conf.d/guest_info.conf0000644000175000017500000000007100000000000022237 0ustar00coreycorey00000000000000# Guest-specific information injected by the taskmanager ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/trove-guestagent.conf.sample0000644000175000017500000001354500000000000023523 0ustar00coreycorey00000000000000[DEFAULT] #=========== RPC Configuration ====================== # URL representing the messaging driver to use and its full configuration. # If not set, we fall back to the 'rpc_backend' option and driver specific # configuration. 
#transport_url= # The messaging driver to use. Options include rabbit, qpid and zmq. # Default is rabbit. (string value) #rpc_backend=rabbit # The default exchange under which topics are scoped. May be # overridden by an exchange name specified in the 'transport_url option. control_exchange = trove # ========== Configuration options for Swift ========== # The swift_url can be specified directly or fetched from Keystone catalog. # To fetch from Keystone, comment out swift_url, and uncomment the others. # swift_url = http://10.0.0.1:8080/v1/AUTH_ # Region name of this node. Default value is None. # os_region_name = RegionOne # Service type to use when searching catalog. # swift_service_type = object-store # ========== Datastore Manager Configurations ========== # Datastore manager implementations. # Format: list of 'datastore-type:datastore.manager.implementation.module' # datastore_registry_ext = mysql:trove.guestagent.datastore.mysql.manager.Manager, percona:trove.guestagent.datastore.mysql.manager.Manager # ========== Default Users / DBs Configuration ========== # Permissions to grant "root" user by default root_grant = ALL root_grant_option = True # root_grant = ALTER ROUTINE, CREATE, ALTER, CREATE ROUTINE, CREATE TEMPORARY TABLES, CREATE VIEW, CREATE USER, DELETE, DROP, EVENT, EXECUTE, INDEX, INSERT, LOCK TABLES, PROCESS, REFERENCES, SELECT, SHOW DATABASES, SHOW VIEW, TRIGGER, UPDATE, USAGE # root_grant_option = False # Default password Length for root password # default_password_length = 36 # ========== Default Storage Options for backup ========== # Default configuration for storage strategy and storage options # for backups # For storage to Swift, use the following as defaults: # storage_strategy = SwiftStorage # storage_namespace = trove.common.strategies.storage.swift # Default config options for storing backups to swift # backup_swift_container = database_backups # backup_use_gzip_compression = True # backup_use_openssl_encryption = True # backup_aes_cbc_key = "default_aes_cbc_key" # backup_use_snet = False # backup_chunk_size = 65536 # backup_segment_max_size = 2147483648 # ========== Sample Logging Configuration ========== # Show debugging output in logs (sets DEBUG log level output) # debug = True # Directory and path for log files log_dir = /var/log/trove/ log_file = logfile.txt log_config_append = /etc/trove/trove-logging-guestagent.conf [profiler] # If False fully disable profiling feature. #enabled = False # If False doesn't trace SQL requests. #trace_sqlalchemy = True [oslo_messaging_notifications] # # From oslo.messaging # # The Driver(s) to handle sending notifications. Possible # values are messaging, messagingv2, routing, log, test, noop # (multi valued) # Deprecated group/name - [DEFAULT]/notification_driver #driver = # A URL representing the messaging driver to use for # notifications. If not set, we fall back to the same # configuration used for RPC. (string value) # Deprecated group/name - [DEFAULT]/notification_transport_url #transport_url = # AMQP topic used for OpenStack notifications. (list value) # Deprecated group/name - [rpc_notifier2]/topics # Deprecated group/name - [DEFAULT]/notification_topics #topics = notifications # The maximum number of attempts to re-send a notification # message which failed to be delivered due to a recoverable # error. 
0 - No retry, -1 - indefinite (integer value) #retry = -1 # ========== Datastore Specific Configuration Options ========== [mysql] # For mysql, the following are the defaults for backup, and restore: # backup_strategy = InnoBackupEx # backup_namespace = trove.guestagent.strategies.backup.mysql_impl # restore_namespace = trove.guestagent.strategies.restore.mysql_impl # Default configuration for mysql replication # replication_strategy = MysqlBinlogReplication # replication_namespace = trove.guestagent.strategies.replication.mysql_binlog # replication_user = slave_user # replication_password = slave_password # Users to ignore for user create/list/delete operations # ignore_users = os_admin # Databases to ignore for db create/list/delete operations # ignore_dbs = mysql, information_schema, performance_schema [vertica] # For vertica, following are the defaults needed: # mount_point = /var/lib/vertica # readahead_size = 2048 # guestagent_strategy = trove.common.strategies.cluster.experimental.vertica.guestagent.VerticaGuestAgentStrategy [redis] # For redis, the following are the defaults for backup, and restore: # backup_strategy = RedisBackup # backup_namespace = trove.guestagent.strategies.backup.experimental.redis_impl # restore_namespace = trove.guestagent.strategies.restore.experimental.redis_impl [percona] backup_namespace = trove.guestagent.strategies.backup.mysql_impl restore_namespace = trove.guestagent.strategies.restore.mysql_impl [couchbase] backup_namespace = trove.guestagent.strategies.backup.experimental.couchbase_impl restore_namespace = trove.guestagent.strategies.restore.experimental.couchbase_impl [cassandra] backup_namespace = trove.guestagent.strategies.backup.experimental.cassandra_impl restore_namespace = trove.guestagent.strategies.restore.experimental.cassandra_impl [db2] # For db2, the following are the defaults for backup, and restore: # backup_strategy = DB2OfflineBackup # backup_namespace = trove.guestagent.strategies.backup.experimental.db2_impl # restore_namespace = trove.guestagent.strategies.restore.experimental.db2_impl [couchdb] #For CouchDB, the following are the defaults for backup and restore: # backup_strategy = CouchDBBackup # backup_namespace = trove.guestagent.strategies.backup.experimental.couchdb_impl # restore_namespace = trove.guestagent.strategies.restore.experimental.couchdb_impl ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/trove-logging-guestagent.conf0000644000175000017500000000123400000000000023657 0ustar00coreycorey00000000000000[loggers] keys=root [handlers] keys=file [formatters] keys=minimal,normal,debug ########### # Loggers # ########### [logger_root] level=WARNING handlers=file ################ # Log Handlers # ################ [handler_file] class=logging.handlers.RotatingFileHandler level=WARNING formatter=normal args=('/var/log/trove-guestagent.log', 'a', 100 * 1024 * 1024) # log file limit is 100MB ################## # Log Formatters # ################## [formatter_minimal] format=%(message)s [formatter_normal] format=(%(name)s): %(asctime)s %(levelname)s %(message)s [formatter_debug] format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/trove-workbook.yaml0000644000175000017500000000065200000000000021742 0ustar00coreycorey00000000000000--- version: '2.0' name: trove description: 
Trove Workflows workflows: backup_create: input: [instance, name, description, incremental] output: status: <% $.message %> tasks: backup_create: action: trove.backups_create instance=<% $.instance %> name=<% $.name %> description=<% $.description %> incremental=<% $.incremental %> publish: message: <% 'Backup complete' %> ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/trove.conf.sample0000644000175000017500000002311100000000000021345 0ustar00coreycorey00000000000000[DEFAULT] # Show debugging output in logs (sets DEBUG log level output) debug = True # Address to bind the API server bind_host = 0.0.0.0 # Port the bind the API server to bind_port = 8779 # Number of workers for the API service. The default will # be the number of CPUs available. (integer value) #trove_api_workers=None #===================== RPC Configuration ================================= # URL representing the messaging driver to use and its full configuration. # If not set, we fall back to the 'rpc_backend' option and driver specific # configuration. #transport_url= # The messaging driver to use. Options include rabbit, qpid and zmq. # Default is rabbit. (string value) #rpc_backend=rabbit # The default exchange under which topics are scoped. May be # overridden by an exchange name specified in the 'transport_url option. control_exchange = trove # Maximum line size of message headers to be accepted. # max_header_line may need to be increased when using large tokens # (typically those generated by the Keystone v3 API with big service # catalogs) # max_header_line = 16384 #DB Api Implementation db_api_implementation = "trove.db.sqlalchemy.api" # Configuration options for talking to nova via the novaclient. trove_auth_url = http://0.0.0.0/identity/v2.0 #nova_compute_url = http://localhost:8774/v2 #cinder_url = http://localhost:8776/v1 #swift_url = http://localhost:8080/v1/AUTH_ #neutron_url = http://localhost:9696/ # nova_compute_url, cinder_url, swift_url, and can all be fetched # from Keystone. To fetch from Keystone, comment out nova_compute_url, # cinder_url, swift_url, and and optionally uncomment the lines below. # Region name of this node. Used when searching catalog. Default value is None. #os_region_name = RegionOne # Service type to use when searching catalog. #nova_compute_service_type = compute # Service type to use when searching catalog. #cinder_service_type = volumev2 # Service type to use when searching catalog. #swift_service_type = object-store # Service type to use when searching catalog. #neutron_service_type = network # Config option for showing the IP address that nova doles out # For nova-network, set this to the appropriate network label defined in nova # For neutron, set this to .* since users can specify custom network labels # You can also optionally specify regex'es to match the actual IP addresses # ip_regex (white-list) is applied before black_list_regex in the filter chain network_label_regex = ^private$ #network_label_regex = .* #with neutron enabled #ip_regex = ^(15.|123.) #black_list_regex = ^10.0.0. 
# Config options for enabling volume service trove_volume_support = True block_device_mapping = vdb device_path = /dev/vdb # Maximum volume size for an instance max_accepted_volume_size = 10 max_instances_per_tenant = 5 # Maximum volume capacity (in GB) spanning across all trove volumes per tenant max_volumes_per_tenant = 100 max_backups_per_tenant = 5 volume_time_out=30 # Config options for rate limits http_get_rate = 200 http_post_rate = 200 http_put_rate = 200 http_delete_rate = 200 http_mgmt_post_rate = 200 # Trove DNS trove_dns_support = False dns_account_id = 123456 dns_auth_url = http://127.0.0.1/identity/v2.0 dns_username = user dns_passkey = password dns_ttl = 3600 dns_domain_name = 'trove.com.' dns_domain_id = 11111111-1111-1111-1111-111111111111 dns_driver = trove.dns.designate.driver.DesignateDriver dns_instance_entry_factory = trove.dns.designate.driver.DesignateInstanceEntryFactory dns_endpoint_url = http://127.0.0.1/v1/ dns_service_type = dns # Neutron network_driver = trove.network.nova.NovaNetwork management_networks = # Taskmanager queue name taskmanager_queue = taskmanager # Auth admin_roles = admin # Guest related conf agent_heartbeat_time = 10 agent_call_low_timeout = 5 agent_call_high_timeout = 150 # Reboot time out for instances reboot_time_out = 60 # Trove api-paste file name api_paste_config = api-paste.ini # ============ Notification System configuration =========================== # Sets the notification driver used by oslo.messaging. Options include # messaging, messagingv2, log and routing. Default is 'noop' # notification_driver=noop # Topics used for OpenStack notifications, list value. Default is 'notifications'. # notification_topics=notifications # ============ Logging information ============================= #log_dir = /integration/report #log_file = trove-api.log [database] # SQLAlchemy connection string for the reference implementation # registry server. Any valid SQLAlchemy connection string is fine. # See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine # connection = sqlite:///trove_test.sqlite connection = mysql+pymysql://root:e1a2c042c828d3566d0a@localhost/trove #connection = postgresql://trove:trove@localhost/trove # Period in seconds after which SQLAlchemy should reestablish its connection # to the database. # # MySQL uses a default `wait_timeout` of 8 hours, after which it will drop # idle connections. This can result in 'MySQL Gone Away' exceptions. If you # notice this, you can lower this value to ensure that SQLAlchemy reconnects # before MySQL can drop the connection. idle_timeout = 3600 # ============ SSL configuration (and enablement) ============================= # In order to enable SSL for the trove api server, uncomment # the cert_file and key_file - and of course have those files # accessible. The existence of those setting and files will # enable SSL. [profiler] # If False fully disable profiling feature. #enabled = False # If False doesn't trace SQL requests. #trace_sqlalchemy = True [ssl] #cert_file = /path/to/server.crt #key_file = /path/to/server.key #optional: #ca_file = /path/to/ca_file [oslo_messaging_notifications] # # From oslo.messaging # # The Driver(s) to handle sending notifications. Possible # values are messaging, messagingv2, routing, log, test, noop # (multi valued) # Deprecated group/name - [DEFAULT]/notification_driver #driver = # A URL representing the messaging driver to use for # notifications. If not set, we fall back to the same # configuration used for RPC. 
(string value) # Deprecated group/name - [DEFAULT]/notification_transport_url #transport_url = # AMQP topic used for OpenStack notifications. (list value) # Deprecated group/name - [rpc_notifier2]/topics # Deprecated group/name - [DEFAULT]/notification_topics #topics = notifications # The maximum number of attempts to re-send a notification # message which failed to be delivered due to a recoverable # error. 0 - No retry, -1 - indefinite (integer value) #retry = -1 [mysql] root_on_create = False # Format (single port or port range): A, B-C # where C greater than B tcp_ports = 3306 volume_support = True device_path = /dev/vdb # Users to ignore for user create/list/delete operations ignore_users = os_admin, root ignore_dbs = mysql, information_schema, performance_schema [redis] tcp_ports = 6379, 16379 volume_support = True device_path = /dev/vdb [cassandra] tcp_ports = 7000, 7001, 9042, 9160 volume_support = True device_path = /dev/vdb [couchbase] tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199 volume_support = True device_path = /dev/vdb [mongodb] tcp_ports = 2500, 27017, 27019 volume_support = True device_path = /dev/vdb num_config_servers_per_cluster = 1 num_query_routers_per_cluster = 1 [vertica] tcp_ports = 5433, 5434, 22, 5444, 5450, 4803 udp_ports = 5433, 4803, 4804, 6453 volume_support = True device_path = /dev/vdb cluster_support = True cluster_member_count = 3 api_strategy = trove.common.strategies.cluster.experimental.vertica.api.VerticaAPIStrategy # ============ CORS configuration ============================= [cors] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain received in the # requests "origin" header. (list value) #allowed_origin = # Indicate that the actual request can include user credentials (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple # Headers. (list value) #expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual request. # (list value) #allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID [cors.subdomain] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain received in the # requests "origin" header. (list value) #allowed_origin = # Indicate that the actual request can include user credentials (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple # Headers. (list value) #expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual request. # (list value) #allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID [oslo_middleware] # # From oslo.middleware # # Whether the application is behind a proxy or not. This determines if the # middleware should parse the headers or not. 
(boolean value) #enable_proxy_headers_parsing = false ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/etc/trove/trove.conf.test0000644000175000017500000001223100000000000021044 0ustar00coreycorey00000000000000[DEFAULT] # Fake out the remote implementations remote_nova_client = trove.tests.fakes.nova.fake_create_nova_client remote_guest_client = trove.tests.fakes.guestagent.fake_create_guest_client remote_swift_client = trove.tests.fakes.swift.fake_create_swift_client remote_cinder_client = trove.tests.fakes.nova.fake_create_cinder_client remote_neutron_client = trove.tests.fakes.neutron.fake_create_neutron_client # Fake out the RPC implementation transport_url = 'fake:/' # Skip running periodic tasks report_interval = 0 # Fake out DNS. trove_dns_support = True dns_driver = trove.tests.fakes.dns.FakeDnsDriver dns_instance_entry_factory = trove.tests.fakes.dns.FakeDnsInstanceEntryFactory # This will remove some of the verbose logging when trying to diagnose tox issues default_log_levels=routes.middleware=ERROR,trove.common.auth=WARN log_file = trovetest.log use_stderr = False debug = True # Address to bind the API server bind_host = 0.0.0.0 # Port to bind the API server to bind_port = 8779 # Number of workers for the API service. The default will # be the number of CPUs available. (integer value) #trove_api_workers=None # DB API implementation db_api_implementation = trove.db.sqlalchemy.api # Configuration options for talking to nova via the novaclient. # These options are for an admin user in your keystone config. # It proxies the token received from the user to send to nova via this admin user's creds, # basically acting like the client via that proxy token. nova_proxy_admin_user = admin nova_proxy_admin_pass = 3de4922d8b6ac5a1aad9 nova_proxy_admin_tenant_id = trove_auth_url = http://0.0.0.0/identity/v2.0 os_region_name = RegionOne nova_compute_service_type = compute nova_service_name = Compute Service # Config option for showing the IP address that nova doles out network_label_regex = ^private$ ip_regex = ^(15.|123.) black_list_regex = ^(10.0.0.)
# Config options for enabling volume service trove_volume_support = True nova_volume_service_type = volume nova_volume_service_name = Volume Service device_path = /dev/vdb max_accepted_volume_size = 25 max_instances_per_tenant = 55 max_volumes_per_tenant = 100 max_backups_per_tenant = 5 volume_time_out=30 # Config options for rate limits http_get_rate = 500 http_post_rate = 500 http_put_rate = 500 http_delete_rate = 500 # default datastore default_datastore = a00000a0-00a0-0a00-00a0-000a000000aa # Auth admin_roles = admin # Users to ignore for user create/list/delete operations ignore_users = os_admin, root ignore_dbs = lost+found, mysql, information_schema # Guest related conf agent_heartbeat_time = 10 agent_call_low_timeout = 5 agent_call_high_timeout = 150 server_delete_time_out=10 dns_time_out = 120 resize_time_out = 120 revert_time_out = 120 # usage notifications notification_driver = trove.tests.util.usage notification_service_id = mysql:123,percona:abc control_exchange = trove paste_config_file=api-paste.ini.test [mysql] root_on_create = False volume_support = True device_path = /dev/vdb [redis] # redis uses local storage volume_support = False # default device_path = None [cassandra] volume_support = True device_path = /dev/vdb [couchbase] volume_support = True device_path = /dev/vdb [mongodb] volume_support = True device_path = /dev/vdb [database] # SQLAlchemy connection string for the reference implementation # registry server. Any valid SQLAlchemy connection string is fine. # See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine connection = sqlite:///trove_test.sqlite #connection = mysql+pymysql://root:e1a2c042c828d3566d0a@localhost/trove #connection = postgresql://trove:trove@localhost/trove # Period in seconds after which SQLAlchemy should reestablish its connection # to the database. # # MySQL uses a default `wait_timeout` of 8 hours, after which it will drop # idle connections. This can result in 'MySQL Gone Away' exceptions. If you # notice this, you can lower this value to ensure that SQLAlchemy reconnects # before MySQL can drop the connection. 
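# Example (illustrative values only): if an operator lowered the MySQL
# server's wait_timeout to 3600 seconds, an idle_timeout somewhat below
# it, e.g. 3000, would ensure SQLAlchemy reconnects before the server
# drops the idle connection.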
idle_timeout = 3600 [composite:trove] use = call:trove.common.wsgi:versioned_urlmap /: versions /v1.0: troveapi [app:versions] paste.app_factory = trove.versions:app_factory [pipeline:troveapi] pipeline = faultwrapper authtoken authorization contextwrapper ratelimit extensions troveapp # pipeline = debug troveapp [filter:extensions] paste.filter_factory = trove.common.extensions:factory [filter:authtoken] paste.filter_factory = trove.tests.fakes.keystone:filter_factory service_protocol = http service_host = 127.0.0.1 service_port = 5000 auth_host = 127.0.0.1 auth_port = 35357 auth_protocol = http www_authenticate_uri = http://127.0.0.1/identity/ [filter:authorization] paste.filter_factory = trove.common.auth:AuthorizationMiddleware.factory [filter:contextwrapper] paste.filter_factory = trove.common.wsgi:ContextMiddleware.factory [filter:faultwrapper] paste.filter_factory = trove.common.wsgi:FaultWrapper.factory [filter:ratelimit] paste.filter_factory = trove.tests.fakes.limits:FakeRateLimitingMiddleware.factory [app:troveapp] paste.app_factory = trove.common.api:app_factory #Add this filter to log request and response for debugging [filter:debug] paste.filter_factory = trove.common.wsgi:Debug ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/generate_examples.py0000755000175000017500000000253400000000000020220 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import argparse import os import sys import run_tests def import_tests(): from trove.tests.examples import snippets snippets.monkey_patch_uuid_and_date() if __name__ == "__main__": parser = argparse.ArgumentParser(description='Generate Example Snippets') parser.add_argument('--fix-examples', action='store_true', help='Fix the examples rather than failing tests.') args = parser.parse_args() if args.fix_examples: os.environ['TESTS_FIX_EXAMPLES'] = 'True' # Remove the '--fix-examples' argument from sys.argv as it is not a # valid argument in the run_tests module. sys.argv.pop(sys.argv.index('--fix-examples')) run_tests.main(import_tests) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6841092 trove-12.1.0.dev92/integration/0000755000175000017500000000000000000000000016472 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/README.md0000644000175000017500000001544200000000000017757 0ustar00coreycorey00000000000000# Trove integration script - trovestack ## Steps to setup environment Install a fresh Ubuntu 16.04 (xenial) image. We suggest creating a development virtual machine using the image. 1. Login to the machine as root 1. Make sure we have git installed ``` # apt-get update # apt-get install git-core -y ``` 1. 
Add a user named ubuntu if you do not already have one: ``` # adduser ubuntu # visudo ``` Add this line to the file below the root user ubuntu ALL=(ALL:ALL) ALL Or use this if you don't want to type your password to sudo a command: ubuntu ALL=(ALL) NOPASSWD: ALL If /dev/pts/0 does not have read/write for your user # chmod 666 /dev/pts/0 > Note that this number can change and if you cannot connect to the screen session then the /dev/pts/# needs its mode changed as above. 1. Login with ubuntu and download the Trove code. ```shell # su ubuntu $ mkdir -p /opt/stack $ cd /opt/stack ``` > Note that it is important that you clone the repository here. This is a change from the earlier trove-integration where you could clone trove-integration anywhere you wanted (like HOME) and trove would get cloned for you in the right place. Since trovestack is now in the trove repository, if you wish to test changes that you have made to trove, it is advisable for you to have your trove repository in /opt/stack to avoid another trove repository being cloned for you. 1. Clone this repo and go into the scripts directory ``` $ git clone https://github.com/openstack/trove.git $ cd trove/integration/scripts/ ``` ## Running trovestack Run this to get the command list with a short description of each $ ./trovestack ### Install Trove *This brings up trove services and initializes the trove database.* $ ./trovestack install ### Connecting to the screen session $ screen -x stack If that command fails with the error Cannot open your terminal '/dev/pts/1', chmod the corresponding /dev/pts/# $ chmod 660 /dev/pts/1 ### Navigate the log screens To produce the list of screens that you can scroll through and select ctrl+a then " An example screen list: ``` ..... (full list omitted) 20 c-vol 21 h-eng 22 h-api 23 h-api-cfn 24 h-api-cw 25 tr-api 26 tr-tmgr 27 tr-cond ``` Alternatively, to go directly to a specific screen window ctrl+a then ' then enter a number (like 25) or name (like tr-api) ### Detach from the screen session Allows the services to continue running in the background ctrl+a then d ### Kick start the build/test-init/build-image commands *Add mysql as a parameter to build and add the mysql guest image. This will also populate /etc/trove/test.conf with appropriate values for running the integration tests.* $ ./trovestack kick-start mysql ### Initialize the test configuration and set up test users (overwrites /etc/trove/test.conf) $ ./trovestack test-init ### Build guest agent image The trove guest agent image can be created with the `trovestack` script using the following command: ```shell PATH_DEVSTACK_OUTPUT=/opt/stack \ ./trovestack build-image \ ${datastore_type} \ ${guest_os} \ ${guest_os_release} \ ${dev_mode} ``` - If the script is running as a part of DevStack, the variable `PATH_DEVSTACK_OUTPUT` is set automatically. - If `dev_mode=false`, the trove code for the guest agent is injected into the image at build time. `dev_mode=false` is still experimental and not considered production-ready yet. - If `dev_mode=true`, no Trove code is injected into the guest image. The guest agent will download Trove code during the service initialization. For example, to build a MySQL image for the Ubuntu Xenial operating system: ```shell $ ./trovestack build-image mysql ubuntu xenial false ``` ### Running Integration Tests Check the values in /etc/trove/test.conf in case it has been re-initialized prior to running the tests.
For example, from the previous mysql steps: "dbaas_datastore": "%datastore_type%", "dbaas_datastore_version": "%datastore_version%", should be: "dbaas_datastore": "mysql", "dbaas_datastore_version": "5.5", Once Trove is running on DevStack, you can run the integration tests locally. $ ./trovestack int-tests This will run all of the blackbox tests by default. Use the `--group` option to run a different group: $ ./trovestack int-tests --group=simple_blackbox You can also specify the `TESTS_USE_INSTANCE_ID` environment variable to have the integration tests use an existing instance for the tests rather than creating a new one. $ TESTS_DO_NOT_DELETE_INSTANCE=True TESTS_USE_INSTANCE_ID=INSTANCE_UUID ./trovestack int-tests --group=simple_blackbox ## Reset your environment ### Stop all the services running in the screens and refresh the environment $ killall -9 screen $ screen -wipe $ RECLONE=yes ./trovestack install $ ./trovestack kick-start mysql or $ RECLONE=yes ./trovestack install $ ./trovestack test-init $ ./trovestack build-image mysql ## Recover after reboot If the VM was restarted, then the process for bringing up OpenStack and Trove is quite simple: $ ./trovestack start-deps $ ./trovestack start Use screen to ensure all modules have started without error $ screen -r stack ## VMware Fusion 5 speed improvement Running Ubuntu with KVM or Qemu can be extremely slow without certain optimizations. The following are some VMware settings that can improve performance and may also apply to other virtualization platforms. 1. Shut down the Ubuntu VM. 2. Go to VM Settings -> Processors & Memory -> Advanced Options. Check the "Enable hypervisor applications in this virtual machine" option. 3. Go to VM Settings -> Advanced. Set the "Troubleshooting" option to "None". 4. After setting these, create a snapshot so that in cases where things break down you can revert to a clean snapshot. 5. Boot up the VM and run `./trovestack install`. 6. To verify that KVM is set up properly after the devstack installation, you can run these commands. ``` ubuntu@ubuntu:~$ kvm-ok INFO: /dev/kvm exists KVM acceleration can be used ``` ## VMware Workstation performance improvements In recent versions of VMware, you can get much better performance if you enable the right virtualization options. For example, in VMware Workstation (found in version 10.0.2), click on VM->Settings->Processor. You should see a box of "Virtualization Engine" options that can be changed only when the VM is shut down. Make sure you check "Virtualize Intel VT-x/EPT or AMD-V/RVI" and "Virtualize CPU performance counters". Set the preferred mode to "Automatic". Then boot the VM and ensure that the proper virtualization is enabled.
``` ubuntu@ubuntu:~$ kvm-ok INFO: /dev/kvm exists KVM acceleration can be used ``` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/0000755000175000017500000000000000000000000020161 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/conf/0000755000175000017500000000000000000000000021106 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/cassandra.conf0000644000175000017500000000043200000000000023713 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.large-8", "instance_bigger_flavor_name": "test.large-8.resize", "instance_eph_flavor_name": "test.eph.large-8", "instance_bigger_eph_flavor_name": "test.eph.large-8.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/couchbase.conf0000644000175000017500000000043200000000000023710 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.large-8", "instance_bigger_flavor_name": "test.large-8.resize", "instance_eph_flavor_name": "test.eph.large-8", "instance_bigger_eph_flavor_name": "test.eph.large-8.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/couchdb.conf0000644000175000017500000000042600000000000023366 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.tiny-3", "instance_bigger_flavor_name": "test.tiny-3.resize", "instance_eph_flavor_name": "test.eph.tiny-3", "instance_bigger_eph_flavor_name": "test.eph.tiny-3.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/db2.conf0000644000175000017500000000043200000000000022423 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.small-8", "instance_bigger_flavor_name": "test.small-8.resize", "instance_eph_flavor_name": "test.eph.small-8", "instance_bigger_eph_flavor_name": "test.eph.small-8.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/mariadb.conf0000644000175000017500000000043200000000000023353 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.small-7", "instance_bigger_flavor_name": "test.small-7.resize", "instance_eph_flavor_name": "test.eph.small-7", "instance_bigger_eph_flavor_name": "test.eph.small-7.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/mongodb.conf0000644000175000017500000000043200000000000023401 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.large-8", "instance_bigger_flavor_name": "test.large-8.resize", "instance_eph_flavor_name": "test.eph.large-8", "instance_bigger_eph_flavor_name": "test.eph.large-8.resize", "trove_volume_support": 
true, "trove_volume_size": 5, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/mysql.conf0000644000175000017500000000043200000000000023121 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.small-7", "instance_bigger_flavor_name": "test.small-7.resize", "instance_eph_flavor_name": "test.eph.small-7", "instance_bigger_eph_flavor_name": "test.eph.small-7.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/percona.conf0000644000175000017500000000043200000000000023403 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.small-7", "instance_bigger_flavor_name": "test.small-7.resize", "instance_eph_flavor_name": "test.eph.small-7", "instance_bigger_eph_flavor_name": "test.eph.small-7.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/postgresql.conf0000644000175000017500000000043600000000000024163 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.medium-7", "instance_bigger_flavor_name": "test.medium-7.resize", "instance_eph_flavor_name": "test.eph.medium-7", "instance_bigger_eph_flavor_name": "test.eph.medium-7.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/pxc.conf0000644000175000017500000000043200000000000022546 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.small-7", "instance_bigger_flavor_name": "test.small-7.resize", "instance_eph_flavor_name": "test.eph.small-7", "instance_bigger_eph_flavor_name": "test.eph.small-7.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/redis.conf0000644000175000017500000000042600000000000023065 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.tiny-4", "instance_bigger_flavor_name": "test.tiny-4.resize", "instance_eph_flavor_name": "test.eph.tiny-4", "instance_bigger_eph_flavor_name": "test.eph.tiny-4.resize", "trove_volume_support": true, "trove_volume_size": 1, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/conf/test_begin.conf0000644000175000017500000000517700000000000024112 0ustar00coreycorey00000000000000{ "dbaas_url":"http://%service_host%:8779/v1.0", "version_url":"http://%service_host%:8779", "trove_auth_url":"http://%service_host%/identity/v3/auth/tokens", "trove_client_insecure":false, "auth_strategy":null, "auth_url": "http://%service_host%/identity/v3", "trove_client_region_name": "%region_name%", "users": [ { "auth_user":"trove", "auth_key":"%service_password%", "tenant":"service", "tenant_id":"%service_tenant_id%", "requirements": { "is_admin":true, "services": ["trove", "swift", "glance"] } }, { "auth_user":"alt_demo", "auth_key":"%admin_password%", "tenant":"alt_demo", "tenant_id":"%alt_demo_tenant_id%", "requirements": { "is_admin":false, "services": ["trove", "swift"] } }, { "auth_user":"demo", "auth_key":"%admin_password%", 
"tenant":"demo", "tenant_id":"%demo_tenant_id%", "requirements": { "is_admin":false, "services": ["nova", "glance", "trove"] } } ], "flavors": null, "white_box":false, "test_mgmt":false, "use_local_ovz":false, "use_venv":false, "report_directory":"%report_directory%", "usr_bin_dir":"%bin_path%", "trove_conf":"/tmp/trove.conf", "trove_version":"v1.0", "trove_api_updated":"2012-08-01T00:00:00Z", "trove_max_accepted_volume_size": 1000, "trove_max_instances_per_user": 55, "trove_max_volumes_per_user": 100, "use_reaper":false, "root_removed_from_instance_api": true, "root_timestamp_disabled": false, "openvz_disabled": true, "management_api_disabled": true, "dbaas_image": 1, "dns_driver":"trove.dns.rsdns.driver.RsDnsDriver", "dns_instance_entry_factory":"trove.dns.rsdns.driver.RsDnsInstanceEntryFactory", "trove_dns_support": false, "databases_page_size": 20, "instances_page_size": 20, "users_page_size": 20, "rabbit_runs_locally": true, "dbaas_datastore": "%datastore_type%", "dbaas_datastore_version": "%datastore_version%", "neutron_enabled": %neutron_enabled%, "swift_enabled": %swift_enabled%, "shared_network": "%shared_network%", "trove_mgmt_network": "trove-mgmt", "shared_network_subnet": "%shared_network_subnet%", "instance_fault_1_flavor_name": "test.fault_1-1", "instance_fault_1_eph_flavor_name": "test.eph.fault_1-1", "instance_fault_2_flavor_name": "test.fault_2-7", "instance_fault_2_eph_flavor_name": "test.eph.fault_2-7", "instance_log_on_failure": false, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/test_end.conf0000644000175000017500000000002700000000000023561 0ustar00coreycorey00000000000000 "sentinel": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf/vertica.conf0000644000175000017500000000043600000000000023415 0ustar00coreycorey00000000000000 "instance_flavor_name": "test.large-13", "instance_bigger_flavor_name": "test.large-13.resize", "instance_eph_flavor_name": "test.eph.large-13", "instance_bigger_eph_flavor_name": "test.eph.large-13.resize", "trove_volume_support": true, "trove_volume_size": 5, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/conf.json.example0000644000175000017500000000035300000000000023434 0ustar00coreycorey00000000000000{ "devstack":null, "glance":null, "horizon":null, "keystone":null, "nova":null, "python_openstackclient":null, "python_novaclient":null, "trove":null, "python_troveclient":null, "tempest":null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/create_vm0000755000175000017500000000513600000000000022061 0ustar00coreycorey00000000000000#!/usr/bin/env python """ Sets up a VM with hardcoded paths to multiple source trees. Creates a script for use with VMWare or a Vagrantfile. Uses a configuration file (in JSON format) that stores the paths to checked-out copies of OpenStack projects on the host machine. If the path is None then it lets devstack download them. """ import json class Config(object): """ Very simple configuration file thats just some JSON. 
""" vm_paths = { 'devstack':"/devstack", 'glance': '/opt/stack/glance', 'horizon': '/opt/stack/horizon', 'keystone': "/opt/stack/keystone", 'nova': "/opt/stack/nova", 'python_openstackclient': "/opt/stack/python-openstackclient", 'python_novaclient': "/opt/stack/python-novaclient", 'trove':"/opt/stack/trove", 'python_troveclient':"/opt/stack/python-troveclient", 'tempest':"/opt/stack/tempest" } def __init__(self, **kwargs): for name in Config.vm_paths.keys(): if name not in kwargs: raise RuntimeError('Missing configuration value "%s".' % name) value = kwargs[name] if value is not None and type(value) is not str \ and type(value) is not unicode: raise RuntimeError('Path "%s" must be a string or None but is ' 'of type %s.' % (name, type(value))) setattr(self, name, kwargs[name]) self.vagrant_path = kwargs.get("vagrant_path", "Vagrantfile") @staticmethod def load(file_path): file_contents = open(file_path, "r").read() dict = json.loads(file_contents); return Config(**dict) def write_vagrant_file(self): with open(self.vagrant_path, 'w') as file: file.write(""" Vagrant::Config.run do |global_config| # Host config global_config.vm.define :host do |config| config.vm.network "33.33.44.11" config.vm.box = "precise" config.vm.host_name = "host" config.ssh.timeout = 3600 config.vm.customize do |vm| vm.memory_size = 2048 end config.vm.share_folder "integration", "/integration", "../" """) for key in Config.vm_paths.keys(): local_path = getattr(self, key) vm_path = Config.vm_paths[key] if local_path is not None: file.write('\tconfig.vm.share_folder "%s", "%s", "%s" \n' % (key, vm_path, local_path)) file.write(""" end end """) if __name__=="__main__": conf = Config.load("conf.json") conf.write_vagrant_file() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/files/0000755000175000017500000000000000000000000021263 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/0000755000175000017500000000000000000000000025175 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6481085 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/0000755000175000017500000000000000000000000027562 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/0000755000175000017500000000000000000000000032036 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/15-trove-dep 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/15-trove-0000755000175000017500000000305300000000000033424 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER # PURPOSE: Setup the requirements file for use by 15-reddwarf-dep source $_LIB/die BRANCH_OVERRIDE=${BRANCH_OVERRIDE:-default} REQUIREMENTS_FILE=${TROVESTACK_SCRIPTS}/files/requirements/fedora-requirements.txt [ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" [ -e 
${REQUIREMENTS_FILE} ] || die "Requirements not found" [ -n "$HOST_USERNAME" ] || die "HOST_USERNAME not set" sudo -Hiu ${HOST_USERNAME} dd if=${REQUIREMENTS_FILE} of=${TMP_HOOKS_PATH}/requirements.txt # Grab the upper constraints file, but don't fail if we can't find it. # If we are running in the CI environment, $DEST will be set and stackrc # will use $DEST/requirements as the location for the requirements repo. # Use that as it will help us chain a job with something that is changing UC. UC_FILE=upper-constraints.txt if [ -f "${DEST}/requirements/${UC_FILE}" ]; then echo "Found ${DEST}/requirements/${UC_FILE}, using that" sudo -Hiu ${HOST_USERNAME} dd if="${DEST}/requirements/${UC_FILE}" \ of="${TMP_HOOKS_PATH}/${UC_FILE}" else UC_DIR=$(pwd) UC_BRANCH=${BRANCH_OVERRIDE} if [[ "${UC_BRANCH}" == "default" ]]; then UC_BRANCH=master fi set +e curl -o "${UC_DIR}/${UC_FILE}" "https://opendev.org/openstack/requirements/raw/branch/${UC_BRANCH}/${UC_FILE}" set -e if [ -f "${UC_DIR}/${UC_FILE}" ]; then sudo -Hiu ${HOST_USERNAME} dd if="${UC_DIR}/${UC_FILE}" of=${TMP_HOOKS_PATH}/${UC_FILE} rm -f "${UC_DIR}/${UC_FILE}" fi fi ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/20-guest-systemd 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/20-guest-0000755000175000017500000000237000000000000033411 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER # PURPOSE: stages the bootstrap file and upstart conf file while replacing variables so that guest image is properly # configured source $_LIB/die [ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" [ -n "${GUEST_USERNAME}" ] || die "GUEST_USERNAME needs to be set to the user for the guest image" [ -n "${HOST_SCP_USERNAME}" ] || die "HOST_SCP_USERNAME needs to be set to the user for the host instance" [ -n "${CONTROLLER_IP}" ] || die "CONTROLLER_IP needs to be set to the ip address that guests will use to contact the controller" [ -n "${ESCAPED_PATH_TROVE}" ] || die "ESCAPED_PATH_TROVE needs to be set to the path to the trove directory on the trovestack host" [ -n "${TROVESTACK_SCRIPTS}" ] || die "TROVESTACK_SCRIPTS needs to be set to the trove/integration/scripts dir" [ -n "${ESCAPED_GUEST_LOGDIR}" ] || die "ESCAPED_GUEST_LOGDIR must be set to the escaped guest log dir" sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/GUEST_LOGDIR/${ESCAPED_GUEST_LOGDIR}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/CONTROLLER_IP/${CONTROLLER_IP}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" ${TROVESTACK_SCRIPTS}/files/trove-guest.systemd.conf > ${TMP_HOOKS_PATH}/trove-guest.service ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/62-ssh-key 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/62-ssh-ke0000755000175000017500000000242000000000000033401 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER # PURPOSE: creates the SSH key on the host if it doesn't exist. Then this copies the keys over to a staging area where # they will be duplicated in the guest VM. 
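# (The staging area is ${TMP_HOOKS_PATH} on the host; diskimage-builder
# exposes it inside the image chroot as /tmp/in_target.d, which is where the
# matching install.d scripts later read these staged files from.)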
# This process allows the host to log into the guest but more importantly the guest phones home to get the trove # source source $_LIB/die [ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" [ -n "${HOST_USERNAME}" ] || die "HOST_USERNAME needs to be set to the current user on the host" if [ `whoami` = "root" ]; then die "This should not be run as root" fi # copy files over to the "staging" area for the guest image (they'll later be put in the correct location by the guest user). # Note these keys should not be overridden, otherwise a) you won't be able to ssh in and b) the guest won't be able to # rsync the files if [ -e ${SSH_DIR}/authorized_keys ]; then sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/authorized_keys of=${TMP_HOOKS_PATH}/ssh-authorized-keys sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa of=${TMP_HOOKS_PATH}/id_rsa sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa.pub of=${TMP_HOOKS_PATH}/id_rsa.pub else die "SSH Authorized Keys file must exist along with pub and private key" fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/install.d/0000755000175000017500000000000000000000000031452 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/install.d/15-trove-dep0000755000175000017500000000166200000000000033535 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install trove guest python dependencies - see trovestack functions_qemu set -e set -o xtrace dnf install -y python-devel libxml2-devel libxslt-devel python-setuptools \ python-sqlalchemy python-lxml \ python-routes python-eventlet python-webob \ python-kombu python-paste-deploy python-paste python-netaddr \ python-httplib2 python-iso8601 python-pip python-mysql \ python-migrate python-anyjson gcc python-pexpect # pick up the requirements file left for us by # extra-data.d/15-reddwarf-dep TMP_HOOKS_DIR="/tmp/in_target.d" UPPER_CONSTRAINTS= if [ -f ${TMP_HOOKS_DIR}/upper-constraints.txt ]; then UPPER_CONSTRAINTS=" -c ${TMP_HOOKS_DIR}/upper-constraints.txt" fi pip install -q --upgrade -r ${TMP_HOOKS_DIR}/requirements.txt ${UPPER_CONSTRAINTS} echo "diagnostic pip freeze output follows" pip freeze echo "diagnostic pip freeze output above" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/install.d/20-etc0000755000175000017500000000043000000000000032367 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: take "staged" trove-guest.conf file and put it in the init directory on guest image dd if=/tmp/in_target.d/trove-guest.service of=/usr/lib/systemd/system/trove-guest.service systemctl enable trove-guest.service ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/install.d/21-use-fedora-certificates 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/install.d/21-use-fedor0000755000175000017500000000056100000000000033513 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE:
configure trove-guest service to use system store of trusted certificates GUEST_UNIT_DROPINS="/etc/systemd/system/trove-guest.service.d" mkdir -v -p ${GUEST_UNIT_DROPINS} echo -e '[Service]\nEnvironment=REQUESTS_CA_BUNDLE=/etc/pki/tls/certs' > ${GUEST_UNIT_DROPINS}/30-use-system-certificates.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/install.d/50-user0000755000175000017500000000070100000000000032576 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Add the guest image user that will own the trove agent source...if the user does not already exist set -e set -o xtrace # Difference from apt, -G admin option if ! id -u ${GUEST_USERNAME} >/dev/null 2>&1; then echo "Adding ${GUEST_USERNAME} user" useradd -m ${GUEST_USERNAME} -s /bin/bash passwd ${GUEST_USERNAME} <<_EOF_ ${GUEST_USERNAME} ${GUEST_USERNAME} _EOF_ fi././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/install.d/62-ssh-key0000755000175000017500000000220100000000000033203 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: take "staged" ssh keys (see extra-data.d/62-ssh-key) and put them in the GUEST_USERS home directory set -e set -o xtrace SSH_DIR="/home/${GUEST_USERNAME}/.ssh" TMP_HOOKS_DIR="/tmp/in_target.d" if [ -e "${TMP_HOOKS_DIR}/ssh-authorized-keys" ]; then if [ ! -e ${SSH_DIR} ]; then # this method worked more reliable in vmware fusion over doing sudo -Hiu ${GUEST_USERNAME} mkdir ${SSH_DIR} chown ${GUEST_USERNAME}:${GUEST_USERNAME} ${SSH_DIR} fi sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/authorized_keys conv=notrunc if=${TMP_HOOKS_DIR}/ssh-authorized-keys sudo -Hiu ${GUEST_USERNAME} chmod 600 ${SSH_DIR}/authorized_keys if [ ! 
-e "${SSH_DIR}/id_rsa" ]; then sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa if=${TMP_HOOKS_DIR}/id_rsa # perms have to be right on this file for ssh to work sudo -Hiu ${GUEST_USERNAME} chmod 600 ${SSH_DIR}/id_rsa sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa.pub if=${TMP_HOOKS_DIR}/id_rsa.pub fi else echo "SSH Keys were not staged by host" exit -1 fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/0000755000175000017500000000000000000000000032435 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/05-ipforwarding 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/05-ipfo0000755000175000017500000000021400000000000033537 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace sed -i -r 's/^\s*#(net\.ipv4\.ip_forward=1.*)/\1/' /etc/sysctl.conf echo 1 > /proc/sys/net/ipv4/ip_forward ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/62-trove-guest-sudoers 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/62-trov0000755000175000017500000000077600000000000033614 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: HOST after IMAGE BUILD as SCRIPT USER # PURPOSE: add the guest user account to the /etc/sudoers files with NOPASSWD # Adds user to the sudoers file so they can do everything w/o a pass # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will # see them by forcing PATH TEMPFILE=`mktemp` echo "${GUEST_USERNAME} ALL=(ALL) NOPASSWD:ALL" > $TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/60_trove_guest ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/90-yum-update 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/90-yum-0000755000175000017500000000024500000000000033501 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST after packages installed # PURPOSE: do dnf update to save each instance having to do all the work set -e set -o xtrace dnf -y update ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mariadb/0000755000175000017500000000000000000000000030032 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mariadb/README.md0000644000175000017500000000016600000000000031314 0ustar00coreycorey00000000000000Sets up a MariaDB server install in the image. TODO: auto-tune settings based on host resources or metadata service. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mariadb/install.d/0000755000175000017500000000000000000000000031722 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mariadb/install.d/10-mariadb0000755000175000017500000000026500000000000033470 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -e set -o xtrace dnf -y install mariadb-server percona-xtrabackup ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mariadb/pre-install.d/0000755000175000017500000000000000000000000032506 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mariadb/pre-install.d/10-percona-copr 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mariadb/pre-install.d/10-per0000755000175000017500000000027200000000000033441 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during PRE-CONSTRUCTION as ROOT # PURPOSE: Setup COPR Percona repository set -e set -o xtrace # install from Fedora repos dnf -y install percona-xtrabackup ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6881092 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mongodb/0000755000175000017500000000000000000000000030060 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mongodb/README.md0000644000175000017500000000004700000000000031340 0ustar00coreycorey00000000000000Sets up a MongoDB install in the image.././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/0000755000175000017500000000000000000000000031750 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/10-mongodb0000755000175000017500000000130500000000000033540 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -e set -o xtrace cat > "/etc/rc.local" << _EOF_ #!/bin/bash # Make sure to disable Linux kernel feature transparent huge pages, # it will affect greatly both memory usage and latency in a negative way. # See: http://docs.mongodb.org/manual/tutorial/transparent-huge-pages/ if test -f /sys/kernel/mm/redhat_transparent_hugepage/defrag; then echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag fi if test -f /sys/kernel/mm/redhat_transparent_hugepage/enabled; then echo never > /sys/kernel/mm/redhat_transparent_hugepage/enabled fi exit \$? 
_EOF_ dnf -y install mongodb-server ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/25-trove-mongo-dep 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/25-trove-m0000755000175000017500000000030500000000000033511 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install trove guest python dependencies - see trovestack functions_qemu set -e set -o xtrace pip install 'pymongo>=3.0.2,!=3.1' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mysql/0000755000175000017500000000000000000000000027600 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mysql/README.md0000644000175000017500000000016400000000000031060 0ustar00coreycorey00000000000000Sets up a MySQL server install in the image. TODO: auto-tune settings based on host resources or metadata service. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mysql/install.d/0000755000175000017500000000000000000000000031470 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mysql/install.d/10-mysql0000755000175000017500000000066000000000000033003 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -e set -o xtrace dnf -y install https://repo.mysql.com/mysql-community-release-fc22.rpm dnf -y install mysql-community-server # move the config dir for now but leave /etc/my.cnf alone # ln -s creates problems for the systemd script mkdir /etc/mysql mv /etc/my.cnf.d /etc/mysql/conf.d chown -R mysql:mysql /etc/mysql ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mysql/install.d/40-xtrabackup 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mysql/install.d/40-xtrabacku0000755000175000017500000000024700000000000033626 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -e set -o xtrace dnf -y install percona-xtrabackup ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mysql/post-install.d/0000755000175000017500000000000000000000000032453 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mysql/post-install.d/30-register-mysql-service 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-mysql/post-install.d/30-regi0000644000175000017500000000016600000000000033547
0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # DO NOT enable or start mysqld for systemd, let the guestagent coordinate startup ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6481085 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-percona/0000755000175000017500000000000000000000000030062 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-percona/install.d/0000755000175000017500000000000000000000000031752 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-percona/install.d/05-percona-server 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-percona/install.d/05-percona0000755000175000017500000000074400000000000033554 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during PRE-CONSTRUCTION as ROOT # PURPOSE: Setup the yum repo list so that we can connect to Percona's repo set -e set -o xtrace curl -o /etc/pki/rpm-gpg/RPM-GPG-KEY-percona http://www.percona.com/downloads/RPM-GPG-KEY-percona cat <<EOL > /etc/yum.repos.d/Percona.repo [percona] name = CentOS \$releasever - Percona baseurl=http://repo.percona.com/centos/latest/os/\$basearch/ enabled = 1 gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-percona gpgcheck = 1 EOL././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-percona/install.d/10-mysql0000755000175000017500000000130300000000000033260 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -e set -o xtrace # The fix to make versions of percona-xtrabackup > v2.2 work with Trove # was put into the mysql guestagent code for Mitaka.
There are no current # plans to backport so we need to make sure the guest generated when the # tests are run for Kilo or Liberty get the 2.2 verson of PXB if [[ $BRANCH_OVERRIDE == "stable/kilo" || $BRANCH_OVERRIDE == "stable/liberty" ]]; then PXB_VERSION_OVERRIDE="-22" fi dnf -y install percona-toolkit Percona-Server-shared-55 Percona-Server-server-55 Percona-Server-test-55 Percona-Server-client-55 percona-xtrabackup${PXB_VERSION_OVERRIDE} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6481085 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-postgresql/0000755000175000017500000000000000000000000030636 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-postgresql/install.d/0000755000175000017500000000000000000000000032526 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-postgresql/install.d/10-postgresql 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-postgresql/install.d/10-post0000755000175000017500000000431700000000000033664 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace cat > "/etc/sysctl.d/10-postgresql-performance.conf" << _EOF_ # See 'http://www.postgresql.org/docs/9.6/static/kernel-resources.html' # for best practices. # It is recommended to disable memory overcommit, # but the Python interpreter may require it on smaller flavors. # We therefore stick with the heuristic overcommit setting. vm.overcommit_memory=0 _EOF_ cat > "/etc/rc.local" << _EOF_ #!/bin/bash # See 'http://www.postgresql.org/docs/9.6/static/kernel-resources.html' # Disable Linux kernel transparent huge pages. This feature is not supported by # by Postgres 9.6 and may negatively impact performance of the database. if test -f /sys/kernel/mm/redhat_transparent_hugepage/defrag; then echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag fi if test -f /sys/kernel/mm/redhat_transparent_hugepage/enabled; then echo never > /sys/kernel/mm/redhat_transparent_hugepage/enabled fi exit \$? _EOF_ dnf install -y https://yum.postgresql.org/9.6/fedora/fedora-27-x86_64/pgdg-fedora96-9.6-4.noarch.rpm dnf install -y postgresql96-server postgresql96-contrib postgresql96-devel gcc # Though /var/lib/pgsql is the preferred directory, need to move it as # this is where the volume will be mounted su - postgres -c "/usr/pgsql-9.6/bin/initdb /var/lib/pgsql/9.6/data" mv /var/lib/pgsql /var/lib/postgresql # The `postgresql96-setup` use postgresql-${MAJORVERSION} as the service name, so rename # postgresql-9.6.service to postgresql.service is not a good idea. #mv /lib/systemd/system/postgresql-9.6.service /lib/systemd/system/postgresql.service sed -i 's/PGDATA=\/var\/lib\/pgsql\/9.6\/data/PGDATA=\/var\/lib\/postgresql\/9.6\/data/' /lib/systemd/system/postgresql-9.6.service # Set the right log location for PGUPLOG and PGLOG. sed -i 's/\/var\/lib\/pgsql/\/var\/lib\/postgresql/' /usr/pgsql-9.6/bin/postgresql96-setup # Add all postgresql related command(include pg_rewind) to the OS path. export PATH=$PATH:/usr/pgsql-9.6/bin # Install the native Python client. 
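# (pip builds psycopg2 from source here, which is why gcc was installed
# earlier in this script and why the postgresql-devel and python-devel
# packages below are needed: they provide pg_config and the C headers
# required for the build.)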
dnf install -y postgresql-devel python-devel pip install psycopg2 # Run initdb before `systemctl start` /usr/pgsql-9.6/bin/postgresql96-setup initdb systemctl enable postgresql-9.6.service systemctl start postgresql-9.6.service ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-redis/0000755000175000017500000000000000000000000027541 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-redis/README.md0000644000175000017500000000005500000000000031020 0ustar00coreycorey00000000000000Sets up a redis server install in the image. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-redis/install.d/0000755000175000017500000000000000000000000031431 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/fedora-redis/install.d/10-redis0000755000175000017500000000024000000000000032677 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -ex dnf -y install redis service redis start ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-cassandra/0000755000175000017500000000000000000000000030454 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-cassandra/install.d/0000755000175000017500000000000000000000000032344 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-cassandra/install.d/10-cassandra 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-cassandra/install.d/10-cassa0000755000175000017500000000270700000000000033610 0ustar00coreycorey00000000000000#!/bin/bash set -ex set -o xtrace # Set CASSANDRA_JAVA to override which release of Java to use, defaults to 8 if [ -z "$CASSANDRA_JAVA" ]; then CASSANDRA_JAVA=8; fi # Set CASSANDRA_RELEASE to override which cassandra release to use, defaults to 21x # For current releases supported, see http://cassandra.apache.org/download/ or # http://dl.bintray.com/apache/cassandra/dists/ if [ -z "$CASSANDRA_RELEASE" ]; then CASSANDRA_RELEASE=21x; fi export DEBIAN_FRONTEND=noninteractive apt-get --allow-unauthenticated install -qy curl echo "deb http://www.apache.org/dist/cassandra/debian ${CASSANDRA_RELEASE} main" >> /etc/apt/sources.list.d/cassandra.sources.list curl https://www.apache.org/dist/cassandra/KEYS | sudo apt-key add - apt-get -y update apt-get --allow-unauthenticated install -qy openjdk-${CASSANDRA_JAVA}-jdk expect python-dev apt-get --allow-unauthenticated install -qy libxml2-dev ntp mc libev4 libev-dev apt-get --allow-unauthenticated install -qy libxslt1-dev python-pexpect apt-get --allow-unauthenticated install -qy
python-migrate build-essential apt-get --allow-unauthenticated install -qy python-setuptools python-pip python-snappy apt-get --allow-unauthenticated install -qy cassandra cassandra-tools pip2 install Cython # The Python Driver 2.0 for Apache Cassandra. pip2 install cassandra-driver # Sorted sets support for the Python driver. pip2 install blist service cassandra stop rm -rf /var/lib/cassandra/data/system/* service cassandra start ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-couchbase/0000755000175000017500000000000000000000000030451 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-couchbase/install.d/0000755000175000017500000000000000000000000032341 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-couchbase/install.d/10-couchbase 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-couchbase/install.d/10-couch0000755000175000017500000000052600000000000033611 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive apt-get --allow-unauthenticated install -qy curl apt-get --allow-unauthenticated install -qy libssl0.9.8 curl -O http://packages.couchbase.com/releases/2.2.0/couchbase-server-community_2.2.0_x86_64.deb INSTALL_DONT_START_SERVER=1 dpkg -i couchbase-server-community_2.2.0_x86_64.deb ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-couchdb/0000755000175000017500000000000000000000000030124 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-couchdb/install.d/0000755000175000017500000000000000000000000032014 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-couchdb/install.d/10-couchdb0000755000175000017500000000110700000000000033566 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive # install the ppa-finding tool for the ubuntu 12.04 release apt-get --allow-unauthenticated install -y python-software-properties add-apt-repository -y ppa:couchdb/stable # update cached list of packages apt-get update -y # remove any existing couchdb binaries apt-get remove -yf couchdb couchdb-bin couchdb-common # install couchdb apt-get --allow-unauthenticated install -yV couchdb # install curl to provide a way to interact with CouchDB # over HTTP REST API apt-get --allow-unauthenticated install -qy curl ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-db2/0000755000175000017500000000000000000000000027164 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022
mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-db2/README.md0000644000175000017500000000352500000000000030450 0ustar00coreycorey00000000000000 Creates an image for DB2 Express-C v11.1 The packages for DB2 Express-C can be downloaded from https://www.ibm.com/developerworks/downloads/im/db2express/ by clicking the "Download" button and then selecting "DB2 Express-C for Linux 64-bit". New users can either get an IBM ID or click on "Proceed without an IBM ID". The user is then presented with a registration form which needs to be completed before the DB2 Express-C packages can be downloaded. After accepting the license agreement, the user can download the DB2 Express-C package (.tar.gz file). There are two options for making the DB2 Express-C package accessible to the Trove disk-image building process: - place the package in a private repository and set the environment variable DATASTORE_PKG_LOCATION with the URL to this private repository. e.g. export DATASTORE_PKG_LOCATION="http://www.foo.com/db2/v11.1_linuxx64_expc.tar.gz" - download the package and place it in any directory on the local filesystem that the trovestack script can access. Set the environment variable DATASTORE_PKG_LOCATION with the full path to the downloaded package. e.g. export DATASTORE_PKG_LOCATION="/home/stack/db2/v11.1_linuxx64_expc.tar.gz" The environment variables used are as follows: DATASTORE_PKG_LOCATION - is the place where the user stores the DB2 Express-C package after registration. This can either be a URL to a private repository or the full path to the downloaded package on a local filesystem. DATASTORE_DOWNLOAD_OPTS - defines any wget options the user wants to specify, like user, password, etc. This is an optional variable and is needed only if specifying a private repository to download the packages from. e.g. export DATASTORE_DOWNLOAD_OPTS="--user=foo --password='secret'" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-db2/extra-data.d/0000755000175000017500000000000000000000000031440 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-db2/extra-data.d/20-copy-db2-pkgs 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-db2/extra-data.d/20-copy-db20000755000175000017500000000221400000000000033223 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER # PURPOSE: Stage the DB2 Express-C packages from a directory on the local filesystem or # from a private repository. The package location is specified using the env variable: # DATASTORE_PKG_LOCATION [ -n "${TMP_HOOKS_PATH}" ] || die "Temp hook path not set" [ -n "${DATASTORE_PKG_LOCATION}" ] || die "DATASTORE_PKG_LOCATION not set" # First check if the package is available on the local filesystem. if [ -f "${DATASTORE_PKG_LOCATION}" ]; then echo "Found the DB2 Express-C packages in ${DATASTORE_PKG_LOCATION}." dd if="${DATASTORE_PKG_LOCATION}" of=${TMP_HOOKS_PATH}/db2.tar.gz # else, check if the package is available for download in a private repository.
elif wget ${DATASTORE_DOWNLOAD_OPTS} "${DATASTORE_PKG_LOCATION}" -O ${TMP_HOOKS_PATH}/db2.tar.gz; then echo "Downloaded the DB2 Express-C package from the private repository" else echo "Unable to find the DB2 package at ${DATASTORE_PKG_LOCATION}" echo "Please register and download the DB2 Express-C packages to a private repository or local filesystem." exit 1 fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-db2/install.d/0000755000175000017500000000000000000000000031054 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-db2/install.d/10-db20000755000175000017500000000353600000000000031676 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Uncompress the DB2 packages and install and configure DB2 on Ubuntu. # DB2_PKG_LOCATION points to the directory where the DB2 packages # are located to install. DB2_PKG_LOCATION="/db2" mkdir ${DB2_PKG_LOCATION} cd ${DB2_PKG_LOCATION} # DB2 install location DB2_INSTALL_LOCATION="/opt/ibm/db2/current" # DB2 install requires the hostname to be resolved correctly host_name=`hostname` echo "127.0.0.1 ${host_name}" >> /etc/hosts tar -xvzf /tmp/in_target.d/db2.tar.gz # install dependencies apt-get --allow-unauthenticated install -y libaio1 apt-get --allow-unauthenticated install -y libstdc++6 # start the installation process. Accepts the default installation directory '/opt/ibm/db2/current' ${DB2_PKG_LOCATION}/expc/db2_install -b ${DB2_INSTALL_LOCATION} -f sysreq -l ${DB2_PKG_LOCATION}/db2_install.log -y # create the DB2 users. # DB2 instance owner - db2inst1 # DB2 fence user - db2fenc1 # DB2 admin user - db2das1 useradd -m db2inst1 useradd -m db2fenc1 useradd -m db2das1 # Create the DB2 server instance ${DB2_INSTALL_LOCATION}/instance/db2icrt -a server -u db2fenc1 db2inst1 ${DB2_INSTALL_LOCATION}/cfg/db2ln # Configure DB2 server instance to communicate via TCP/IP on a particular port. echo 'db2c_db2inst1 50000/tcp # DB2 connection service port' >> /etc/services # Configure DB2 to use the TCP/IP settings defined above. su - db2inst1 -c "db2 update database manager configuration using svcename db2c_db2inst1" # Start the actual TCP/IP communication. su - db2inst1 -c "db2set DB2COMM=tcpip" # DB2 requires the hostname to be resolved correctly. Delete this entry from # /etc/hosts since this is the hostname of the instance where the image is being # built. The correct hostname will be set in the guest agent. sed -i "/127.0.0.1[[:space:]]*${host_name}/d" /etc/hosts ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-mongodb/0000755000175000017500000000000000000000000030142 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-mongodb/README.md0000644000175000017500000000005000000000000031414 0ustar00coreycorey00000000000000Sets up a MongoDB install in the image.
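The DB2 staging hook above follows a pattern worth calling out: try the local filesystem first, then fall back to a wget download. A minimal, self-contained sketch of that dispatch (PKG and DEST are illustrative stand-ins for DATASTORE_PKG_LOCATION and ${TMP_HOOKS_PATH}/db2.tar.gz; this is not part of the archive itself):

#!/bin/bash
set -e
PKG=$1    # local path or URL, as DATASTORE_PKG_LOCATION would be
DEST=$2   # staging target, as ${TMP_HOOKS_PATH}/db2.tar.gz would be
if [ -f "$PKG" ]; then
    # package is already on the local filesystem
    dd if="$PKG" of="$DEST"
elif wget ${DATASTORE_DOWNLOAD_OPTS:-} "$PKG" -O "$DEST"; then
    # package fetched from a private repository
    echo "Downloaded package from ${PKG}"
else
    echo "Unable to find package at ${PKG}" >&2
    exit 1
fi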
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-mongodb/pre-install.d/0000755000175000017500000000000000000000000032616 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-mongodb/pre-install.d/10-mon0000755000175000017500000000060500000000000033554 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace [ -n "${RELEASE}" ] || die "RELEASE must be set to either trusty or xenial" apt-get --allow-unauthenticated -y install software-properties-common python3-pip get_key_robust EA312927 echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list apt-get -y update ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-percona/0000755000175000017500000000000000000000000030144 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-percona/install.d/0000755000175000017500000000000000000000000032034 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-percona/install.d/30-mysql0000755000175000017500000000141300000000000033346 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive # The fix to make versions of percona-xtrabackup > v2.2 work with Trove # was put into the mysql guestagent code for Mitaka. 
There are no current # plans to backport so we need to make sure the guest generated when the # tests are run for Kilo or Liberty gets the 2.2 version of PXB if [[ $BRANCH_OVERRIDE == "stable/kilo" || $BRANCH_OVERRIDE == "stable/liberty" ]]; then PXB_VERSION_OVERRIDE="-22" fi apt-get --allow-unauthenticated -y install percona-toolkit percona-server-common-5.6 percona-server-server-5.6 percona-server-test-5.6 percona-server-client-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/0000755000175000017500000000000000000000000032620 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/10-percona-apt-key 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/10-per0000755000175000017500000000136600000000000033560 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during PRE-CONSTRUCTION as ROOT # PURPOSE: Set up the apt repo list so that we can connect to Percona's repo set -e set -o xtrace [ -n "${GUEST_USERNAME}" ] || die "GUEST_USERNAME needs to be set to the user for the guest image" [ -n "${RELEASE}" ] || die "RELEASE must be set to either Precise or Quantal" #5 add Percona GPG key if [ ! -e /home/${GUEST_USERNAME}/.gnupg ]; then mkdir -p /home/${GUEST_USERNAME}/.gnupg fi get_key_robust 1C4CBDCDCD2EFD2A get_key_robust 9334A25F8507EFA5 # add Percona repo # creates the percona sources list cat <<EOL > /etc/apt/sources.list.d/percona.list deb http://repo.percona.com/apt $RELEASE main deb-src http://repo.percona.com/apt $RELEASE main EOL # force an update apt-get update ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/20-apparmor-mysql-local 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/20-app0000755000175000017500000000030700000000000033545 0ustar00coreycorey00000000000000#!/bin/sh set -e #CONTEXT: chroot on host #PURPOSE: Allows mysqld to create temporary files when restoring backups cat <<EOF >>/etc/apparmor.d/local/usr.sbin.mysqld /tmp/ rw, /tmp/** rwk, EOF ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-pxc/0000755000175000017500000000000000000000000027307 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-pxc/install.d/0000755000175000017500000000000000000000000031177 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-pxc/install.d/30-mysql0000755000175000017500000000063000000000000032511 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive apt-get
--allow-unauthenticated -y install percona-xtradb-cluster-server-5.6 percona-xtradb-cluster-client-5.6 percona-xtrabackup # Don't auto start mysql (we'll start it up in guest) update-rc.d mysql defaults update-rc.d mysql disable ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/0000755000175000017500000000000000000000000031763 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/10-percona-apt-key 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/10-percona0000755000175000017500000000136600000000000033564 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during PRE-CONSTRUCTION as ROOT # PURPOSE: Set up the apt repo list so that we can connect to Percona's repo set -e set -o xtrace [ -n "${GUEST_USERNAME}" ] || die "GUEST_USERNAME needs to be set to the user for the guest image" [ -n "${RELEASE}" ] || die "RELEASE must be set to either Precise or Quantal" #5 add Percona GPG key if [ ! -e /home/${GUEST_USERNAME}/.gnupg ]; then mkdir -p /home/${GUEST_USERNAME}/.gnupg fi get_key_robust 1C4CBDCDCD2EFD2A get_key_robust 9334A25F8507EFA5 # add Percona repo # creates the percona sources list cat <<EOL > /etc/apt/sources.list.d/percona.list deb http://repo.percona.com/apt $RELEASE main deb-src http://repo.percona.com/apt $RELEASE main EOL # force an update apt-get update ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/20-apparmor-mysql-local 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/20-apparmo0000755000175000017500000000030700000000000033567 0ustar00coreycorey00000000000000#!/bin/sh set -e #CONTEXT: chroot on host #PURPOSE: Allows mysqld to create temporary files when restoring backups cat <<EOF >>/etc/apparmor.d/local/usr.sbin.mysqld /tmp/ rw, /tmp/** rwk, EOF ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-redis/0000755000175000017500000000000000000000000027623 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-redis/README.md0000644000175000017500000000005500000000000031102 0ustar00coreycorey00000000000000Sets up a redis server install in the image.
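After the Percona pre-install hooks above have run, the repo wiring can be sanity-checked from inside the chroot with a few commands. A hedged example (the package names assume the 5.6 series installed above):

#!/bin/bash
set -e
# the sources list written by the heredoc should exist and point at Percona
grep '^deb http://repo.percona.com/apt' /etc/apt/sources.list.d/percona.list
# the keys fetched via get_key_robust should be in the apt keyring
apt-key list | grep -i percona
# apt should now be able to resolve the pinned packages
apt-get update -qq
apt-cache policy percona-xtrabackup percona-xtradb-cluster-server-5.6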
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/0000755000175000017500000000000000000000000031513 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/30-redis0000755000175000017500000000643700000000000032775 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -ex export DEBIAN_FRONTEND=noninteractive cat > "/etc/sysctl.d/10-redis-performance.conf" << _EOF_ # See 'http://redis.io/topics/admin' for best practices. # Make sure to set the Linux kernel overcommit memory setting to 1. vm.overcommit_memory=1 # Linux kernel will silently truncate 'tcp-backlog' to the value of # '/proc/sys/net/core/somaxconn' so make sure to raise both the value of # 'somaxconn' and 'tcp_max_syn_backlog' in order to get the desired effect. net.ipv4.tcp_max_syn_backlog=1024 net.core.somaxconn=1024 _EOF_ cat > "/etc/rc.local" << _EOF_ #!/bin/bash # Make sure to disable the Linux kernel transparent huge pages feature; # it greatly degrades both memory usage and latency. if test -f /sys/kernel/mm/transparent_hugepage/defrag; then echo never > /sys/kernel/mm/transparent_hugepage/defrag fi if test -f /sys/kernel/mm/transparent_hugepage/enabled; then echo never > /sys/kernel/mm/transparent_hugepage/enabled fi _EOF_ # Install Redis from scratch ARCH_SAV=$ARCH unset ARCH apt-get update apt-get --allow-unauthenticated install -y build-essential tcl curl cd /tmp REDIS_VERSION=3.2.6 REDIS_NAME=redis-$REDIS_VERSION REDIS_ARCHIVE=$REDIS_NAME.tar.gz curl -O http://download.redis.io/releases/$REDIS_ARCHIVE tar xzvf $REDIS_ARCHIVE cd $REDIS_NAME make distclean make make install export ARCH=$ARCH_SAV adduser --system --group --no-create-home redis # Create the data dir REDIS_DATA_DIR=/var/lib/redis REDIS_LOG_DIR=/var/log/redis REDIS_LOG=$REDIS_LOG_DIR/redis.log REDIS_RUN_DIR=/var/run/redis REDIS_PID=$REDIS_RUN_DIR/redis-server.pid mkdir -p $REDIS_DATA_DIR chown redis:redis $REDIS_DATA_DIR chmod 770 $REDIS_DATA_DIR mkdir -p $REDIS_LOG_DIR chown redis:redis $REDIS_LOG_DIR chmod 775 $REDIS_LOG_DIR mkdir -p $REDIS_RUN_DIR chown redis:redis $REDIS_RUN_DIR # Set up a proper conf file to start REDIS_CONF_NAME=redis.conf REDIS_CONF_DIR=/etc/redis REDIS_CONF=$REDIS_CONF_DIR/$REDIS_CONF_NAME mkdir $REDIS_CONF_DIR cp /tmp/$REDIS_NAME/$REDIS_CONF_NAME $REDIS_CONF_DIR chown redis:redis $REDIS_CONF sed -i "s#dir .*#dir $REDIS_DATA_DIR#" $REDIS_CONF sed -i "s#pidfile .*#pidfile $REDIS_PID#" $REDIS_CONF sed -i "s#logfile .*#logfile $REDIS_LOG#" $REDIS_CONF sed -i "s/supervised no/supervised systemd/" $REDIS_CONF sed -i "s/daemonize no/daemonize yes/" $REDIS_CONF sed -i "s/protected-mode yes/protected-mode no/" $REDIS_CONF cat > "/etc/systemd/system/redis-server.service" << _EOF_ [Unit] Description=Redis In-Memory Data Store After=network.target [Service] Type=forking PIDFile=$REDIS_PID User=redis Group=redis Environment=statedir=$REDIS_RUN_DIR PermissionsStartOnly=true ExecStartPre=/bin/mkdir -p \${statedir} ExecStartPre=/bin/chown -R redis:redis \${statedir} ExecStart=/usr/local/bin/redis-server $REDIS_CONF ExecReload=/bin/kill -USR2 \$MAINPID ExecStop=/usr/local/bin/redis-cli shutdown Restart=always [Install]
WantedBy=multi-user.target _EOF_ cat > "/etc/default/redis-server" << _EOF_ # Call ulimit -n with this argument prior to invoking Redis itself. # This may be required for high-concurrency environments. Redis itself cannot # alter its limits as it is not being run as root. ULIMIT=65536 _EOF_ # Install Python driver for Redis ('redis-py'). pip3 install redis ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/80-fix-in-guest-agent-env 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/80-fix-in-gu0000755000175000017500000000030500000000000033467 0ustar00coreycorey00000000000000#!/bin/sh # PURPOSE: Install the Redis package required in the guest-agent venv set -ex if [ -n "$GUEST_VENV" ] then # Install Python driver for Redis ('redis-py'). $GUEST_VENV/bin/pip3 install redis fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-vertica/0000755000175000017500000000000000000000000030152 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-vertica/README.md0000644000175000017500000000012500000000000031427 0ustar00coreycorey00000000000000Installs the Vertica CE 7.1 Debian package and other dependencies in the image. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-vertica/extra-data.d/0000755000175000017500000000000000000000000032426 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-vertica/extra-data.d/93-copy-vertica-deb 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-vertica/extra-data.d/93-copy0000755000175000017500000000071300000000000033560 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER # PURPOSE: Stages the package installer file from DATASTORE_PKG_LOCATION, # so that the guest image has the package file.
set -e set -o xtrace source $_LIB/die [ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" [ -f "$DATASTORE_PKG_LOCATION" ] || die "Datastore package installer file not found at:" $DATASTORE_PKG_LOCATION dd if=${DATASTORE_PKG_LOCATION} of=${TMP_HOOKS_PATH}/vertica.deb ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-vertica/install.d/0000755000175000017500000000000000000000000032042 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-vertica/install.d/97-vertica0000755000175000017500000000326400000000000033667 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT - install.d # PURPOSE: Install controller base required packages set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive # Copy the package file to the image, # as it needs to be used later during configuration. dd if=/tmp/in_target.d/vertica.deb of=/vertica.deb # Install base packages apt-get --allow-unauthenticated install -qy build-essential bc iptables apt-get --allow-unauthenticated install -qy curl sysstat pstack mcelog apt-get --allow-unauthenticated install -qy python-dev g++ unixODBC unixODBC-dev dialog apt-get --allow-unauthenticated install -qy dialog libbz2-dev libboost-all-dev libcurl4-gnutls-dev apt-get --allow-unauthenticated install -qy openjdk-7-jdk # Install Vertica package dpkg -i /vertica.deb # Create the dbadmin user and the verticadba group groupadd verticadba useradd -g verticadba -d /home/dbadmin -s /bin/bash -m dbadmin echo "export PATH=/opt/vertica/bin:\$PATH" >> ~dbadmin/.profile echo "export TZ=`date +%Z`" >> ~dbadmin/.profile # Create the base directory to be used for database creation mkdir /var/lib/vertica chown dbadmin:verticadba /var/lib/vertica # Backup /etc/hosts cp -p /etc/hosts /etc/hosts.bkp # Compile the SDK examples - the supplied UDFs can then be loaded cd /opt/vertica/sdk/examples TMPDIR=/tmp JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 make cd cat > "/etc/rc.local" << _EOF_ #!/bin/bash # Vertica requires THP to be turned off if test -f /sys/kernel/mm/transparent_hugepage/defrag; then echo never > /sys/kernel/mm/transparent_hugepage/defrag fi if test -f /sys/kernel/mm/transparent_hugepage/enabled; then echo never > /sys/kernel/mm/transparent_hugepage/enabled fi exit \$?
_EOF_ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-cassandra/0000755000175000017500000000000000000000000031732 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-cassandra/element-deps 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-cassandra/element-dep0000644000175000017500000000002100000000000034047 0ustar00coreycorey00000000000000ubuntu-cassandra ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-couchbase/0000755000175000017500000000000000000000000031727 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-couchbase/element-deps 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-couchbase/element-dep0000644000175000017500000000002100000000000034042 0ustar00coreycorey00000000000000ubuntu-couchbase ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-couchdb/0000755000175000017500000000000000000000000031402 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-couchdb/element-deps0000644000175000017500000000001700000000000033705 0ustar00coreycorey00000000000000ubuntu-couchdb ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6921093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/0000755000175000017500000000000000000000000031420 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/element-deps0000644000175000017500000000001700000000000033723 0ustar00coreycorey00000000000000ubuntu-mongodb ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/0000755000175000017500000000000000000000000033310 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/10-mongodb-thp 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/10-0000755000175000017500000000112700000000000033534 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive cat > /lib/systemd/system/disable_transparent_huge_pages.service << '_EOF_' [Unit] Description=Disable Transparent Huge Pages [Service] Type=oneshot ExecStart=/usr/bin/sh -c "/usr/bin/echo
'never' | tee /sys/kernel/mm/transparent_hugepage/enabled" ExecStart=/usr/bin/sh -c "/usr/bin/echo 'never' | tee /sys/kernel/mm/transparent_hugepage/defrag" [Install] WantedBy=multi-user.target _EOF_ systemctl daemon-reload systemctl enable disable_transparent_huge_pages.service systemctl start disable_transparent_huge_pages.service ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/20-mongodb 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/20-0000755000175000017500000000015600000000000033536 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive apt-get -y install mongodb-org=3.2.11 ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/25-trove-mongo-dep 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/25-0000755000175000017500000000030600000000000033540 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install trove guest python dependencies - see trovestack functions_qemu set -e set -o xtrace pip3 install "pymongo>=3.0.2,!=3.1" ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/30-mongodb-conf 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/30-0000755000175000017500000000075400000000000033543 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace # Remove the default pid file rm -f /var/run/mongodb.pid cat > /etc/mongod.conf << '_EOF_' storage.dbPath: /var/lib/mongodb security.authorization: enabled storage.engine: wiredTiger storage.journal.enabled: true systemLog.destination: file systemLog.logAppend: true systemLog.path: /var/log/mongodb/mongod.log _EOF_ cat > /etc/mongos.conf << '_EOF_' systemLog.destination: file systemLog.logAppend: true systemLog.path: /var/log/mongodb/mongos.log _EOF_ ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/35-check-numa 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/35-0000755000175000017500000000117500000000000033546 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive mkdir -p /usr/share/mongodb cat > /usr/share/mongodb/check-numa.sh << '_EOF_' #!/bin/sh # Handle NUMA access to CPUs (SERVER-3574) # This verifies the existence of numactl as well as testing that the command works. # Then it generates an environment file for systemd NUMACTL_ARGS="--interleave=all" if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null then echo -n NUMACTL="$(which numactl) $NUMACTL_ARGS" > /etc/numactl.env else echo -n NUMACTL="" > /etc/numactl.env fi _EOF_ chmod 755 /usr/share/mongodb/check-numa.sh ././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128
path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/40-check-numa-systemd 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/40-0000755000175000017500000000046300000000000033541 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace cat > /lib/systemd/system/check-numa.service << '_EOF_' [Unit] Description=Generates /etc/numactl.env [Service] Type=oneshot ExecStart=/usr/share/mongodb/check-numa.sh [Install] WantedBy=multi-user.target _EOF_ systemctl daemon-reload systemctl enable check-numa.service ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/41-mongod-systemd 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/41-0000755000175000017500000000122700000000000033541 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace cat > /lib/systemd/system/mongod.service << '_EOF_' [Unit] Description=High-performance, schema-free document-oriented database After=network.target check-numa.service Wants=check-numa.service Documentation=https://docs.mongodb.org/manual [Service] User=mongodb Group=mongodb RuntimeDirectory=mongodb EnvironmentFile=/etc/numactl.env ExecStart=/bin/bash -c "${NUMACTL} /usr/bin/mongod --quiet --config /etc/mongod.conf" LimitFSIZE=infinity LimitCPU=infinity LimitAS=infinity LimitNOFILE=64000 LimitRSS=infinity LimitNPROC=64000 [Install] WantedBy=multi-user.target _EOF_ systemctl daemon-reload systemctl enable mongod.service ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/42-mongos-systemd 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/42-0000755000175000017500000000106000000000000033535 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace cat > /lib/systemd/system/mongos.service << '_EOF_' [Unit] Description=High-performance, schema-free document-oriented database After=network.target Documentation=https://docs.mongodb.org/manual [Service] User=mongodb Group=mongodb RuntimeDirectory=mongodb ExecStart=/usr/bin/mongos --quiet --config /etc/mongos.conf LimitFSIZE=infinity LimitCPU=infinity LimitAS=infinity LimitNOFILE=64000 LimitRSS=infinity LimitNPROC=64000 [Install] WantedBy=multi-user.target _EOF_ systemctl daemon-reload systemctl enable mongos.service ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/0000755000175000017500000000000000000000000031422 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/element-deps0000644000175000017500000000001700000000000033725 0ustar00coreycorey00000000000000ubuntu-percona ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000112 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/post-install.d/ 28 mtime=1586541819.6961093 
trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/post-install.0000755000175000017500000000000000000000000034052 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/post-install.d/10-fix-mycnf 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/post-install.0000755000175000017500000000026300000000000034060 0ustar00coreycorey00000000000000#!/bin/bash # Create [mysql] section if no section exists in /etc/mysql/my.cnf if ! grep -q -P '^\[.*\]' /etc/mysql/my.cnf; then sed -i '1s/^/[mysql]\n/' /etc/mysql/my.cnf fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/0000755000175000017500000000000000000000000030565 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/element-deps0000644000175000017500000000001300000000000033064 0ustar00coreycorey00000000000000ubuntu-pxc ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/install.d/0000755000175000017500000000000000000000000032455 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/install.d/31-fix-0000755000175000017500000000044600000000000033473 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -e set -o xtrace mv /etc/mysql/my.cnf.fallback /etc/mysql/my.cnf chown mysql:mysql /etc/mysql/my.cnf cat >/etc/mysql/my.cnf <<_EOF_ [mysql] !includedir /etc/mysql/conf.d/ _EOF_ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/0000755000175000017500000000000000000000000031101 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/element-deps0000644000175000017500000000001500000000000033402 0ustar00coreycorey00000000000000ubuntu-redis ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/install.d/0000755000175000017500000000000000000000000032771 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/install.d/31-fix-init-file 22 mtime=1586541818.0
trove-12.1.0.dev92/integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/install.d/31-fi0000755000175000017500000000110600000000000033534 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace cat > /lib/systemd/system/redis-server.service << '_EOF_' [Unit] Description=Advanced key-value store After=network.target [Service] Type=forking PIDFile=/var/run/redis/redis-server.pid User=redis Group=redis Environment=statedir=/var/run/redis PermissionsStartOnly=true ExecStartPre=/bin/mkdir -p ${statedir} ExecStartPre=/bin/chown -R redis:redis ${statedir} ExecStart=/usr/bin/redis-server /etc/redis/redis.conf ExecReload=/bin/kill -USR2 $MAINPID ExecStop=/usr/bin/redis-cli shutdown Restart=always [Install] WantedBy=multi-user.target _EOF_ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/elements/0000755000175000017500000000000000000000000023077 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/apt-conf-dir/0000755000175000017500000000000000000000000025362 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/apt-conf-dir/README.rst0000644000175000017500000000071000000000000027049 0ustar00coreycorey00000000000000============ apt-conf-dir ============ This element overrides the default apt.conf.d directory for APT based systems. Environment Variables --------------------- DIB_APT_CONF_DIR: :Required: No :Default: None :Description: To override the default apt.conf.d, set `DIB_APT_CONF_DIR` to the path of your apt.conf.d directory. The new apt.conf.d will take effect at build time and run time. :Example: ``DIB_APT_CONF_DIR=/etc/apt/apt.conf.d`` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/apt-conf-dir/extra-data.d/0000755000175000017500000000000000000000000027636 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=trove-12.1.0.dev92/integration/scripts/files/elements/apt-conf-dir/extra-data.d/99-use-host-apt-confd 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/apt-conf-dir/extra-data.d/99-use-host-apt-conf0000755000175000017500000000120600000000000033276 0ustar00coreycorey00000000000000#!/bin/bash # Override the default /etc/apt/apt.conf.d directory with $DIB_APT_CONF_DIR if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail # exit directly if DIB_APT_CONF_DIR is not defined properly if [ -z "${DIB_APT_CONF_DIR:-}" ] ; then echo "DIB_APT_CONF_DIR is not set - no apt.conf.d will be copied in" exit 0 elif [ ! -d "$DIB_APT_CONF_DIR" ] ; then echo "$DIB_APT_CONF_DIR is not a valid apt.conf.d directory."
echo "You should assign a proper apt.conf.d directory in DIB_APT_CONF_DIR" exit 1 fi # copy the apt.conf.d directory to the cloud image sudo cp -L -f -R $DIB_APT_CONF_DIR $TMP_MOUNT_PATH/etc/apt ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/0000755000175000017500000000000000000000000025322 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/README.rst0000644000175000017500000000020500000000000027006 0ustar00coreycorey00000000000000Element to install a Trove guest agent. Note: this requires a system base image modified to include Trove source code repositories. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/element-deps0000644000175000017500000000013000000000000027621 0ustar00coreycorey00000000000000dib-init-system package-installs pkg-map source-repositories svc-map pip-and-virtualenv ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/environment.d/0000755000175000017500000000000000000000000030110 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/environment.d/99-reliable-apt-key-importing.bash 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/environment.d/99-reliable-apt-key-0000644000175000017500000000202400000000000033474 0ustar00coreycorey00000000000000# sometimes the primary key server is unavailable and we should try an # alternate. see # https://bugs.launchpad.net/percona-server/+bug/907789. Disable # shell errexit so we can interrogate the exit code and take action # based on the exit code. We will reenable it later. # # NOTE(zhaochao): we still have this problem from time to time, so it's # better to use more reliable keyservers and just retry on that (for now, 3 # tries should be fine). # According to: # [1] https://www.gnupg.org/faq/gnupg-faq.html#new_user_default_keyserver # [2] https://sks-keyservers.net/overview-of-pools.php # we'll just use the primary suggested pool: pool.sks-keyservers.net.
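# Example usage (illustrative, not part of the original file): the Percona
# pre-install hooks in the deprecated elements above call this helper as
#
#   get_key_robust 1C4CBDCDCD2EFD2A
#
# On a transient keyserver failure the import is retried; only the third
# and final attempt runs with errexit re-enabled, so a persistent failure
# still aborts the image build.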
function get_key_robust() { KEY=$1 set +e tries=1 while [ $tries -le 3 ]; do if [ $tries -eq 3 ]; then set -e fi echo "Importing the key, try: $tries" apt-key adv --keyserver hkp://pool.sks-keyservers.net \ --recv-keys ${KEY} && break tries=$((tries+1)) done set -e } export -f get_key_robust ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/0000755000175000017500000000000000000000000027212 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/50-user0000755000175000017500000000104100000000000030334 0ustar00coreycorey00000000000000#!/bin/bash # PURPOSE: Add the guest image user that will own the trove agent source if the # user does not already exist if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then set -x fi set -e set -o pipefail GUEST_USERNAME=${GUEST_USERNAME:-"ubuntu"} if ! id -u ${GUEST_USERNAME} >/dev/null 2>&1; then echo "Adding ${GUEST_USERNAME} user" useradd -G sudo -m ${GUEST_USERNAME} -s /bin/bash chown ${GUEST_USERNAME}:${GUEST_USERNAME} /home/${GUEST_USERNAME} passwd ${GUEST_USERNAME} <<_EOF_ ${GUEST_USERNAME} ${GUEST_USERNAME} _EOF_ fi ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000113 path=trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/ 28 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-insta0000755000175000017500000000000000000000000033770 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/75-guest-agent-install 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-insta0000755000175000017500000000300200000000000033770 0ustar00coreycorey00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail SCRIPTDIR=$(dirname $0) GUEST_VENV=/opt/guest-agent-venv GUEST_USERNAME=${GUEST_USERNAME:-"ubuntu"} # Create a virtual environment for guest agent ${DIB_PYTHON} -m virtualenv ${GUEST_VENV} ${GUEST_VENV}/bin/pip install pip --upgrade ${GUEST_VENV}/bin/pip install -U -c /opt/upper-constraints.txt /opt/guest-agent chown -R ${GUEST_USERNAME}:root ${GUEST_VENV} # Link the trove-guestagent out to /usr/local/bin where the startup scripts look for ln -s ${GUEST_VENV}/bin/trove-guestagent /usr/local/bin/guest-agent || true for folder in "/var/lib/trove" "/etc/trove" "/etc/trove/certs" "/etc/trove/conf.d" "/var/log/trove"; do mkdir -p ${folder} chown -R ${GUEST_USERNAME}:root ${folder} done install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.logrotate /etc/logrotate.d/guest-agent case "$DIB_INIT_SYSTEM" in systemd) mkdir -p /usr/lib/systemd/system touch /usr/lib/systemd/system/guest-agent.service sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g" ${SCRIPTDIR}/guest-agent.service > /usr/lib/systemd/system/guest-agent.service ;; upstart) install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.conf /etc/init/guest-agent.conf ;; sysv) install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.init 
/etc/init.d/guest-agent.init ;; *) echo "Unsupported init system" exit 1 ;; esac ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.conf 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-insta0000644000175000017500000000057600000000000033992 0ustar00coreycorey00000000000000description "Start up the Trove Guest Agent" start on runlevel [2345] stop on runlevel [!2345] respawn respawn limit 2 2 exec guest-agent --config-file /etc/trove/guest-agent.conf post-start script PID=`status guest-agent | egrep -oi '([0-9]+)$' | head -n1` echo $PID > /var/run/guest-agent.pid end script post-stop script rm -f /var/run/guest-agent.pid end script ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.init 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-insta0000644000175000017500000000375000000000000033977 0ustar00coreycorey00000000000000### BEGIN INIT INFO # Provides: guest-agent # Required-Start: $remote_fs $syslog $network # Required-Stop: $remote_fs $syslog $network # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Runs the Trove Guest Agent processes # Description: This script runs Trove Guest Agent processes. # This script will start the Guest Agent services # and kill them. ### END INIT INFO # Using the lsb functions to perform the operations. . /lib/lsb/init-functions # Process name ( For display ) NAME=guest-agent # Daemon name, where the actual executable is DAEMON=/usr/local/bin/guest-agent # pid file for the daemon PIDFILE=/var/run/guest-agent.pid # If the daemon is not there, then exit. test -x $DAEMON || exit 5 case $1 in start) # Check that the PID file exists and check the actual status of the process if [ -e $PIDFILE ]; then status_of_proc -p $PIDFILE $DAEMON "$NAME process" && status="0" || status="$?" # If the status is SUCCESS then we don't need to start again. if [ $status = "0" ]; then exit # Exit fi fi # Start the daemon. log_daemon_msg "Starting the process" "$NAME" # Start the daemon with the help of start-stop-daemon # Log the message appropriately if start-stop-daemon --start -m --quiet --oknodo --pidfile $PIDFILE --startas $DAEMON -- --config-file /etc/trove/guest-agent.conf ; then log_end_msg 0 else log_end_msg 1 fi ;; stop) # Stop the daemon. if [ -e $PIDFILE ]; then status_of_proc -p $PIDFILE $DAEMON "Stopping the $NAME process" && status="0" || status="$?" if [ "$status" = 0 ]; then start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE /bin/rm -rf $PIDFILE fi else log_daemon_msg "$NAME process is not running" log_end_msg 0 fi ;; restart) # Restart the daemon. $0 stop && sleep 2 && $0 start ;; *) # For invalid arguments, print the usage message.
echo "Usage: $0 {start|stop|restart|reload|status}" exit 2 ;; esac ././@PaxHeader0000000000000000000000000000023400000000000011454 xustar0000000000000000134 path=trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.logrotate 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-insta0000644000175000017500000000054700000000000034000 0ustar00coreycorey00000000000000/var/log/guest-agent.log { daily rotate 10 missingok notifempty compress delaycompress sharedscripts postrotate # Signal name shall not have the SIG prefix in kill command # http://pubs.opengroup.org/onlinepubs/9699919799/utilities/kill.html kill -s USR1 $(cat /var/run/guest-agent.pid) endscript } ././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.service 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-insta0000644000175000017500000000056400000000000033777 0ustar00coreycorey00000000000000[Unit] Description=OpenStack Trove Guest Agent After=network.target syslog.service Wants=syslog.service [Service] User=GUEST_USERNAME Group=GUEST_USERNAME ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove/conf.d" ExecStart=/usr/local/bin/guest-agent --config-dir=/etc/trove/conf.d KillMode=mixed Restart=always [Install] WantedBy=multi-user.target ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/package-installs.yaml0000644000175000017500000000122000000000000031423 0ustar00coreycorey00000000000000guest-agent: installtype: package build-essential: installtype: source libffi-dev: installtype: source libssl-dev: installtype: source python-dev: installtype: source acl: acpid: apparmor: apparmor-utils: apt-transport-https: at: bash-completion: cloud-guest-utils: cloud-init: cron: curl: dbus: dkms: dmeventd: ethtool: gawk: ifenslave: ifupdown: iptables: iputils-tracepath: irqbalance: isc-dhcp-client: less: logrotate: lsof: net-tools: netbase: netcat-openbsd: open-vm-tools: arch: i386, amd64 openssh-client: openssh-server: pollinate: psmisc: rsyslog: socat: tcpdump: ubuntu-cloudimage-keyring: ureadahead: uuid-runtime: vim-tiny: vlan: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/pkg-map0000644000175000017500000000110600000000000026577 0ustar00coreycorey00000000000000{ "family": { "redhat": { "guest-agent": "openstack-trove-guest-agent", "netcat-openbsd": "nmap-ncat", "netbase": "", "cron": "", "ifenslave": "", "iputils-tracepath": "", "cloud-guest-utils": "", "apparmor": "", "dmeventd": "", "isc-dhcp-client": "dhcp-client", "uuid-runtime": "", "ubuntu-cloudimage-keyring": "", "vim-tiny": "", "ureadahead": "", "apt-transport-https": "", "pollinate": "", "ifupdown": "" } }, "default": { "guest-agent": "guest-agent" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/post-install.d/0000755000175000017500000000000000000000000030175 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/post-install.d/11-enable-guest-agent-systemd 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/post-install.d/11-enable-guest-age0000755000175000017500000000027100000000000033447 0ustar00coreycorey00000000000000#!/bin/bash if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then set -x fi set -eu set -o pipefail if [ "$DIB_INIT_SYSTEM" == "systemd" ]; then systemctl enable $(svc-map guest-agent) fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/post-install.d/99-clean-apt0000755000175000017500000000024700000000000032231 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Delete contents of apt cache on guest (saves image disk space) set -e set -o xtrace apt-get clean ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/source-repository-guest-agent0000644000175000017500000000037000000000000033203 0ustar00coreycorey00000000000000# This is used for source-based builds guest-agent git /opt/guest-agent https://opendev.org/openstack/trove master upper-constraints file /opt/upper-constraints.txt https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/guest-agent/svc-map0000644000175000017500000000010000000000000026602 0ustar00coreycorey00000000000000guest-agent: default: guest-agent redhat: trove-guest-agent ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/no-resolvconf/0000755000175000017500000000000000000000000025671 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/no-resolvconf/README.rst0000644000175000017500000000061000000000000027355 0ustar00coreycorey00000000000000This element clears out /etc/resolv.conf and prevents dhclient from populating it with data from DHCP. This means that DNS resolution will not work from the guest. This is OK because all outbound connections from the guest will be made using raw IP addresses. In addition, we remove dns from the nsswitch.conf hosts setting. This means that the guest never waits for DNS timeouts to occur.
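As a rough, illustrative sanity check (not part of the element itself; the paths assume a Debian/Ubuntu guest), the element's effect can be verified inside a built image along these lines::

    # no nameservers should be configured
    ! grep -q nameserver /etc/resolv.conf && echo "resolv.conf has no nameservers"
    # the dhclient hook that stops DHCP from rewriting resolv.conf should exist
    test -x /etc/dhcp/dhclient-enter-hooks.d/noresolvconf && echo "dhclient hook installed"
    # 'dns' should be gone from the hosts line
    grep '^hosts:' /etc/nsswitch.conf | grep -q dns || echo "dns removed from nsswitch.conf"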
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/no-resolvconf/finalise.d/0000755000175000017500000000000000000000000027705 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=trove-12.1.0.dev92/integration/scripts/files/elements/no-resolvconf/finalise.d/99-disable-resolv-conf 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/no-resolvconf/finalise.d/99-disable-resolv-con0000755000175000017500000000111500000000000033560 0ustar00coreycorey00000000000000#!/bin/bash echo "" > /etc/resolv.conf echo "" > /etc/resolv.conf.ORIG if [ -d /etc/dhcp/dhclient-enter-hooks.d ]; then # Debian/Ubuntu echo "#!/bin/sh make_resolv_conf() { : ; }" > /etc/dhcp/dhclient-enter-hooks.d/noresolvconf chmod +x /etc/dhcp/dhclient-enter-hooks.d/noresolvconf rm -f /etc/dhcp/dhclient-enter-hooks.d/resolvconf else # RHEL/CentOS/Fedora echo "#!/bin/sh make_resolv_conf() { : ; }" > /etc/dhclient-enter-hooks chmod +x /etc/dhclient-enter-hooks fi if [ -e /etc/nsswitch.conf ]; then sed -i -e "/hosts:/ s/dns//g" /etc/nsswitch.conf fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/0000755000175000017500000000000000000000000025546 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/environment.d/0000755000175000017500000000000000000000000030334 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/environment.d/99-reliable-apt-key-importing.bash 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/environment.d/99-reliable-apt-key0000644000175000017500000000202400000000000033643 0ustar00coreycorey00000000000000# Sometimes the primary key server is unavailable and we should try an # alternate; see # https://bugs.launchpad.net/percona-server/+bug/907789. Disable # shell errexit so we can interrogate the exit code and take action # based on the exit code. We will reenable it later. # # NOTE(zhaochao): we still have this problem from time to time, so it's # better to use more reliable keyservers and just retry (for now, 3 # tries should be fine). # According to: # [1] https://www.gnupg.org/faq/gnupg-faq.html#new_user_default_keyserver # [2] https://sks-keyservers.net/overview-of-pools.php # we'll just use the primary suggested pool: pool.sks-keyservers.net.
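# Typical usage, taken from ubuntu-mysql/pre-install.d/10-percona-apt-key later
# in this tree (the function is exported below so install hooks can call it):
#   get_key_robust 1C4CBDCDCD2EFD2A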
function get_key_robust() { KEY=$1 set +e tries=1 while [ $tries -le 3 ]; do if [ $tries -eq 3 ]; then set -e fi echo "Importing the key, try: $tries" apt-key adv --keyserver hkp://pool.sks-keyservers.net \ --recv-keys ${KEY} && break tries=$((tries+1)) done set -e } export -f get_key_robust ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6961093 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/extra-data.d/0000755000175000017500000000000000000000000030022 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/extra-data.d/15-trove-dep0000755000175000017500000000156600000000000032104 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER # PURPOSE: Set up the requirements file for use by install.d/15-trove-dep source $_LIB/die TROVE_BRANCH=${TROVE_BRANCH:-'master'} REQUIREMENTS_FILE=${TROVESTACK_SCRIPTS}/../../requirements.txt [ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" [ -e ${REQUIREMENTS_FILE} ] || die "Requirements not found" [ -n "$HOST_USERNAME" ] || die "HOST_USERNAME not set" sudo -Hiu ${HOST_USERNAME} dd if=${REQUIREMENTS_FILE} of=${TMP_HOOKS_PATH}/requirements.txt UC_FILE=upper-constraints.txt UC_DIR=$(pwd) UC_BRANCH=${TROVE_BRANCH##stable/} curl -L -o "${UC_DIR}/${UC_FILE}" "https://releases.openstack.org/constraints/upper/${UC_BRANCH}" if [ -f "${UC_DIR}/${UC_FILE}" ]; then sudo -Hiu ${HOST_USERNAME} dd if="${UC_DIR}/${UC_FILE}" of=${TMP_HOOKS_PATH}/${UC_FILE} rm -f "${UC_DIR}/${UC_FILE}" fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/extra-data.d/62-ssh-key0000755000175000017500000000167400000000000031564 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER # PURPOSE: copies the host's SSH keys (created beforehand by trovestack if missing) to a staging area where # they will be duplicated in the guest VM. # This process allows the host to log into the guest, but more importantly the guest phones home to get the trove # source source $_LIB/die [ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" [ -n "${HOST_USERNAME}" ] || die "HOST_USERNAME needs to be set to the current user on the host" if [ `whoami` = "root" ]; then die "This should not be run as root" fi # Guest agent needs to ssh into the controller to download code in dev mode.
if [ -e ${SSH_DIR}/id_rsa ]; then sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa of=${TMP_HOOKS_PATH}/id_rsa sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa.pub of=${TMP_HOOKS_PATH}/id_rsa.pub else die "SSH keys must exist" fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/install.d/0000755000175000017500000000000000000000000027436 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps0000755000175000017500000000035000000000000031457 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install basic services and applications set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive apt-get --allow-unauthenticated -y install ntp apparmor-utils ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep0000755000175000017500000000154400000000000031520 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install trove guest python dependencies - see trovestack functions_qemu set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive apt-get --allow-unauthenticated -y install \ libxml2-dev libxslt1-dev libffi-dev libssl-dev libyaml-dev \ python3 python3-dev python3-pip python3-sqlalchemy python3-setuptools # pick up the requirements file left for us by # extra-data.d/15-trove-dep TMP_HOOKS_DIR="/tmp/in_target.d" UPPER_CONSTRAINTS= if [ -f ${TMP_HOOKS_DIR}/upper-constraints.txt ]; then UPPER_CONSTRAINTS=" -c ${TMP_HOOKS_DIR}/upper-constraints.txt" fi pip3 install pip==9.0.3 pip3 install wheel pip3 install --upgrade -r ${TMP_HOOKS_DIR}/requirements.txt ${UPPER_CONSTRAINTS} echo "diagnostic pip freeze output follows" pip3 freeze echo "diagnostic pip freeze output above" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/install.d/50-user0000755000175000017500000000075200000000000030570 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Add the guest image user that will own the trove agent source...if the user does not already exist set -e set -o xtrace if ! id -u ${GUEST_USERNAME} >/dev/null 2>&1; then echo "Adding ${GUEST_USERNAME} user" useradd -G sudo -m ${GUEST_USERNAME} -s /bin/bash chown ${GUEST_USERNAME}:${GUEST_USERNAME} /home/${GUEST_USERNAME} passwd ${GUEST_USERNAME} <<_EOF_ ${GUEST_USERNAME} ${GUEST_USERNAME} _EOF_ fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/install.d/62-ssh-key0000755000175000017500000000151300000000000031174 0ustar00coreycorey00000000000000#!/bin/bash # PURPOSE: take "staged" ssh keys (see extra-data.d/62-ssh-key) and put them in the guest user's home directory # In the future, this should be removed in favor of using a Nova keypair to inject SSH keys. set -e set -o xtrace SSH_DIR="/home/${GUEST_USERNAME}/.ssh" TMP_HOOKS_DIR="/tmp/in_target.d" if [ !
-e ${SSH_DIR} ]; then # this method worked more reliably in VMware Fusion than doing sudo -Hiu ${GUEST_USERNAME} mkdir ${SSH_DIR} chown ${GUEST_USERNAME}:${GUEST_USERNAME} ${SSH_DIR} fi if [ -e "${TMP_HOOKS_DIR}/id_rsa" ]; then sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa.pub if=${TMP_HOOKS_DIR}/id_rsa.pub sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa if=${TMP_HOOKS_DIR}/id_rsa sudo -Hiu ${GUEST_USERNAME} chmod 600 ${SSH_DIR}/id_rsa else echo "SSH Keys were not staged by host" exit 1 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/install.d/98-ssh0000755000175000017500000000024300000000000030416 0ustar00coreycorey00000000000000#!/bin/bash # Regenerate host keys now. XXX: Really should be a cloud-init task, should get # that working. set -e set -o xtrace dpkg-reconfigure openssh-server ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/install.d/99-clean-apt0000755000175000017500000000025100000000000031465 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Delete contents of apt cache on guest (saves image disk space) set -e set -o xtrace apt-get clean ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/post-install.d/0000755000175000017500000000000000000000000030421 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/post-install.d/05-ipforwarding0000755000175000017500000000014500000000000033264 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace sed -i -r 's/^\s*#(net\.ipv4\.ip_forward=1.*)/\1/' /etc/sysctl.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/post-install.d/10-ntp0000755000175000017500000000030300000000000031362 0ustar00coreycorey00000000000000#!/bin/bash ntpfile=`mktemp` cat << EOF > $ntpfile server ntp.ubuntu.com iburst server 127.127.1.0 fudge 127.127.1.0 stratum 10 EOF mv /etc/ntp.conf /etc/ntp.conf.orig mv $ntpfile /etc/ntp.conf ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/post-install.d/62-trove-guest-sudoers 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/post-install.d/62-trove-guest-sud0000755000175000017500000000077600000000000033663 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: HOST after IMAGE BUILD as SCRIPT USER # PURPOSE: add the guest user account to /etc/sudoers.d with NOPASSWD # Adds user to the sudoers file so they can do everything without a password # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will # see them by forcing PATH TEMPFILE=`mktemp` echo "${GUEST_USERNAME} ALL=(ALL) NOPASSWD:ALL" > $TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/60_trove_guest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/post-install.d/90-apt-get-update0000755000175000017500000000025200000000000033415 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST after packages installed # PURPOSE: do apt-get update to save each instance having to do all the work set -e set -o xtrace apt-get update ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/pre-install.d/0000755000175000017500000000000000000000000030222 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools0000755000175000017500000000024000000000000033305 0ustar00coreycorey00000000000000#!/bin/bash # Install baseline packages and tools. set -e set -o xtrace apt-get --allow-unauthenticated install -y language-pack-en python-software-properties././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mariadb/0000755000175000017500000000000000000000000026016 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mariadb/README.md0000644000175000017500000000016600000000000027300 0ustar00coreycorey00000000000000Sets up a MariaDB server install in the image. TODO: auto-tune settings based on host resources or metadata service. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mariadb/pre-install.d/0000755000175000017500000000000000000000000030472 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mariadb/pre-install.d/20-apparmor-mysql-local 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mariadb/pre-install.d/20-apparmor-mysql0000755000175000017500000000030700000000000033623 0ustar00coreycorey00000000000000#!/bin/sh set -e #CONTEXT: chroot on host #PURPOSE: Allows mysqld to create temporary files when restoring backups cat <<EOF >>/etc/apparmor.d/local/usr.sbin.mysqld /tmp/ rw, /tmp/** rwk, EOF ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mysql/0000755000175000017500000000000000000000000025564 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/0000755000175000017500000000000000000000000030240 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/10-percona-apt-key0000755000175000017500000000112700000000000033404 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during PRE-CONSTRUCTION as ROOT # PURPOSE: Setup apt-repo list so that we can connect to Percona's repo
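# NOTE (added for clarity): get_key_robust used below is not defined in this
# script; it is defined and exported (export -f) by the ubuntu-guest element's
# environment.d/99-reliable-apt-key-importing.bash shown earlier, which
# diskimage-builder sources into the hook environment.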
set -e set -o xtrace GUEST_USERNAME=${GUEST_USERNAME:-"ubuntu"} RELEASE=${DIB_RELEASE:-"xenial"} # Add Percona GPG key mkdir -p /home/${GUEST_USERNAME}/.gnupg get_key_robust 1C4CBDCDCD2EFD2A get_key_robust 9334A25F8507EFA5 # Add Percona repo # Creates the percona sources list cat <<EOL > /etc/apt/sources.list.d/percona.list deb http://repo.percona.com/apt $RELEASE main deb-src http://repo.percona.com/apt $RELEASE main EOL # Force an update apt-get update ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/20-apparmor-mysql-local 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/20-apparmor-mysql-l0000755000175000017500000000034700000000000033626 0ustar00coreycorey00000000000000#!/bin/sh set -e #CONTEXT: chroot on host #PURPOSE: Allows mysqld to create temporary files when restoring backups mkdir -p /etc/apparmor.d/local/ cat <<EOF >>/etc/apparmor.d/local/usr.sbin.mysqld /tmp/ rw, /tmp/** rwk, EOF ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6521087 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-postgresql/0000755000175000017500000000000000000000000026622 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-postgresql/install.d/0000755000175000017500000000000000000000000030512 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql0000755000175000017500000000136000000000000033063 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive cat > "/etc/sysctl.d/10-postgresql-performance.conf" << _EOF_ # See 'http://www.postgresql.org/docs/9.6/static/kernel-resources.html' # for best practices. # It is recommended to disable memory overcommit, # but the Python interpreter may require it on smaller flavors. # We therefore stick with the heuristic overcommit setting. vm.overcommit_memory=0 _EOF_ apt-get --allow-unauthenticated -y install libpq-dev postgresql-12 postgresql-server-dev-12 postgresql-client-12 pgsql_conf=/etc/postgresql/12/main/postgresql.conf sed -i "/listen_addresses/c listen_addresses = '*'" ${pgsql_conf} systemctl restart postgresql # Install the native Python client. pip3 install psycopg2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/0000755000175000017500000000000000000000000031276 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-0000755000175000017500000000054300000000000033724 0ustar00coreycorey00000000000000#!/bin/sh set -e set -o xtrace [ -n "${DIB_RELEASE}" ] || die "RELEASE must be set to a valid Ubuntu release (e.g.
trusty)" cat < /etc/apt/sources.list.d/postgresql.list deb http://apt.postgresql.org/pub/repos/apt/ ${DIB_RELEASE}-pgdg main EOL wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - apt-get update ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/0000755000175000017500000000000000000000000027024 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/element-deps0000644000175000017500000000001500000000000031325 0ustar00coreycorey00000000000000ubuntu-guest ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/0000755000175000017500000000000000000000000031300 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-syst0000755000175000017500000000212400000000000033573 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER # PURPOSE: stages the bootstrap file and upstart conf file while replacing variables so that guest image is properly # configured source $_LIB/die [ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" [ -n "${GUEST_USERNAME}" ] || die "GUEST_USERNAME needs to be set to the user for the guest image" [ -n "${HOST_SCP_USERNAME}" ] || die "HOST_SCP_USERNAME needs to be set to the user for the host instance" [ -n "${ESCAPED_PATH_TROVE}" ] || die "ESCAPED_PATH_TROVE needs to be set to the path to the trove directory on the trovestack host" [ -n "${TROVESTACK_SCRIPTS}" ] || die "TROVESTACK_SCRIPTS needs to be set to the trove/integration/scripts dir" [ -n "${ESCAPED_GUEST_LOGDIR}" ] || die "ESCAPED_GUEST_LOGDIR must be set to the escaped guest log dir" sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/GUEST_LOGDIR/${ESCAPED_GUEST_LOGDIR}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" ${TROVESTACK_SCRIPTS}/files/trove-guest.systemd.conf > ${TMP_HOOKS_PATH}/trove-guest.service ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/0000755000175000017500000000000000000000000030714 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc0000755000175000017500000000042400000000000031634 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: take "staged" trove-guest.conf file and put it in the init directory on guest image dd if=/tmp/in_target.d/trove-guest.service of=/etc/systemd/system/trove-guest.service systemctl enable trove-guest.service ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 
path=trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates 22 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-ce0000755000175000017500000000055600000000000033567 0ustar00coreycorey00000000000000#!/bin/sh # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: configure trove-guest service to use the system store of trusted certificates GUEST_UNIT_DROPINS="/etc/systemd/system/trove-guest.service.d" mkdir -v -p ${GUEST_UNIT_DROPINS} cat <<EOF > ${GUEST_UNIT_DROPINS}/30-use-system-certificates.conf [Service] Environment=REQUESTS_CA_BUNDLE=/etc/ssl/certs EOF ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/post-install.d/0000755000175000017500000000000000000000000031677 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/post-install.d/91-hwe-kernel 22 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/post-install.d/91-hwe-kern0000755000175000017500000000130600000000000033574 0ustar00coreycorey00000000000000#!/bin/bash set -e set -o xtrace # The HWE stack must be installed for nested virtualization on ppc64el. This # environment variable is set automatically by trovestack, but it may also be # set by the user when manually invoking disk-image-create. case "$DIB_USE_HWE_KERNEL" in true|True|TRUE|yes|Yes|YES) DIB_USE_HWE_KERNEL=true ;; *) DIB_USE_HWE_KERNEL=false ;; esac if [ "$DIB_USE_HWE_KERNEL" == "true" ]; then export DEBIAN_FRONTEND=noninteractive PKG_ARCH=$(dpkg --print-architecture) case "$PKG_ARCH" in amd64|arm64|ppc64el|s390x) apt-get --allow-unauthenticated install -y linux-generic-hwe-16.04 ;; esac fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/0000755000175000017500000000000000000000000031500 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs0000755000175000017500000000474400000000000033570 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Purge unneeded packages to slim down the guest image set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive apt-get -y purge acpid\ apport\ apport-symptoms\ apt-transport-https\ aptitude\ at\ bash-completion\ bc\ bind9-host\ bsdmainutils\ busybox-static\ byobu\ command-not-found\ command-not-found-data\ curl\ dbus\ dmidecode\ dosfstools\ ed\ fonts-ubuntu-font-family-console\ friendly-recovery\ ftp\ fuse\ geoip-database\ groff-base\ hdparm\ info\ install-info\ iptables\ iputils-tracepath\ irqbalance\ language-selector-common\ libaccountsservice0\ libevent-2.0-5\ libgeoip1\ libnfnetlink0\ libpcap0.8\ libpci3\ libpipeline1\ libpolkit-gobject-1-0\ libsasl2-modules\ libusb-1.0-0\ lshw\ lsof\ ltrace\ man-db\ mlocate\ mtr-tiny\ nano\ ntfs-3g\ parted\ patch\ plymouth-theme-ubuntu-text\ popularity-contest\ powermgmt-base\ ppp\ screen\ shared-mime-info\ strace\ tcpdump\ telnet\ time\ tmux\ ubuntu-standard\ ufw\ update-manager-core\
update-notifier-common\ usbutils\ uuid-runtime\ # The following packages cannot be removed as they cause cloud-init to be # uninstalled in Ubuntu 14.04 # gir1.2-glib-2.0 # libdbus-glib-1-2 # libgirepository-1.0-1 # python-chardet # python-serial # xz-utils apt-get -y autoremove ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-mariadb/0000755000175000017500000000000000000000000027274 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps0000644000175000017500000000001600000000000031576 0ustar00coreycorey00000000000000ubuntu-mariadb././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/0000755000175000017500000000000000000000000031164 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb0000755000175000017500000000261700000000000032737 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages # Refer to https://mariadb.com/kb/en/library/installing-mariadb-deb-files set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive # These GPG key IDs are used to fetch keys from a keyserver on Ubuntu & Debian apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 curl -sS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup | bash -s -- --mariadb-server-version="mariadb-10.4" --skip-key-import --skip-maxscale apt-get install -y -qq apt-transport-https ca-certificates gnupg2 # NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb # Disable password prompt debconf-set-selections <<< "mariadb-server mysql-server/root_password password ''" debconf-set-selections <<< "mariadb-server mysql-server/root_password_again password ''" apt-get update -qq apt-get install -y -qq --allow-unauthenticated mariadb-server mariadb-client galera-4 libmariadb3 mariadb-backup mariadb-common cat </etc/mysql/conf.d/no_perf_schema.cnf [mysqld] performance_schema = off EOF chown mysql:mysql /etc/mysql/my.cnf rm -f /etc/init.d/mysql systemctl daemon-reload systemctl enable mariadb././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-mysql/0000755000175000017500000000000000000000000027042 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps0000644000175000017500000000001500000000000031343 0ustar00coreycorey00000000000000ubuntu-mysql ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 
trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/0000755000175000017500000000000000000000000030732 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql0000755000175000017500000000204500000000000032246 0ustar00coreycorey00000000000000#!/bin/bash # CONTEXT: GUEST during CONSTRUCTION as ROOT # PURPOSE: Install controller base required packages set -e set -o xtrace export DEBIAN_FRONTEND=noninteractive apt-get --allow-unauthenticated -y install mysql-client mysql-server gnupg2 # NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb apt-get update # Xenial provides mysql 5.7 which requires percona-xtrabackup-24 PXB_VERSION_OVERRIDE=24 apt-get --allow-unauthenticated -y install percona-xtrabackup-${PXB_VERSION_OVERRIDE} cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_ [mysqld] performance_schema = off show_compatibility_56 = on _EOF_ mv /etc/mysql/my.cnf.fallback /etc/mysql/my.cnf chown mysql:mysql /etc/mysql/my.cnf cat >/etc/mysql/my.cnf <<_EOF_ [mysql] !includedir /etc/mysql/conf.d/ _EOF_ if [ -e /etc/init/mysql.conf ]; then rm -f /etc/init/mysql.conf fi systemctl enable mysql ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-postgresql/0000755000175000017500000000000000000000000030100 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps0000644000175000017500000000002100000000000032376 0ustar00coreycorey00000000000000ubuntu-postgresql././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/keys/0000755000175000017500000000000000000000000022236 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/keys/authorized_keys0000644000175000017500000000061300000000000025372 0ustar00coreycorey00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDmQeA/uyEyFf9DsmwR+OztWb7Hb/uTC+R3xG1QgBvRwhSbpBnyBESGMZZ07bIw5Ib7BUSDzwoeryUqNAhAhir2KLeIYODS39UmTwOIl+rIvhlTxhsIoQHV90pewD2qw0T8KgVMPUDsQ0Bd98E6e5dbxciZp67ihVD0r7srhdSRo8PIc56hJWrD52j5FeiIGEmLXHXiZLOyma1M7j/EmiV81wHAzgql6sihWSZHm3xPZZ712JtXbmHhe3RLFIK13u9PSb3XbuEIdGwkZdzP+vYNE0CsYqwjXjVRrY/APsiEkbSNVzHI5p2W1L7ZMtSOMUqZ1Ve+sytVb+YcIJ9L8y07 trove@devstack././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/keys/id_rsa0000644000175000017500000000321600000000000023424 0ustar00coreycorey00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA5kHgP7shMhX/Q7JsEfjs7Vm+x2/7kwvkd8RtUIAb0cIUm6QZ 8gREhjGWdO2yMOSG+wVEg88KHq8lKjQIQIYq9ii3iGDg0t/VJk8DiJfqyL4ZU8Yb CKEB1fdKXsA9qsNE/CoFTD1A7ENAXffBOnuXW8XImaeu4oVQ9K+7K4XUkaPDyHOe oSVqw+do+RXoiBhJi1x14mSzspmtTO4/xJolfNcBwM4KperIoVkmR5t8T2We9dib 
V25h4Xt0SxSCtd7vT0m9127hCHRsJGXcz/r2DRNArGKsI141Ua2PwD7IhJG0jVcx yOadltS+2TLUjjFKmdVXvrMrVW/mHCCfS/MtOwIDAQABAoIBAQCTAQHbjmwuHu8e 2F24x7Tz//UWPT9fbRtK/6RO3ctlCsS/bXCHHARnrGcDdfHq1yv6PS21/UvXtThE Dn4qO75X9DzgnAFNgEwELjPyVBM4YG2pF3SQ+MJESaI4hgGY8Rws5eMF/qFhdbo1 hATggqFqnQZqWy0DP9wkq8ESk1nYNICehj0d6Mo5uW190TDMD6QrfHg4rDYbgITf SCmsRdybCASlgOHCrYgjweG9czNoFimhaG8WwP59yfCX0A1TrDJ0toryyopupIre A+5HHBM2Dk3KylrtVBAPRsM9eGUo4bmz8p6hRkuw7mr321d416MHdIXcHK38EWR0 ZvUWM8QJAoGBAPlPe3ggR9xhEbQJQkOohnZ1hkogE/uxZrcRiUI3pnDNcCXy9Ogt SYfwxYnHLa5kuynbmCCzSLOtq2DN9QwJ5o+zgRjX9T8DAWub6KFdEnTHhq/ZLF/w PWPg3Oe8dYDEEcrPLvN25aetY2LrWKsRPFL8//WLJc1+LFRG6Vc1ATftAoGBAOxv hGxNGrcD5c8g0ZcyeKVbCCRGfp0+mwonnQ25mDyIXNH+PEHa7SPs5dVexA5r0/Ky lM/jQgs756EvslwA2oukqVz7ehDhJI4RE92OPjpYrAu7HF6eN/fAhUMghs/vAZ6c YCM6i9emHYHM4mU6H/yLIr+0e2JNf+479bB5hQTHAoGBAOGnFUQXQ7OukE16C+Yd RQc3PIMfIbcwTJ3qW2f54sY6zAUtMIptYx6NyN35z9kHB3jNb5Y5b9ZhnLqT7/Yj h/INMQ4BedK8r65sgVR8X1YfukKzuLxlP8uFHa0KIPiZftkoSYDH3vmzsD86cRj1 ErqykCH4/hBO4WSugkkSirXJAoGARXvQcvOF8lsW4nRGpCSVCCNklSSSeSu47JcP tMTiVIfOn3gTxVbNck1gjgA3pfVSaHTK/v1On3aPb/NQe3FUyM0vaMAO8372+zlR mT8AUq1Ugm4OvE/LKuhNQZkBhYI7+50BM9k0179d1JOdxRn75IAPSj+EMzOLcTv1 zFMqIGkCgYBm5xT3Gu8fJh/8ztelzrDkGga6UpYKKYjHGFHpaqmDn9sjjCu8X2pG JUGgyUVj1NkJAtHMS45Ud3upQwxpy2aNmMaQbwzHybvX7EYZHHVuCwsSzaXRtwj2 Q6mG1Ghi0UQ76SPKQr0Vu8Uu+0CAzYAK4IEKeH6BCRjrzHggSpdNzQ== -----END RSA PRIVATE KEY-----././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/keys/id_rsa.pub0000644000175000017500000000061300000000000024207 0ustar00coreycorey00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDmQeA/uyEyFf9DsmwR+OztWb7Hb/uTC+R3xG1QgBvRwhSbpBnyBESGMZZ07bIw5Ib7BUSDzwoeryUqNAhAhir2KLeIYODS39UmTwOIl+rIvhlTxhsIoQHV90pewD2qw0T8KgVMPUDsQ0Bd98E6e5dbxciZp67ihVD0r7srhdSRo8PIc56hJWrD52j5FeiIGEmLXHXiZLOyma1M7j/EmiV81wHAzgql6sihWSZHm3xPZZ712JtXbmHhe3RLFIK13u9PSb3XbuEIdGwkZdzP+vYNE0CsYqwjXjVRrY/APsiEkbSNVzHI5p2W1L7ZMtSOMUqZ1Ve+sytVb+YcIJ9L8y07 trove@devstack././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7001095 trove-12.1.0.dev92/integration/scripts/files/requirements/0000755000175000017500000000000000000000000024006 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/requirements/fedora-requirements.txt0000644000175000017500000000177000000000000030535 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
unittest2 testtools extras python-novaclient>=2.22.0 python-swiftclient>=2.2.0 python-cinderclient>=1.1.0 python-keystoneclient>=2.0.0,!=2.1.0 # Apache-2.0 kombu>=2.5.0 babel>=1.3 python-heatclient>=0.3.0 passlib jinja2>=2.6 PyMySQL>=0.6.2 # MIT License python-neutronclient>=2.3.11,<3 netifaces>=0.10.4 oslo.config>=1.9.3 # Apache-2.0 oslo.messaging>=1.8.0 # Apache-2.0 oslo.i18n>=1.5.0 # Apache-2.0 oslo.serialization>=1.4.0 # Apache-2.0 oslo.service>=0.1.0 # Apache-2.0 oslo.utils>=1.4.0 # Apache-2.0 oslo.log>=1.8.0 # Apache-2.0 osprofiler>=0.3.0 oslo.concurrency>=1.8.0 # Apache-2.0 pexpect>=3.1,!=3.3 enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD cryptography>=2.1.4 # BSD/Apache-2.0 xmltodict>=0.10.1 # MIT ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/requirements/ubuntu-requirements.txt0000644000175000017500000000153700000000000030620 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. unittest2 testtools extras python-novaclient>=2.18.0 python-swiftclient>=2.2.0 python-cinderclient>=1.1.0 python-keystoneclient>=2.0.0,!=2.1.0 # Apache-2.0 kombu>=2.5.0 six>=1.7.0 babel python-heatclient>=0.2.9 passlib jinja2 PyMySQL>=0.6.2 # MIT License python-neutronclient>=2.3.6,<3 netifaces>=0.10.4 oslo.config>=1.4.0 # Apache-2.0 oslo.messaging>=1.4.0,!=1.5.0 oslo.i18n>=1.0.0 oslo.serialization>=1.0.0 oslo.service>=0.1.0 # Apache-2.0 oslo.utils>=1.1.0 osprofiler>=0.3.0 oslo.concurrency>=0.3.0 enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD cryptography>=2.1.4 # BSD/Apache-2.0 xmltodict>=0.10.1 # MIT ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/files/trove-guest.systemd.conf0000644000175000017500000000337300000000000026113 0ustar00coreycorey00000000000000[Unit] Description=Trove Guest After=syslog.target network.target [Install] WantedBy=multi-user.target [Service] Type=simple User=GUEST_USERNAME Group=GUEST_USERNAME # This script is only for testing purpose for dev_mode=true, the controller # IP address should be defined in /etc/trove/controller.conf, e.g. 
# CONTROLLER=192.168.32.151 EnvironmentFile=/etc/trove/controller.conf ExecStartPre=/bin/bash -c "sudo mkdir -p GUEST_LOGDIR" # If ~/trove-installed does not exist, copy the trove source from # the user's development environment, then touch the sentinel file ExecStartPre=/bin/bash -c "test -e /home/GUEST_USERNAME/trove-installed || sudo rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/GUEST_USERNAME/.ssh/id_rsa' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:PATH_TROVE/ /home/GUEST_USERNAME/trove && touch /home/GUEST_USERNAME/trove-installed" # If /etc/trove does not exist, create it and then copy the trove-guestagent.conf # from /etc/trove on the user's development environment, ExecStartPre=/bin/bash -c "test -d /etc/trove/conf.d || sudo mkdir -p /etc/trove/conf.d && sudo rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/GUEST_USERNAME/.ssh/id_rsa' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:/etc/trove/trove-guestagent.conf ~GUEST_USERNAME/ && sudo mv ~GUEST_USERNAME/trove-guestagent.conf /etc/trove/conf.d/trove-guestagent.conf" ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove /home/GUEST_USERNAME/trove GUEST_LOGDIR" # Start trove-guest.service ExecStart=/bin/bash -c "/home/GUEST_USERNAME/trove/contrib/trove-guestagent --config-dir=/etc/trove/conf.d" TimeoutSec=300 Restart=on-failure # PostgreSQL doesn't play nice with PrivateTmp PrivateTmp=false././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/files/trove-guest.upstart.conf0000644000175000017500000000347400000000000026127 0ustar00coreycorey00000000000000description "Trove Guest" author "Auto-Gen" start on (filesystem and net-device-up IFACE!=lo) stop on runlevel [016] chdir /var/run pre-start script mkdir -p /var/run/trove chown GUEST_USERNAME:root /var/run/trove/ mkdir -p /var/lock/trove chown GUEST_USERNAME:root /var/lock/trove/ mkdir -p GUEST_LOGDIR chown GUEST_USERNAME:root GUEST_LOGDIR # Copy the trove source from the user's development environment if [ ! -d /home/GUEST_USERNAME/trove ]; then sudo -u GUEST_USERNAME rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' -avz --exclude='.*' HOST_SCP_USERNAME@CONTROLLER_IP:PATH_TROVE/ /home/GUEST_USERNAME/trove fi # Ensure conf dir exists and is readable mkdir -p /etc/trove/conf.d chmod -R +r /etc/trove end script script # For backwards compatibility until https://review.opendev.org/#/c/100381 merges TROVE_CONFIG="--config-dir=/etc/trove/conf.d" if [ ! -f /etc/trove/conf.d/guest_info ] && [ ! 
-f /etc/trove/conf.d/trove-guestagent.conf ]; then chmod +r /etc/guest_info sudo -u GUEST_USERNAME rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' -avz --exclude='.*' HOST_SCP_USERNAME@CONTROLLER_IP:/etc/trove/trove-guestagent.conf ~GUEST_USERNAME/ mv ~GUEST_USERNAME/trove-guestagent.conf /etc/trove/trove-guestagent.conf TROVE_CONFIG="--config-file=/etc/guest_info --config-file=/etc/trove/trove-guestagent.conf" fi # Requests: CA directories are not supported in older Pythons, so a custom bundle file is needed cat /usr/local/share/ca-certificates/*.crt > /usr/local/share/ca-certificates/custom.bundle exec su -c "REQUESTS_CA_BUNDLE=/usr/local/share/ca-certificates/custom.bundle /home/GUEST_USERNAME/trove/contrib/trove-guestagent $TROVE_CONFIG" GUEST_USERNAME end script ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/functions0000644000175000017500000002603300000000000022120 0ustar00coreycorey00000000000000#!/bin/bash # This file format was stolen from devstack <3 # This method was stolen from devstack # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. # Set global RECLONE=yes to simulate a clone when dest-dir exists # git_clone remote dest-dir branch function git_clone { [[ "$OFFLINE" = "True" ]] && return GIT_REMOTE=$1 GIT_DEST=$2 GIT_BRANCH=$3 if echo $GIT_BRANCH | egrep -q "^refs"; then # If our branch name is a gerrit style refs/changes/... if [[ ! -d $GIT_DEST ]]; then git_timed clone $GIT_REMOTE $GIT_DEST fi cd $GIT_DEST git_timed fetch $GIT_REMOTE $GIT_BRANCH && git_timed checkout FETCH_HEAD else # do a full clone only if the directory doesn't exist if [[ ! -d $GIT_DEST ]]; then git_timed clone $GIT_REMOTE $GIT_DEST cd $GIT_DEST # This checkout syntax works for both branches and tags git_timed checkout $GIT_BRANCH elif [[ "$RECLONE" == "yes" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $GIT_DEST # set the url to pull from and fetch git_timed remote set-url origin $GIT_REMOTE git_timed fetch origin # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) find $GIT_DEST -name '*.pyc' -delete git_timed checkout -f origin/$GIT_BRANCH # a local branch might not exist git_timed branch -D $GIT_BRANCH || true git_timed checkout -b $GIT_BRANCH fi fi } # Determine if the given option is present in the INI file # ini_has_option config-file section option function ini_has_option() { local file=$1 local section=$2 local option=$3 local line line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") [ -n "$line" ] } # Get an option from an INI file # iniget config-file section option function iniget() { local file=$1 local section=$2 local option=$3 local line line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") echo ${line#*=} } # Set an option in an INI file # iniset config-file section option value function iniset() { local file=$1 local section=$2 local option=$3 local value=$4 if ! grep -q "^\[$section\]" "$file"; then # Add section at the end echo -e "\n[$section]" >>"$file" fi if !
ini_has_option "$file" "$section" "$option"; then # Add it sed -i -e "/^\[$section\]/ a\\ $option = $value " "$file" else # Replace it sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file" fi } # Determine OS Vendor, Release and Update # Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora # Returns results in global variables: # os_VENDOR - vendor name # os_RELEASE - release # os_UPDATE - update # os_PACKAGE - package type # os_CODENAME - vendor's codename for release # GetOSVersion GetOSVersion() { # Figure out which vendor we are if [[ -x "`which sw_vers 2>/dev/null`" ]]; then # OS/X os_VENDOR=`sw_vers -productName` os_RELEASE=`sw_vers -productVersion` os_UPDATE=${os_RELEASE##*.} os_RELEASE=${os_RELEASE%.*} os_PACKAGE="" if [[ "$os_RELEASE" =~ "10.7" ]]; then os_CODENAME="lion" elif [[ "$os_RELEASE" =~ "10.6" ]]; then os_CODENAME="snow leopard" elif [[ "$os_RELEASE" =~ "10.5" ]]; then os_CODENAME="leopard" elif [[ "$os_RELEASE" =~ "10.4" ]]; then os_CODENAME="tiger" elif [[ "$os_RELEASE" =~ "10.3" ]]; then os_CODENAME="panther" else os_CODENAME="" fi elif [[ -x $(which lsb_release 2>/dev/null) ]]; then os_VENDOR=$(lsb_release -i -s) os_RELEASE=$(lsb_release -r -s) os_UPDATE="" os_PACKAGE="rpm" if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then os_PACKAGE="deb" elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then lsb_release -d -s | grep -q openSUSE if [[ $? -eq 0 ]]; then os_VENDOR="openSUSE" fi elif [[ $os_VENDOR == "openSUSE project" ]]; then os_VENDOR="openSUSE" elif [[ $os_VENDOR =~ Red.*Hat ]]; then os_VENDOR="Red Hat" fi os_CODENAME=$(lsb_release -c -s) elif [[ -r /etc/redhat-release ]]; then # Red Hat Enterprise Linux Server release 5.5 (Tikanga) # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo) # CentOS release 5.5 (Final) # CentOS Linux release 6.0 (Final) # Fedora release 16 (Verne) # XenServer release 6.2.0-70446c (xenenterprise) os_CODENAME="" for r in "Red Hat" CentOS Fedora XenServer; do os_VENDOR=$r if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` os_CODENAME=${ver#*|} os_RELEASE=${ver%|*} os_UPDATE=${os_RELEASE##*.} os_RELEASE=${os_RELEASE%.*} break fi os_VENDOR="" done os_PACKAGE="rpm" elif [[ -r /etc/SuSE-release ]]; then for r in openSUSE "SUSE Linux"; do if [[ "$r" = "SUSE Linux" ]]; then os_VENDOR="SUSE LINUX" else os_VENDOR=$r fi if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` break fi os_VENDOR="" done os_PACKAGE="rpm" # If lsb_release is not installed, we should be able to detect Debian OS elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then os_VENDOR="Debian" os_PACKAGE="deb" os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}') os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g') fi export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME } # Translate the OS version values into common nomenclature # Sets ``DISTRO`` from the ``os_*`` values function GetDistro() { GetOSVersion if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then DISTRO_NAME=ubuntu DISTRO_RELEASE=$os_CODENAME elif [[ "$os_VENDOR" =~ (Fedora) ]]; then DISTRO_NAME=fedora DISTRO_RELEASE=$os_RELEASE elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then 
DISTRO_NAME=opensuse DISTRO_RELEASE=$os_RELEASE elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then DISTRO_NAME=sle # For SLE, also use the service pack if [[ -z "$os_UPDATE" ]]; then DISTRO_RELEASE=$os_RELEASE else DISTRO_RELEASE="${os_RELEASE}sp${os_UPDATE}" fi elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then # Drop the . release as we assume it's compatible DISTRO_NAME=rhel DISTRO_RELEASE=${os_RELEASE::1} elif [[ "$os_VENDOR" =~ (XenServer) ]]; then DISTRO_NAME=xs DISTRO_RELEASE=$os_RELEASE else # Catch-all for now is Vendor + Release + Update DISTRO_NAME=$os_VENDOR DISTRO_RELEASE=$os_RELEASE.$os_UPDATE fi export DISTRO_NAME export DISTRO_RELEASE } # Determine if current distribution is a Fedora-based distribution # (Fedora, RHEL, CentOS, etc.). # is_fedora function is_fedora { if [[ -z "$os_VENDOR" ]]; then GetOSVersion fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] } # Determine if current distribution is a SUSE-based distribution # (openSUSE, SLE). # is_suse function is_suse { if [[ -z "$os_VENDOR" ]]; then GetOSVersion fi [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] } # Get the path to the directory where python executables are installed. # get_python_exec_prefix function get_python_exec_prefix() { if is_fedora || is_suse; then echo "/usr/bin" else echo "/usr/local/bin" fi } # Returns 'true' for 'true', 'yes', 'on' or '1'; 'false' otherwise. # Converts values to lower case first. # If no default is provided, assumes false. function get_bool() { local VARIABLE="$1" local DEFAULT=${2:-false} VALUE=${!VARIABLE:-$DEFAULT} VALUE=$(eval echo "$VALUE" | tr '[:upper:]' '[:lower:]') if [[ "1 yes true on" =~ "$VALUE" ]]; then VALUE=true else VALUE=false fi echo $VALUE } # Get the project branch to switch to. Uses PROJECT_BRANCH_NAME, # then BRANCH_DEFAULT, then 'master' function get_project_branch() { local PROJECT_BRANCH_NAME=$1 local BRANCH_DEFAULT=${2:-master} PROJECT_BRANCH_OVERRIDE=${!PROJECT_BRANCH_NAME} BRANCH=${PROJECT_BRANCH_OVERRIDE:-$BRANCH_DEFAULT} echo "$BRANCH" } # Try to switch to a branch or commit in a repo # Fails if the branch/commit doesn't exist function git_checkout() { local PROJECT=$1 local REPO_DIR=$2 local REPO_BRANCH=$3 local REPO_BRANCH_VAR_NAME=$4 if [ -n "$REPO_BRANCH" ]; then pushd "$REPO_DIR" if [ $PROJECT == "diskimage-builder" ] || [ $PROJECT == "tripleo-image-elements" ]; then REPO_BRANCH=master fi CURRENT_BRANCH=$(git branch | grep "\*" | awk '{print $2}') GIT_STATUS=$(git checkout "$REPO_BRANCH" &> /dev/null || echo "failed") if [[ "$GIT_STATUS" = "failed" ]]; then exclaim "${COLOR_RED}Could not switch to branch/commit '$REPO_BRANCH' in $PROJECT, exiting${COLOR_NONE}" echo "Please set '$REPO_BRANCH_VAR_NAME' to a valid branch/commit and try again." if [[ "$CURRENT_BRANCH" != "master" ]]; then echo "Note: This repo is currently on branch ${CURRENT_BRANCH} - if this is correct," echo "you should set $REPO_BRANCH_VAR_NAME=${CURRENT_BRANCH} and re-run your command." else echo "Note: This error may also mean that there are modified files in $PROJECT." echo " If that is the case, please stash them and re-run your command."
fi exit 1 else if [[ "$REPO_BRANCH" != "$CURRENT_BRANCH" ]]; then exclaim "${COLOR_BLUE}Switched to $PROJECT branch '$REPO_BRANCH'${COLOR_NONE}" else echo "Using $PROJECT branch '$REPO_BRANCH'" fi fi popd fi } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/functions_qemu0000644000175000017500000001147200000000000023150 0ustar00coreycorey00000000000000#!/bin/bash # # Additional functions that would mostly just pertain to a Ubuntu + Qemu setup # function build_vm() { exclaim "Actually building the image, this can take up to 15 minutes" rm -rf ~/.cache/image-create local datastore_type=$1 local guest_os=$2 local guest_release=$3 local dev_mode=$4 local guest_username=$5 local image_output=$6 local elementes="base vm" local trove_elements_path=${PATH_TROVE}/integration/scripts/files/elements local GUEST_IMAGETYPE=${GUEST_IMAGETYPE:-"qcow2"} local GUEST_IMAGESIZE=${GUEST_IMAGESIZE:-4} local GUEST_CACHEDIR=${GUEST_CACHEDIR:-"$HOME/.cache/image-create"} local working_dir=$(dirname ${image_output}) export GUEST_USERNAME=${guest_username} # In dev mode, the trove guest agent needs to download trove code from # trove-taskmanager host during service initialization. if [[ "${dev_mode,,}" == "true" ]]; then export PATH_TROVE=${PATH_TROVE} export ESCAPED_PATH_TROVE=$(echo ${PATH_TROVE} | sed 's/\//\\\//g') export GUEST_LOGDIR=${GUEST_LOGDIR:-"/var/log/trove/"} export ESCAPED_GUEST_LOGDIR=$(echo ${GUEST_LOGDIR} | sed 's/\//\\\//g') export TROVESTACK_SCRIPTS=${TROVESTACK_SCRIPTS} export HOST_SCP_USERNAME=${HOST_SCP_USERNAME:-$(whoami)} export HOST_USERNAME=${HOST_SCP_USERNAME} export SSH_DIR=${SSH_DIR:-"$HOME/.ssh"} export DEST=${DEST:-'/opt/stack'} export TROVE_BRANCH=${TROVE_BRANCH:-'master'} manage_ssh_keys fi # For system-wide installs, DIB will automatically find the elements, so we only check local path if [ "${DIB_LOCAL_ELEMENTS_PATH}" ]; then export ELEMENTS_PATH=${trove_elements_path}:${DIB_LOCAL_ELEMENTS_PATH} else export ELEMENTS_PATH=${trove_elements_path} fi export DIB_RELEASE=${guest_release} export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive" # https://cloud-images.ubuntu.com/releases is more stable than the daily # builds(https://cloud-images.ubuntu.com/xenial/current/), # e.g. sometimes SHA256SUMS file is missing in the daily builds declare -A releasemapping=( ["xenial"]="16.04" ["bionic"]="18.04") export DIB_CLOUD_IMAGES="https://cloud-images.ubuntu.com/releases/${DIB_RELEASE}/release/" export BASE_IMAGE_FILE="ubuntu-${releasemapping[${DIB_RELEASE}]}-server-cloudimg-amd64-root.tar.gz" TEMP=$(mktemp -d ${working_dir}/diskimage-create.XXXXXXX) pushd $TEMP > /dev/null elementes="$elementes ${guest_os}" if [[ "${dev_mode,,}" == "false" ]]; then elementes="$elementes pip-and-virtualenv" elementes="$elementes pip-cache" elementes="$elementes guest-agent" else # Install guest agent dependencies, user, etc. elementes="$elementes ${guest_os}-guest" # Install guest agent service elementes="$elementes ${guest_os}-${guest_release}-guest" fi elementes="$elementes ${guest_os}-${datastore_type}" elementes="$elementes ${guest_os}-${guest_release}-${datastore_type}" # Build the image disk-image-create -x \ -a amd64 \ -o ${image_output} \ -t ${GUEST_IMAGETYPE} \ --image-size ${GUEST_IMAGESIZE} \ --image-cache ${GUEST_CACHEDIR} \ $elementes # out of $TEMP popd > /dev/null sudo rm -rf $TEMP exclaim "Image ${image_output} was built successfully." 
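# A minimal sketch of calling build_vm directly (argument values are
# illustrative; build_guest_image below is the usual entry point):
#   build_vm mysql ubuntu xenial true ubuntu $HOME/images/trove-mysql-guest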
} function build_guest_image() { exclaim "Params for build_guest_image function: $@" local datastore_type=${1:-"mysql"} local guest_os=${2:-"ubuntu"} local guest_release=${3:-"xenial"} local dev_mode=${4:-"true"} local guest_username=${5:-"ubuntu"} local output=$6 VALID_SERVICES='mysql percona mariadb redis cassandra couchbase mongodb postgresql couchdb vertica db2 pxc' if ! [[ " $VALID_SERVICES " =~ " $datastore_type " ]]; then exclaim "You did not pass in a valid datastore type. Valid types are:" $VALID_SERVICES exit 1 fi build_vm ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username} ${output} } function clean_instances() { LIST=`virsh -q list|awk '{print $1}'` for i in $LIST; do sudo virsh destroy $i; done } # In dev mode, guest agent needs to ssh into the controller to download code. function manage_ssh_keys() { if [ -d ${SSH_DIR} ]; then echo "${SSH_DIR} already exists" else echo "Creating ${SSH_DIR} for ${HOST_SCP_USERNAME}" sudo -Hiu ${HOST_SCP_USERNAME} mkdir -m go-w -p ${SSH_DIR} fi if [ ! -f ${SSH_DIR}/id_rsa.pub ]; then /usr/bin/ssh-keygen -f ${SSH_DIR}/id_rsa -q -N "" fi cat ${SSH_DIR}/id_rsa.pub >> ${SSH_DIR}/authorized_keys sort ${SSH_DIR}/authorized_keys | uniq > ${SSH_DIR}/authorized_keys.uniq mv ${SSH_DIR}/authorized_keys.uniq ${SSH_DIR}/authorized_keys chmod 600 ${SSH_DIR}/authorized_keys } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/image-projects-list0000644000175000017500000000005100000000000023762 0ustar00coreycorey00000000000000diskimage-builder tripleo-image-elements ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7041094 trove-12.1.0.dev92/integration/scripts/local.conf.d/0000755000175000017500000000000000000000000022421 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/local.conf.d/ceilometer_cinder.conf.rc0000644000175000017500000000011200000000000027341 0ustar00coreycorey00000000000000[[post-config|\$CINDER_CONF]] [DEFAULT] notification_driver = messagingv2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/local.conf.d/ceilometer_nova.conf.rc0000644000175000017500000000010200000000000027037 0ustar00coreycorey00000000000000[[post-config|\$NOVA_CONF]] [DEFAULT] instance_usage_audit = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/local.conf.d/ceilometer_services.conf.rc0000644000175000017500000000010500000000000027722 0ustar00coreycorey00000000000000[[post-config|\$CEILOMETER_CONF]] [notification] store_events = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/local.conf.d/sample.rc0000644000175000017500000000270400000000000024233 0ustar00coreycorey00000000000000# # Files in this directory are automatically added to the devstack # local.conf file, between a specific set of tags. # # Filenames must end with '.rc' to be recognized; sample.rc is # ignored. # # A '\' is required in front of any devstack variables since all # .rc files are parsed first (using eval). 
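# For example, write [[post-config|\$TROVE_CONF]] (with the backslash) so
# that $TROVE_CONF survives the eval pass and is expanded later by devstack.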
# # Meta section headings must be included in each file, such as: # [[local|localrc]] # as the order of inserting the files is not guaranteed. # # All files are inherently included by default - to exclude a file, # add a variable 'FILENAME_IN_UPPERCASE_MINUS_RC=false' in trovestack.rc # For Example: USING_VAGRANT=false (for the using_vagrant.rc file). # # Symbolic links are followed, so additional files can be loaded # by placing them in an external directory and linking it in # local.conf.d (this should allow complete flexibility in setting # up testing options). # For Example: # cd /path/to/trove/integration/scripts/local.conf.d # ln -s $HOME/local.conf.d local.conf.d # cp /path/to/my_conf.rc $HOME/local.conf.d [[local|localrc]] # Put regular devstack variables under this meta section heading. # This section is written out to a file and sourced by devstack, # so it can contain logic as well. # The following section types should only contain ini file style # section headings and name=value pairs [[post-config|\$TROVE_CONF]] [[post-config|\$TROVE_TASKMANAGER_CONF]] [[post-config|\$TROVE_CONDUCTOR_CONF]] [[post-config|\$TROVE_API_PASTE_INI]] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/local.conf.d/trove_services.conf.rc0000644000175000017500000000110200000000000026727 0ustar00coreycorey00000000000000[[post-config|\$TROVE_CONF]] [profiler] enabled = $ENABLE_PROFILER trace_sqlalchemy = $PROFILER_TRACE_SQL [[post-config|\$TROVE_TASKMANAGER_CONF]] [profiler] enabled = $ENABLE_PROFILER trace_sqlalchemy = $PROFILER_TRACE_SQL [[post-config|\$TROVE_CONDUCTOR_CONF]] [profiler] enabled = $ENABLE_PROFILER trace_sqlalchemy = $PROFILER_TRACE_SQL [[post-config|\$TROVE_GUESTAGENT_CONF]] [profiler] enabled = $ENABLE_PROFILER trace_sqlalchemy = $PROFILER_TRACE_SQL [[post-config|\$TROVE_API_PASTE_INI]] [filter:osprofiler] enabled = $ENABLE_PROFILER hmac_keys = $PROFILER_HMAC_KEYS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/local.conf.d/use_kvm.rc0000644000175000017500000000010500000000000024414 0ustar00coreycorey00000000000000[[local|localrc]] # force kvm as the libvirt type. LIBVIRT_TYPE=kvm ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/local.conf.d/use_uuid_token.rc0000644000175000017500000000005600000000000025772 0ustar00coreycorey00000000000000[[local|localrc]] KEYSTONE_TOKEN_FORMAT=UUID ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/local.conf.d/using_vagrant.rc0000644000175000017500000000057200000000000025622 0ustar00coreycorey00000000000000[[local|localrc]] # This is similar to code found at # https://github.com/bcwaldon/vagrant_devstack/blob/master/Vagrantfile # and seems to make instances ping'able in VirtualBox. FLAT_INTERFACE=eth1 PUBLIC_INTERFACE=eth1 FLOATING_RANGE=`ip_chunk eth0 1`.`ip_chunk eth0 2`.`ip_chunk eth0 3`.128/28 HOST_IP=`ip_chunk eth0 1`.`ip_chunk eth0 2`.`ip_chunk eth0 3`.`ip_chunk eth0 4` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/local.conf.rc0000644000175000017500000000152000000000000022523 0ustar00coreycorey00000000000000$TROVE_PRESENT_TAG # Set some arguments for devstack. 
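# (When trovestack renders this template, $TROVE_PRESENT_TAG above becomes
# the literal marker '# generated-by-trovestack', which later runs grep for
# to detect an already-configured local.conf.)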
# # Note: This file contains autogenerated parts. # All lines are removed from between the tag/end of tag # markers (lines with '$MARKER_TOKEN' at beginning and end) and # are replaced by trovestack. # Edits to these sections will not persist. # # See the '$USER_OPTS_TAG' section # for ways to insert user args into this file. # # # This section is for things that belong in localrc # It comes from $DEFAULT_LOCALRC # [[local|localrc]] $LOCALRC_OPTS_TAG $LOCALRC_OPTS_TAG_END # # User options here were inserted from the file USER_LOCAL_CONF # (defaults to $USERHOME/.$LOCAL_CONF) # $USER_OPTS_TAG $USER_OPTS_TAG_END # # Additional options here were inserted by trovestack # automatically from files in $LOCAL_CONF_D # $ADD_OPTS_TAG $ADD_OPTS_TAG_END ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/localrc.rc0000644000175000017500000000446700000000000022135 0ustar00coreycorey00000000000000# These passwords originally come from trovestack.rc. MYSQL_PASSWORD=$MYSQL_PASSWORD RABBIT_PASSWORD=$RABBIT_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$SERVICE_PASSWORD IP_VERSION=4 TROVE_LOGDIR=$TROVE_LOGDIR # Enable the Trove plugin for devstack enable_plugin trove $TROVE_REPO $TROVE_BRANCH # Enable Trove, Swift, and Heat ENABLED_SERVICES+=,trove,tr-api,tr-tmgr,tr-cond ENABLED_SERVICES+=,s-proxy,s-object,s-container,s-account ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng ENABLED_SERVICES+=,-n-novnc,-n-xvnc if [[ $ENABLE_NEUTRON = true ]]; then ENABLED_SERVICES+=,neutron,q-svc,q-agt,q-dhcp,q-l3,q-meta disable_service n-net else PUBLIC_INTERFACE=eth0 enable_service n-net disable_service neutron q-svc q-agt q-dhcp q-l3 q-meta fi # OSprofiler depends on Ceilometer if [[ $ENABLE_CEILOMETER = true ]] || [[ $ENABLE_PROFILER = true ]]; then CEILOMETER_BACKEND=mysql CEILOMETER_NOTIFICATION_TOPICS=notifications,profiler PROJ_BRANCH=$(get_project_branch CEILOMETER_BRANCH $PROJECT_BRANCH) enable_plugin ceilometer ${GIT_OPENSTACK}/ceilometer.git $PROJ_BRANCH fi # Enable Mistral, if configured if [[ $ENABLE_MISTRAL = true ]]; then PROJ_BRANCH=$(get_project_branch MISTRAL_BRANCH $PROJECT_BRANCH) enable_plugin mistral ${GIT_OPENSTACK}/mistral.git $PROJ_BRANCH fi # Use Git repositories for client components LIBS_FROM_GIT=python-troveclient if [[ $ENABLE_NEUTRON = true ]]; then LIBS_FROM_GIT+=,python-neutronclient fi if [[ $ENABLE_MISTRAL = true ]]; then LIBS_FROM_GIT+=,python-mistralclient fi if [[ $ENABLE_CEILOMETER = true ]]; then LIBS_FROM_GIT+=,python-ceilometerclient fi NOVNC_FROM_PACKAGE=false SWIFT_HASH=$SWIFT_HASH # Set Cinder Volume from Redstack so that later Redstack can help manage # reconnecting Volume Group to Backing File DEST=$DEST DATA_DIR=$DATA_DIR SERVICE_DIR=$SERVICE_DIR VOLUME_GROUP=${VOLUME_GROUP} VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE} VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE} # The lock_path is by default /opt/stack/nova; if this path is a shared # folder in VirtualBox, things seem to break. We fix it by setting EXTRA_OPTS # to force lock_path to /tmp.
EXTRA_OPTS=(lock_path=$USERHOME/nova_locks rescan_timeout=180 resizefs_timeout=240 force_dhcp_release=False) UNDO_REQUIREMENTS=False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/projects-list0000644000175000017500000000022300000000000022703 0ustar00coreycorey00000000000000keystone nova glance horizon swift neutron heat python-openstackclient python-novaclient python-troveclient python-neutronclient python-heatclient ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/reviews.rc0000644000175000017500000000041400000000000022172 0ustar00coreycorey00000000000000# This file will contain variables such as below, uncommented. # There will be a : separator between multiple inflight reviews # The path comes from the gerrit review system, and is the only # unique portion of each gerrit review. #REVIEW_PYTHON_NOVACLIENT=18/5018/2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/scripts/trovestack0000755000175000017500000015163100000000000022303 0ustar00coreycorey00000000000000#!/usr/bin/env bash ############################################################################### # Trove Stack Builder, the Trove Dev Machine Controller # ############################################################################### # # # This script provides all the functionality to run all the steps from # # setting up the environment, resetting the nova database to running the # # test. # # # ############################################################################### SCRIPT_DIRNAME=$(dirname "$0") PATH_TROVE=${PATH_TROVE:=$(readlink -f "${SCRIPT_DIRNAME}"/../..)} TROVESTACK_SCRIPTS=${TROVESTACK_SCRIPTS:=$(readlink -f "${SCRIPT_DIRNAME}")} TROVESTACK_TESTS=$TROVESTACK_SCRIPTS/../tests/ DEFAULT_LOCAL_CONF=local.conf.rc DEFAULT_LOCALRC=localrc.rc LOCAL_CONF=local.conf LOCALRC=localrc LOCALRC_AUTO=.localrc.auto USER_LOCAL_CONF_NAME=.devstack.$LOCAL_CONF CLOUD_ADMIN_ARG="--os-cloud=devstack-admin" # Make sure we're not affected by the local environment # by unsetting all the 'OS_' variables while read -r ENV_VAR; do unset "${ENV_VAR}"; done < <(env|grep "OS_"|awk -F= '{print $1}') # Now grab the admin credentials from devstack if it's set up. # This is to facilitate setting the ADMIN_PASSWORD correctly # for gate runs. if [ -f $DEST/devstack/accrc/admin/admin ]; then source $DEST/devstack/accrc/admin/admin fi USERHOME=$HOME # Load options not checked into VCS. if [ -f $USERHOME/.trovestack.options.rc ]; then . $USERHOME/.trovestack.options.rc fi if [ -f $TROVESTACK_SCRIPTS/options.rc ]; then . $TROVESTACK_SCRIPTS/options.rc fi # NOTE(mriedem): The gate-trove-functional-dsvm-* job config in project-config # sets this value for Jenkins runs. BRANCH_OVERRIDE=${BRANCH_OVERRIDE:-default} if [[ $BRANCH_OVERRIDE == "default" && $OVERRIDE_ZUUL_BRANCH != "master" ]]; then BRANCH_OVERRIDE=$OVERRIDE_ZUUL_BRANCH fi # Bail on errors. set -e # Get default host ip from interface function get_default_host_ip() { host_iface=$(ip route | grep default | awk '{print $5}' | head -1) echo `LC_ALL=C ip -f inet addr show ${host_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}' | head -1` } # Load functions devstack style . $TROVESTACK_SCRIPTS/functions . 
$TROVESTACK_SCRIPTS/functions_qemu # Pre-set DISTRO and RELEASE variables based on host OS # Can be overridden by env vars DISTRO and RELEASE GetDistro export DISTRO=${DISTRO:-$DISTRO_NAME} export RELEASE=${RELEASE:-$DISTRO_RELEASE} # Load global configuration variables. . $TROVESTACK_SCRIPTS/trovestack.rc . $TROVESTACK_SCRIPTS/reviews.rc # allow overrides from devstack if already set [[ -f $PATH_DEVSTACK_SRC/functions-common ]] && source $PATH_DEVSTACK_SRC/functions-common [[ -f $PATH_DEVSTACK_SRC/functions ]] && source $PATH_DEVSTACK_SRC/functions [[ -f $PATH_DEVSTACK_SRC/lib/apache ]] && source $PATH_DEVSTACK_SRC/lib/apache # Set up variables for the CONF files - this has to happen after loading trovestack.rc, since # TROVE_CONF_DIR is defined there - these will be used by devstack too export TROVE_CONF=$TROVE_CONF_DIR/trove.conf export TROVE_GUESTAGENT_CONF=$TROVE_CONF_DIR/trove-guestagent.conf export TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini export TEST_CONF=$TROVE_CONF_DIR/test.conf # Public facing bits SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} NETWORK_INTERFACE=${NETWORK_INTERFACE:-eth0} NETWORK_SUBNET=${NETWORK_SUBNET:-10.0.0.0/24} NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} BRIDGE_IP=${BRIDGE_IP:-172.24.4.1} KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-http} # PATH_TROVE more than likely has file separators, which sed does not like # This will escape them ESCAPED_PATH_TROVE=$(echo $PATH_TROVE | sed 's/\//\\\//g') ESCAPED_TROVESTACK_SCRIPTS=$(echo $TROVESTACK_SCRIPTS | sed 's/\//\\\//g') TROVE_LOGDIR=${TROVE_LOGDIR:-$DEST/logs} TROVE_DEVSTACK_SETTINGS="$DEST/trove/devstack/settings" TROVE_DEVSTACK_PLUGIN="$DEST/trove/devstack/plugin.sh" # DATASTORE_PKG_LOCATION defines the location from where the datastore packages # can be accessed by the DIB elements. This is applicable only for datastores # that do not have a public repository from where their packages can be accessed. # This can either be a url to a private repository or a location on the local # filesystem that contains the datastore packages. DATASTORE_PKG_LOCATION=${DATASTORE_PKG_LOCATION:-} # Support entry points installation of console scripts if [[ -d $PATH_TROVE/bin ]]; then TROVE_BIN_DIR=$PATH_TROVE/bin else TROVE_BIN_DIR=$(get_python_exec_prefix) fi # set up respective package managers if is_fedora; then PKG_INSTALL_OPTS="" PKG_MGR=dnf PKG_GET_ARGS="-y" else PKG_INSTALL_OPTS="DEBIAN_FRONTEND=noninteractive" PKG_MGR=apt-get PKG_GET_ARGS="-y --allow-unauthenticated --force-yes" fi PKG_INSTALL_ARG="install" PKG_UPDATE_ARG="update" ############################################################################### # Utility functions ############################################################################### # Colors that can be used in 'exclaim' COLOR_RED='\033[0;31m' COLOR_GREEN='\033[0;32m' COLOR_BLUE='\033[0;34m' COLOR_NONE='\033[0m' function exclaim () { echo "*******************************************************************************" echo -e "$@" echo "*******************************************************************************" } function pkg_install () { echo Installing $@... sudo -E $PKG_INSTALL_OPTS $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS $PKG_INSTALL_ARG $@ } function pkg_update () { echo Updating $@... 
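# (On an Ubuntu host the command below expands to roughly:
#   sudo -E DEBIAN_FRONTEND=noninteractive apt-get -y --allow-unauthenticated --force-yes update
# with any configured http_proxy settings prepended.)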
sudo -E $PKG_INSTALL_OPTS $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS $PKG_UPDATE_ARG $@ } function set_http_proxy() { if [ ! "${http_proxy}" = '' ]; then HTTP_PROXY="http_proxy=$http_proxy https_proxy=$https_proxy" fi } function get_ip_for_device() { /sbin/ifconfig $1 | awk '/inet addr/{gsub(/addr:/,"");print $2}' } function ip_chunk() { # Given 1-4 returns a bit of where the ip range starts. # Full IP= `ip_chunk 1`.`ip_chunk 2`.`ip_chunk 3`.`ip_chunk 4` get_ip_for_device $1 | cut -d. -f$2 } function dump_env() { # Print out the environment for debug purposes if [[ -n ${TROVESTACK_DUMP_ENV} ]]; then set +e exclaim "Dumping configuration, starting with env vars:" env | sort CLOUDS_YAML=${CLOUDS_YAML:-/etc/openstack/clouds.yaml} for filename in "${TEST_CONF}" "${CLOUDS_YAML}" "${TROVE_CONF}" "${PATH_DEVSTACK_SRC}/${LOCALRC}" "${PATH_DEVSTACK_SRC}/${LOCALRC_AUTO}"; do if [[ -f ${filename} ]]; then exclaim "Dumping contents of '${filename}':" cat ${filename} else exclaim "File '${filename}' not found" fi done exclaim "Dumping pip modules:" pip freeze | sort exclaim "Dumping configuration completed" set -e fi } # Add a flavor and a corresponding flavor.resize # (flavor.resize adds 16 to the memory and one more vcpu) function add_flavor() { local FLAVOR_NAME=$1 local FLAVOR_ID=$2 local FLAVOR_MEMORY_MB=$3 local FLAVOR_ROOT_GB=$4 local FLAVOR_VCPUS=$5 local FLAVOR_SKIP_RESIZE=${6:-""} if [[ -z "$FLAVOR_LIST_FOR_ADD" ]]; then FLAVOR_LIST_FOR_ADD=$(openstack $CLOUD_ADMIN_ARG flavor list | cut -d'|' -f3 | sed -e's/ /,/g') fi base_id=${FLAVOR_ID} base_name_prefix=test ephemeral_name_prefix=${base_name_prefix}.eph for name_prefix in $base_name_prefix $ephemeral_name_prefix; do reg_name=${name_prefix}.${FLAVOR_NAME}-${FLAVOR_ROOT_GB} resize_name=${reg_name}.resize ephemeral=0 if [[ $name_prefix == $ephemeral_name_prefix ]]; then ephemeral=1 fi for name in ${reg_name} ${resize_name}; do id=$base_id memory=${FLAVOR_MEMORY_MB} vcpus=${FLAVOR_VCPUS} if [[ $ephemeral != 0 ]]; then id=${id}e fi if [[ $name == ${resize_name} ]]; then id=${id}r memory=$((${FLAVOR_MEMORY_MB} + 16)) vcpus=$((${FLAVOR_VCPUS} + 1)) fi if [[ $FLAVOR_LIST_FOR_ADD != *",$name,"* ]]; then if [[ -z ${FLAVOR_SKIP_RESIZE} || ${name} == ${reg_name} ]]; then openstack $CLOUD_ADMIN_ARG flavor create $name --id $id --ram $memory --disk $FLAVOR_ROOT_GB --vcpus $vcpus --ephemeral $ephemeral fi fi done done } function get_attribute_id() { openstack --os-cloud=devstack-admin $1 list | grep " $2" | get_field $3 } ############################################################################### # Install all the required dependencies ############################################################################### function install_prep_packages() { # Called before devstack exclaim 'Updating dependencies (part 1a)...' pkg_update exclaim 'Installing dependencies (part 1b)...' pkg_install python-pip if is_fedora; then pkg_install git gettext else #pkg_install git-core kvm-ipxe gettext pkg_install git-core gettext fi sudo -H $HTTP_PROXY pip install --upgrade pip dib-utils } function install_devstack_code() { exclaim "Installing devstack..." # Installs devstack (if needed). if [ ! -d $PATH_DEVSTACK_SRC ]; then echo "DevStack not in a shared folder, cloning from git." mkdir -p $PATH_DEVSTACK_SRC git clone $DEVSTACK_REPO $PATH_DEVSTACK_SRC fi source $PATH_DEVSTACK_SRC/functions-common source $PATH_DEVSTACK_SRC/functions # Switch to a branch if specified. 
The order the variables are checked is: # DEVSTACK_BRANCH then PROJECT_BRANCH BRANCH_SPECIFIED=$(test -z "${DEVSTACK_BRANCH}${PROJECT_BRANCH}" || echo 'True') if [[ "${BRANCH_SPECIFIED}" = "True" ]]; then PROJ_BRANCH=$(get_project_branch DEVSTACK_BRANCH $PROJECT_BRANCH) ENV_VARS="DEVSTACK_BRANCH' or 'PROJECT_BRANCH" git_checkout "devstack" "$PATH_DEVSTACK_SRC" "$PROJ_BRANCH" "$ENV_VARS" fi exclaim "Installing devstack projects..." # Ensure the current user can get to the devstack dirs sudo mkdir -p $PATH_DEVSTACK_OUTPUT if [ ! -w $PATH_DEVSTACK_OUTPUT ]; then sudo chown `whoami` $PATH_DEVSTACK_OUTPUT fi # Clones all of the code to where devstack expects it to be pushd $PATH_DEVSTACK_OUTPUT cmd_clone_projects do_not_force_update $TROVESTACK_SCRIPTS/projects-list \ $TROVESTACK_SCRIPTS/image-projects-list popd } function install_reviews_on_top_of_devstack() { exclaim "Putting gerrit review code on top of the existing devstack code" run_review_for nova $PATH_NOVA $REVIEW_NOVA run_review_for python-novaclient $PATH_PYTHON_NOVACLIENT $REVIEW_PYTHON_NOVACLIENT run_review_for keystone $PATH_KEYSTONE $REVIEW_KEYSTONE run_review_for python-keystoneclient $PATH_KEYSTONECLIENT $REVIEW_PYTHON_KEYSTONECLIENT run_review_for python-openstackclient $PATH_OPENSTACKCLIENT $REVIEW_PYTHON_OPENSTACKCLIENT run_review_for glance $PATH_GLANCE $REVIEW_GLANCE run_review_for swift $PATH_SWIFT $REVIEW_SWIFT run_review_for python-swiftclient $PATH_PYTHON_SWIFTCLIENT $REVIEW_PYTHON_SWIFTCLIENT run_review_for trove $PATH_TROVE $REVIEW_TROVE run_review_for python-troveclient $PATH_PYTHON_TROVECLIENT $REVIEW_PYTHON_TROVECLIENT } function run_review_for() { # Splits REVIEW_ARG on colons and pulls each listed review from Gerrit GIT_NAME=$1 PATH_ARG=$2 REVIEW_ARG=$3 for review in `echo $REVIEW_ARG| tr ":" "\n"` do # This should be the ref spec for what we pull pushd $PATH_ARG git_timed pull https://review.opendev.org/p/openstack/$GIT_NAME refs/changes/$review popd done } function fixup_broken_devstack() { # Nothing to do here, devstack is working : } # Delete all the lines from FILE_NAME between START_TAG and END_TAG # Tags must appear at the beginning of a line function clear_file_lines() { local FILE_NAME=$1 local START_TAG=$2 local END_TAG=$3 sed -i "/^$START_TAG$/,/^$END_TAG$/{/^$START_TAG/!{/^$END_TAG/!d;}}" "$FILE_NAME" } # Checks to see if a variable with the same name as FILE_NAME exists. # Returns 'true' if no variable exists or if the value of the variable # is set to 'true' - returns the VAR_NAME to set otherwise. # FILE_NAME is first converted to uppercase, the extension is removed # and all remaining '.' and spaces are replaced with '_' function check_filename_var() { local FILE_NAME=$1 DEREF_VALUE=false if [ -f "$FILE_NAME" ]; then VAR_NAME=$(basename "$FILE_NAME" ".rc" | tr '[:lower:][:blank:][:punct:]' '[:upper:]__') DEREF_VALUE=$(get_bool "$VAR_NAME" "true") if [ "$DEREF_VALUE" != "true" ]; then DEREF_VALUE=$VAR_NAME fi fi echo "$DEREF_VALUE" } # Add the contents of one file to another, after the given tag # Run through 'eval' if PARSE_FILE is true (defaults to true) # Start with a blank line if BLANK_LINE_TO_START is true (defaults to false) function add_file_contents() { local FILE_NAME=$1 local FILE_TO_ADD=$2 local TAG=$3 local PARSE_FILE=${4:-true} local BLANK_LINE_TO_START=${5:-false} TEMP_FILE=".trovestack.$$" rm -f "$TEMP_FILE" if [ "$BLANK_LINE_TO_START" = "true" ]; then echo "" > "$TEMP_FILE" fi if [ -f "$FILE_TO_ADD" ]; then echo "Adding $FILE_TO_ADD to $FILE_NAME" echo "# Contents from $FILE_TO_ADD" >> "$TEMP_FILE" if [ "$PARSE_FILE" = "true" ]; then eval echo "\"$(cat "$FILE_TO_ADD")\"" >> "$TEMP_FILE" else cat "$FILE_TO_ADD" >> "$TEMP_FILE" fi echo "# End Of Contents from $FILE_TO_ADD" >> "$TEMP_FILE" fi echo "" >> "$TEMP_FILE" sed -i "/^$TAG/r $TEMP_FILE" "$FILE_NAME" rm -f "$TEMP_FILE" } function run_devstack() { exclaim "Running devstack..." # (Re)Creating this lock directory seems sure-fire. rm -rf "$USERHOME/nova_locks" mkdir -p "$USERHOME/nova_locks" TROVE_PRESENT_TAG="# generated-by-trovestack" LOCAL_CONF_D=local.conf.d CONF_MATCH="*.rc" MARKER_TOKEN="#####" USER_LOCAL_CONF=$(readlink -f "${USER_LOCAL_CONF:-$USERHOME/$USER_LOCAL_CONF_NAME}") LOCALRC_OPTS_TAG="$MARKER_TOKEN Trovestack Localrc Options $MARKER_TOKEN" LOCALRC_OPTS_TAG_END="$MARKER_TOKEN End Of Trovestack Localrc Options $MARKER_TOKEN" USER_OPTS_TAG="$MARKER_TOKEN User Specified Options $MARKER_TOKEN" USER_OPTS_TAG_END="$MARKER_TOKEN End Of User Specified Options $MARKER_TOKEN" ADD_OPTS_TAG="$MARKER_TOKEN Additional Options $MARKER_TOKEN" ADD_OPTS_TAG_END="$MARKER_TOKEN End Of Additional Options $MARKER_TOKEN" pushd "$PATH_DEVSTACK_SRC" DEVSTACK_LOCAL_CONF=$LOCAL_CONF # remain backwards compatible with existing localrc files if [ -f "$LOCALRC" ]; then DEVSTACK_LOCAL_CONF=$LOCALRC echo "Old-style devstack config file $PATH_DEVSTACK_SRC/$DEVSTACK_LOCAL_CONF found." echo "Consider removing it to generate the preferred-style config file $LOCAL_CONF." fi if [ -f "$DEVSTACK_LOCAL_CONF" ]; then # Check if we have already configured the devstack config file already_in_conf=$(grep "$TROVE_PRESENT_TAG" "$DEVSTACK_LOCAL_CONF" | wc -l) if [ "$already_in_conf" == 0 ]; then # We can no longer append to an existing old-style localrc file if [ "$DEVSTACK_LOCAL_CONF" == "$LOCALRC" ]; then echo "The devstack config file $PATH_DEVSTACK_SRC/$DEVSTACK_LOCAL_CONF is too old to append to." echo "Please remove it and try again."
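# (check_filename_var example: for local.conf.d/using_vagrant.rc the derived
# variable is USING_VAGRANT, so setting USING_VAGRANT=false in trovestack.rc
# skips that file in the inclusion loop below.)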
exit 1 fi # Otherwise append the trovestack version to the existing file eval echo "\"$(cat "$TROVESTACK_SCRIPTS/$DEFAULT_LOCALRC")\"" >> "$DEVSTACK_LOCAL_CONF" fi else # If a devstack config file doesn't exist, create it eval echo "\"$(cat "$TROVESTACK_SCRIPTS/$DEFAULT_LOCAL_CONF")\"" > "$DEVSTACK_LOCAL_CONF" fi # We can only replace sections from the LOCAL_CONF style files if [ "$DEVSTACK_LOCAL_CONF" == "$LOCAL_CONF" ]; then # Clear out all the options clear_file_lines "$DEVSTACK_LOCAL_CONF" "$LOCALRC_OPTS_TAG" "$LOCALRC_OPTS_TAG_END" clear_file_lines "$DEVSTACK_LOCAL_CONF" "$USER_OPTS_TAG" "$USER_OPTS_TAG_END" clear_file_lines "$DEVSTACK_LOCAL_CONF" "$ADD_OPTS_TAG" "$ADD_OPTS_TAG_END" # Add the main localrc file PARSE_FILE="true" BLANK_LINE_TO_START="true" if [ -f "$TROVESTACK_SCRIPTS/$DEFAULT_LOCALRC" ]; then add_file_contents "$DEVSTACK_LOCAL_CONF" "$TROVESTACK_SCRIPTS/$DEFAULT_LOCALRC" "$LOCALRC_OPTS_TAG" "$PARSE_FILE" "$BLANK_LINE_TO_START" fi # Add any user options PARSE_FILE="false" BLANK_LINE_TO_START="true" if [ -f "$USER_LOCAL_CONF" ]; then add_file_contents "$DEVSTACK_LOCAL_CONF" "$USER_LOCAL_CONF" "$USER_OPTS_TAG" "$PARSE_FILE" "$BLANK_LINE_TO_START" fi # Add all the files in the LOCAL_CONF_D directory that match CONF_MATCH (except for sample files) # and that aren't excluded. Files are excluded by having a variable # 'FILENAME_IN_UPPERCASE_MINUS_RC=false' in trovestack.rc # For Example: USING_VAGRANT=false (for the using_vagrant.rc file). PARSE_FILE="true" BLANK_LINE_TO_START="false" while IFS= read -r -d '' CONF_FILE do FILE_NAME_VAR=$(check_filename_var "$CONF_FILE") if [ "$FILE_NAME_VAR" = "true" ]; then add_file_contents "$DEVSTACK_LOCAL_CONF" "$CONF_FILE" "$ADD_OPTS_TAG" "$PARSE_FILE" "$BLANK_LINE_TO_START" else echo "Skipping $CONF_FILE" echo "Use $FILE_NAME_VAR=true to include" fi done < <(find "$TROVESTACK_SCRIPTS/${LOCAL_CONF_D}" -name "${CONF_MATCH}" -follow -not -name "sample*.rc" -type f -print0) # this is to add a blank line for readability add_file_contents "$DEVSTACK_LOCAL_CONF" "" "$ADD_OPTS_TAG" fi ./stack.sh popd } function cmd_install() { sudo mkdir -p $TROVE_LOGDIR # Creates TROVE_LOGDIR if it does not exist if [ ! -w $TROVE_LOGDIR ]; then sudo chown `whoami` $TROVE_LOGDIR fi install_prep_packages install_devstack_code install_reviews_on_top_of_devstack fixup_broken_devstack run_devstack exclaim "${COLOR_GREEN}FINISHED INSTALL${COLOR_NONE}" } ############################################################################### # Build the image # see functions_qemu ############################################################################### # Grab a numbered field from python prettytable output # Fields are numbered starting with 1 # Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
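# Example (illustrative): `openstack flavor list | get_field 1` prints the
# first table column (the flavor IDs), and `... | get_field -1` the last.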
# get_field field-number function get_field() { while read data; do if [ "$1" -lt 0 ]; then field="(\$(NF$1))" else field="\$$(($1 + 1))" fi echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" done } function set_bin_path() { if is_fedora; then sed -i "s|%bin_path%|/usr/bin|g" $TEST_CONF else sed -i "s|%bin_path%|/usr/local/bin|g" $TEST_CONF fi } function cmd_set_datastore() { local IMAGEID=$1 local DATASTORE_TYPE=$2 # rd_manage datastore_update rd_manage datastore_update "$DATASTORE_TYPE" "" PACKAGES=${PACKAGES:-""} if [ "$DATASTORE_TYPE" == "mysql" ]; then VERSION="5.7" elif [ "$DATASTORE_TYPE" == "percona" ]; then PACKAGES=${PACKAGES:-"percona-server-server-5.6"} VERSION="5.6" elif [ "$DATASTORE_TYPE" == "pxc" ]; then PACKAGES=${PACKAGES:-"percona-xtradb-cluster-server-5.6"} VERSION="5.6" elif [ "$DATASTORE_TYPE" == "mariadb" ]; then VERSION="10.4" elif [ "$DATASTORE_TYPE" == "mongodb" ]; then PACKAGES=${PACKAGES:-"mongodb-org"} VERSION="3.2" elif [ "$DATASTORE_TYPE" == "redis" ]; then PACKAGES=${PACKAGES:-""} VERSION="3.2.6" elif [ "$DATASTORE_TYPE" == "cassandra" ]; then PACKAGES=${PACKAGES:-"cassandra"} VERSION="2.1.0" elif [ "$DATASTORE_TYPE" == "couchbase" ]; then PACKAGES=${PACKAGES:-"couchbase-server"} VERSION="2.2.0" elif [ "$DATASTORE_TYPE" == "postgresql" ]; then VERSION="9.6" elif [ "$DATASTORE_TYPE" == "couchdb" ]; then PACKAGES=${PACKAGES:-"couchdb"} VERSION="1.6.1" elif [ "$DATASTORE_TYPE" == "vertica" ]; then PACKAGES=${PACKAGES:-"vertica"} VERSION="9.0.1" elif [ "$DATASTORE_TYPE" == "db2" ]; then PACKAGES=${PACKAGES:-""} VERSION="11.1" else echo "Unrecognized datastore type. ($DATASTORE_TYPE)" exit 1 fi # trove-manage datastore_version_update rd_manage datastore_version_update "$DATASTORE_TYPE" "$VERSION" "$DATASTORE_TYPE" $IMAGEID "$PACKAGES" 1 rd_manage datastore_update "$DATASTORE_TYPE" "$VERSION" if [ -f "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json ]; then # add the configuration parameters to the database for the kick-start datastore rd_manage db_load_datastore_config_parameters "$DATASTORE_TYPE" "$VERSION" "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json fi } ############################################################################### # Run Unit Tests ############################################################################### function cmd_unit_tests() { exclaim "Running Trove Unit Tests..." $PATH_TROVE/run_tests.sh -N } ############################################################################### # Start various OpenStack daemons interactively in a screen session ############################################################################### function cmd_start_deps() { if ! sudo vgs $VOLUME_GROUP; then exclaim "Reconnecting Volume Group to Backing File" sudo losetup -f --show ${VOLUME_BACKING_FILE} fi if ! 
egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then exclaim "Re-mounting Swift Disk Image" sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 || true fi if [[ -e $PATH_DEVSTACK_SRC/stack-screenrc ]]; then screen -dmS stack -c $PATH_DEVSTACK_SRC/stack-screenrc fi } function cmd_stop_deps() { if screen -ls | grep -q stack; then screen -S stack -X quit rm -f $DEST/status/stack/* fi } ############################################################################### # Initialize Trove ############################################################################### function rd_manage() { pushd $PATH_TROVE $TROVE_BIN_DIR/trove-manage --config-file=$TROVE_CONF "$@" popd } function install_test_packages() { DATASTORE_TYPE=$1 sudo -H $HTTP_PROXY pip install openstack.nose_plugin proboscis pexpect if [ "$DATASTORE_TYPE" = "couchbase" ]; then if [ "$DISTRO" == "ubuntu" ]; then # Install Couchbase SDK for scenario tests. sudo -H $HTTP_PROXY curl http://packages.couchbase.com/ubuntu/couchbase.key | sudo apt-key add - echo "deb http://packages.couchbase.com/ubuntu trusty trusty/main" | sudo tee /etc/apt/sources.list.d/couchbase-csdk.list sudo -H $HTTP_PROXY apt-get update sudo -H $HTTP_PROXY apt-get --allow-unauthenticated -y install libcouchbase-dev sudo -H $HTTP_PROXY pip install --upgrade couchbase fi fi } function mod_confs() { local DATASTORE_TYPE=$1 local DATASTORE_VERSION=$2 exclaim "Running mod_confs ..." sudo install -b --mode 0664 $TROVESTACK_SCRIPTS/conf/test_begin.conf $TEST_CONF # cmd_dsvm_gate_tests will set this to be $HOME/report TROVE_REPORT_DIR=${TROVE_REPORT_DIR:=$TROVESTACK_SCRIPTS/../report/} EXTRA_CONF=$TROVESTACK_SCRIPTS/conf/test.extra.conf if [ -e $EXTRA_CONF ]; then cat $EXTRA_CONF >> $TEST_CONF fi # Append datastore specific configuration file DATASTORE_CONF=$TROVESTACK_SCRIPTS/conf/$DATASTORE_TYPE.conf if [ ! -f $DATASTORE_CONF ]; then exclaim "Datastore configuration file ${DATASTORE_CONF} not found" exit 1 fi cat $DATASTORE_CONF | sudo tee -a $TEST_CONF > /dev/null cat $TROVESTACK_SCRIPTS/conf/test_end.conf | sudo tee -a $TEST_CONF > /dev/null #Add the paths to the test conf sed -i "s,%report_directory%,$TROVE_REPORT_DIR,g" $TEST_CONF sed -i "s,%service_host%,$SERVICE_HOST,g" $TEST_CONF # Add the region name into test.conf sed -i "s/%region_name%/${REGION_NAME}/g" $TEST_CONF # Add the tenant id's into test.conf sed -i "s/%service_tenant_id%/$(get_attribute_id project service 1)/g" $TEST_CONF sed -i "s/%alt_demo_tenant_id%/$(get_attribute_id project alt_demo 1)/g" $TEST_CONF sed -i "s/%demo_tenant_id%/$(get_attribute_id project demo 1)/g" $TEST_CONF sed -i "s/%admin_password%/$ADMIN_PASSWORD/g" $TEST_CONF sed -i "s/%service_password%/$SERVICE_PASSWORD/g" $TEST_CONF # Enable neutron tests if needed sed -i "s/%neutron_enabled%/$ENABLE_NEUTRON/g" $TEST_CONF # Enable backup related tests if Swift is enabled sed -i "s/%swift_enabled%/$ENABLE_SWIFT/g" $TEST_CONF # If neutron is enabled, the devstack plugin has already set up the shared # private network for testing. 
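# (Illustrative effect: a placeholder line such as
#   shared_network = %shared_network%
# in test.conf becomes the UUID of $TROVE_PRIVATE_NETWORK_NAME; the key name
# shown here is hypothetical.)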
if [[ $ENABLE_NEUTRON = true ]]; then TROVE_NET_ID=$(openstack $CLOUD_ADMIN_ARG network list | grep " $TROVE_PRIVATE_NETWORK_NAME " | awk '{print $2}') TROVE_SUBNET_ID=$(openstack $CLOUD_ADMIN_ARG subnet list | grep " $TROVE_PRIVATE_SUBNET_NAME " | awk '{print $2}') echo "Using network ${TROVE_PRIVATE_NETWORK_NAME} (${TROVE_NET_ID}): ${TROVE_PRIVATE_SUBNET_NAME} (${TROVE_SUBNET_ID})" sed -i "s,%shared_network%,$TROVE_NET_ID,g" $TEST_CONF sed -i "s,%shared_network_subnet%,$TROVE_SUBNET_ID,g" $TEST_CONF else # do not leave invalid keys in the configuration when using Nova for networking sed -i "/%shared_network%/d" $TEST_CONF sed -i "/%shared_network_subnet%/d" $TEST_CONF fi if [ "$DATASTORE_TYPE" = "vertica" ]; then # Vertica needs more time than mysql for its boot/start/stop operations. setup_cluster_configs cluster_member_count 3 elif [ "$DATASTORE_TYPE" = "pxc" ]; then setup_cluster_configs min_cluster_member_count 2 elif [ "$DATASTORE_TYPE" = "cassandra" ]; then setup_cluster_configs cluster_member_count 2 elif [ "$DATASTORE_TYPE" = "mongodb" ]; then setup_cluster_configs cluster_member_count 2 # Decrease the number of required config servers per cluster to save resources. iniset $TROVE_CONF $DATASTORE_TYPE num_config_servers_per_cluster 1 fi sed -i "s/%datastore_type%/$DATASTORE_TYPE/g" $TEST_CONF sed -i "s/%datastore_version%/${DATASTORE_VERSION}/g" $TEST_CONF set_bin_path } function setup_cluster_configs() { # Setting cluster_member_count to 2 to decrease cluster spawn time. iniset $TROVE_CONF $DATASTORE_TYPE $1 $2 } # Add useful flavors for testing (with corresponding *.resize flavors) function add_test_flavors() { # name id ram root_vol vcpu # the ram and vcpu for name.resize are automatically calculated # eph and non-eph flavors are created for each entry add_flavor 'tiny' 10 768 4 1 add_flavor 'small' 15 1024 6 1 add_flavor 'small' 16 1024 7 1 add_flavor 'small' 17 1024 8 1 add_flavor 'medium' 20 1536 7 1 add_flavor 'medium' 21 1536 8 1 add_flavor 'large' 25 2048 8 1 add_flavor 'large' 26 2048 13 1 add_flavor 'large' 27 2048 18 1 # This will allow Nova to create an instance, but not enough disk to boot the image add_flavor 'fault_1' 30 1536 1 1 'skip_resize' # This should be enough memory to cause Nova to fail entirely due to too much allocation add_flavor 'fault_2' 31 131072 7 1 'skip_resize' } function cmd_test_init() { local DATASTORE_TYPE=$1 local DATASTORE_VERSION=$2 if [ -z "${DATASTORE_TYPE}" ]; then exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" exit 1 fi exclaim 'Initializing Configuration for Running Tests...' exclaim "Installing python test packages." install_test_packages "${DATASTORE_TYPE}" exclaim "Modifying test.conf and guest.conf with appropriate values." mod_confs "${DATASTORE_TYPE}" "${DATASTORE_VERSION}" exclaim "Creating Test Flavors." add_test_flavors exclaim "Re-installing python-troveclient from git" pip3 uninstall -y python-troveclient pip3 install -U git+https://opendev.org/openstack/python-troveclient@master#egg=python-troveclient } function cmd_build_image() { exclaim "Params for cmd_build_image function: $@" local IMAGE_DATASTORE_TYPE=${1:-'mysql'} local IMAGE_GUEST_OS=${2:-'ubuntu'} local IMAGE_GUEST_RELEASE=${3:-'xenial'} local DEV_MODE=${4:-'true'} local guest_username=${5:-'ubuntu'} local output=$6 if [[ -z "$output" ]]; then image_name="trove-datastore-${IMAGE_GUEST_OS}-${IMAGE_GUEST_RELEASE}-${IMAGE_DATASTORE_TYPE}" image_folder=$HOME/images output="${image_folder}/${image_name}" fi # Always rebuild the image. 
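# A typical invocation via the trovestack wrapper (values illustrative):
#   ./trovestack build-image mysql ubuntu xenial true
# which reaches this function with output defaulting to
#   $HOME/images/trove-datastore-ubuntu-xenial-mysql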
sudo rm -f $output sudo mkdir -p $(dirname $output); sudo chmod 777 -R $(dirname $output) echo "Ensuring we have all packages needed to build image." sudo $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS update sudo $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS install qemu git kpartx debootstrap sudo -H $HTTP_PROXY pip install diskimage-builder exclaim "Use diskimage-builder to actually build the Trove Guest Agent Image." build_guest_image $IMAGE_DATASTORE_TYPE $IMAGE_GUEST_OS $IMAGE_GUEST_RELEASE $DEV_MODE ${guest_username} $output } # Build guest image and upload to Glance, register the datastore and configuration parameters. # We could skip the image build and upload by: # 1. MYSQL_IMAGE_ID is passed, or # 2. There is an image in Glance contains the datastore name function cmd_build_and_upload_image() { local datastore_type=$1 local guest_os=${2:-"ubuntu"} local guest_release=${3:-"xenial"} local dev_mode=${4:-"true"} local guest_username=${5:-"ubuntu"} local output_dir=${6:-"$HOME/images"} if [ -z "${datastore_type}" ]; then exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" exit 1 fi image_var="${datastore_type^^}_IMAGE_ID" glance_imageid=`eval echo '$'"$image_var"` if [[ -z $glance_imageid ]]; then # Find the first image id with the name contains datastore_type. glance_imageid=$(openstack $CLOUD_ADMIN_ARG image list | grep "$datastore_type" | awk 'NR==1 {print}' | awk '{print $2}') if [[ -z $glance_imageid ]]; then mkdir -p ${output_dir} name=trove-datastore-${guest_os}-${guest_release}-${datastore_type} output=${output_dir}/$name.qcow2 cmd_build_image ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username} $output glance_imageid=$(openstack ${CLOUD_ADMIN_ARG} image create $name --public --disk-format qcow2 --container-format bare --file $output --property hw_rng_model='virtio' -c id -f value) [[ -z "$glance_imageid" ]] && echo "Glance upload failed!" && exit 1 fi fi exclaim "Using Glance image ID: $glance_imageid" exclaim "Updating Datastores" cmd_set_datastore "${glance_imageid}" "${datastore_type}" } function cmd_initialize() { exclaim '(Re)Initializing Trove...' pushd $PATH_DEVSTACK_SRC ./unstack.sh ./stack.sh popd } ############################################################################### # Start Trove specific daemons interactively in a screen session ############################################################################### function tr_screen_it { if screen -ls | grep -q stack; then echo "Starting $@..." screen -S stack -p $1 -X stuff "$2"$'\015' fi } function init_fake_mode() { # Create a test conf which, unlike the conf which runs on a user's machine, # takes advantage of the running keystone service we have in our VM. # You could think of this fake mode, which runs in the VM as being # slightly less fake than the default one which runs outside of it. CONF_FILE=/tmp/trove.conf.test cp $PATH_TROVE/etc/trove/trove.conf.test $CONF_FILE # Switch keystone from the fake class to the real one. 
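# (i.e. the paste pipeline entry 'trove.tests.fakes.keystone' is rewritten
# below to 'keystone.middleware.auth_token', so real token validation is
# exercised even in fake mode.)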
sed -i \ "s/trove.tests.fakes.keystone/keystone.middleware.auth_token/g" \ $CONF_FILE sed -i "s/log_file = rdtest.log//g" $CONF_FILE sed -i "s/use_stderr = False/use_stderr = True/g" $CONF_FILE cd $PATH_TROVE set -e rm -f trove_test.sqlite set +e $TROVE_BIN_DIR/trove-manage --config-file=$CONF_FILE db_sync sqlite3 trove_test.sqlite \ "INSERT INTO datastores VALUES ('a00000a0-00a0-0a00-00a0-000a000000aa', \ 'mysql', 'b00000b0-00b0-0b00-00b0-000b000000bb'); \ INSERT INTO datastores values ('e00000e0-00e0-0e00-00e0-000e000000ee', \ 'Test_Datastore_1', ''); \ INSERT INTO datastore_versions VALUES ('b00000b0-00b0-0b00-00b0-000b000000bb', \ 'a00000a0-00a0-0a00-00a0-000a000000aa', $MYSQL_VER, \ 'c00000c0-00c0-0c00-00c0-000c000000cc', $MYSQL_PKG, 1, 'mysql'); \ INSERT INTO datastore_versions VALUES ('d00000d0-00d0-0d00-00d0-000d000000dd', \ 'a00000a0-00a0-0a00-00a0-000a000000aa', 'inactive_version', \ '', '', 0, 'manager1'); \ INSERT INTO datastore_configuration_parameters VALUES \ ('00000000-0000-0000-0000-000000000001', \ 'key_buffer_size', 'b00000b0-00b0-0b00-00b0-000b000000bb', \ 0, 4294967296, 0, 'integer', 0, NULL); \ INSERT INTO datastore_configuration_parameters VALUES \ ('00000000-0000-0000-0000-000000000002', \ 'connect_timeout', 'b00000b0-00b0-0b00-00b0-000b000000bb', \ 0, 65535, 1, 'integer', 0, NULL); \ INSERT INTO datastore_configuration_parameters VALUES \ ('00000000-0000-0000-0000-000000000003', \ 'join_buffer_size', 'b00000b0-00b0-0b00-00b0-000b000000bb', \ 0, 4294967296, 0, 'integer', 0, NULL); \ INSERT INTO datastore_configuration_parameters VALUES \ ('00000000-0000-0000-0000-000000000004', \ 'local_infile', 'b00000b0-00b0-0b00-00b0-000b000000bb', \ 0, 1, 0, 'integer', 0, NULL); \ INSERT INTO datastore_configuration_parameters VALUES \ ('00000000-0000-0000-0000-000000000005', \ 'collation_server', 'b00000b0-00b0-0b00-00b0-000b000000bb', \ 0, NULL, NULL, 'string', 0, NULL); \ " } function cmd_start() { TOP_DIR=$PATH_DEVSTACK_SRC source ${TOP_DIR}/stackrc if screen -ls | grep -q stack; then USE_SCREEN=True LOGDIR=$TROVE_LOGDIR RUNNING=$(screen -S stack -Q windows) if [[ "$RUNNING" =~ " tr-" ]]; then exclaim "${COLOR_RED}WARNING: Trove services appear to be running. 
Please run 'stop' or 'restart'${COLOR_NONE}" else source "$TROVE_DEVSTACK_SETTINGS" source /dev/stdin < <(sed -n '/^function start_trove\(\)/,/^}/p' "$TROVE_DEVSTACK_PLUGIN") start_trove fi else source "$TROVE_DEVSTACK_SETTINGS" source /dev/stdin < <(sed -n '/^function start_trove\(\)/,/^}/p' "$TROVE_DEVSTACK_PLUGIN") start_trove fi } function cmd_start_fake() { init_fake_mode CONF_FILE=/tmp/trove.conf.test tr_screen_it tr-fake "cd $PATH_TROVE; $TROVE_BIN_DIR/trove-fake-mode --config-file=$CONF_FILE $@" } function cmd_run() { cd $PATH_TROVE; $TROVE_BIN_DIR/trove-api \ --config-file=$TROVE_CONF $@ } function cmd_run_fake() { init_fake_mode CONF_FILE=/tmp/trove.conf.test $TROVE_BIN_DIR/trove-fake-mode --config-file=$CONF_FILE $@ } ############################################################################### # Stop any active Trove screen session ############################################################################### function cmd_stop() { TOP_DIR=$PATH_DEVSTACK_SRC source ${TOP_DIR}/stackrc if screen -ls | grep -q stack; then rm -f $DEST/status/stack/tr-* USE_SCREEN=True source "$TROVE_DEVSTACK_SETTINGS" source /dev/stdin < <(sed -n '/^function stop_trove\(\)/,/^}/p' "$TROVE_DEVSTACK_PLUGIN") MAX_RETRY=5 COUNT=1 while true; do RUNNING=$(screen -S stack -Q windows) if [[ "$RUNNING" =~ " tr-" ]]; then stop_trove else break fi ((COUNT++)) if [ "$COUNT" -gt "$MAX_RETRY" ]; then exclaim "${COLOR_RED}WARNING: Could not stop Trove services after ${MAX_RETRY} attempts${COLOR_NONE}" break fi done else source "$TROVE_DEVSTACK_SETTINGS" source /dev/stdin < <(sed -n '/^function stop_trove\(\)/,/^}/p' "$TROVE_DEVSTACK_PLUGIN") stop_trove fi } ############################################################################### # Run Integration Tests ############################################################################### function cmd_int_tests() { exclaim "Running Trove Integration Tests..." if [ ! $USAGE_ENDPOINT ]; then export USAGE_ENDPOINT=trove.tests.util.usage.FakeVerifier fi cd $TROVESTACK_SCRIPTS if [ $# -lt 1 ]; then args="--group=mysql" else args="$@" fi # Referenced in test script export TROVE_TEST_SSH_USER=${TROVE_TEST_SSH_USER:-"ubuntu"} export TROVE_TEST_SSH_KEY_FILE=${TROVE_TEST_SSH_KEY_FILE:-"$HOME/.ssh/id_rsa"} dump_env # --verbose makes the output prettier. # --logging-clear-handlers keeps the novaclient and other things from # spewing logs to stdout. args="$INT_TEST_OPTIONS -B $TROVESTACK_TESTS/integration/int_tests.py --verbose --logging-clear-handlers $args" echo "Running: python $args" python $args } function cmd_int_tests_white_box() { export PYTHONPATH=$PYTHONPATH:$PATH_TROVE export PYTHONPATH=$PYTHONPATH:$PATH_NOVA cmd_int_tests --test-config white_box=True \ --config-file=$TROVE_CONF \ --nova-flags=/etc/nova/nova.conf $@ } ############################################################################### # Misc. tools ############################################################################### function mysql_nova() { echo mysql nova --execute "$@" mysql -u root -p$MYSQL_PASSWORD nova --execute "$@" 2> /dev/null } function mysql_trove() { echo mysql trove --execute "$@" mysql -u root -p$MYSQL_PASSWORD trove --execute "$@" 2> /dev/null } function cmd_wipe_logs() { for file in `ls $TROVE_LOGDIR/*.log` do echo "Resetting log file $file..."
echo "Reset at `date`" > $file done } function cmd_rd_sql() { mysql -u root -p$MYSQL_PASSWORD trove } function cmd_fake_sql() { pushd $PATH_TROVE sqlite3 trove_test.sqlite $@ popd } function cmd_vagrant_ssh() { # Runs a command on a vagrant VM from the host machine. VHOST=`vagrant ssh_config host | awk '/HostName/{print $2}'` VUSER=`vagrant ssh_config host | awk '/User /{print $2}'` VPORT=`vagrant ssh_config host | awk '/Port/{print $2}'` VIDFILE=`vagrant ssh_config host | awk '/IdentityFile/{print $2}'` echo ssh ${VUSER}@${VHOST} -p ${VPORT} -i ${VIDFILE} -o NoHostAuthenticationForLocalhost=yes "$@" ssh ${VUSER}@${VHOST} -p ${VPORT} -i ${VIDFILE} -o NoHostAuthenticationForLocalhost=yes "$@" } function cmd_run_ci() { local DATASTORE_TYPE=$1 local RESTART_TROVE=${2:-$(get_bool RESTART_TROVE "true")} if [ -z "${DATASTORE_TYPE}" ]; then exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" exit 1 fi exclaim "Running CI suite..." set +e cmd_stop_deps cmd_stop set -e cmd_install cmd_test_init "${DATASTORE_TYPE}" cmd_build_and_upload_image "${DATASTORE_TYPE}" "${RESTART_TROVE}" # Test in fake mode. exclaim "Testing in fake mode." cmd_start_fake FAKE_MODE=True cmd_int_tests cmd_stop # Test in real mode. exclaim "Testing in real mode." cmd_start FAKE_MODE=False cmd_int_tests } function cmd_wipe_queues() { # Obliterate rabbit. for i in stop_app reset start_app "change_password guest $RABBIT_PASSWORD"; \ do sudo rabbitmqctl $i; done } function cmd_clear() { cmd_int_tests --group=dbaas.api.instances.delete clean_instances mysql_nova "DELETE FROM instance_info_caches;" mysql_nova "DELETE FROM instances;" mysql_trove "DELETE FROM instances;" mysql_trove "DELETE FROM service_statuses;" cmd_wipe_queues } function exec_cmd_on_output() { local output_cmd=$1 local exec_cmd=$2 local delete_sleep_time=${3:-0} local skip_pattern=${4:-""} echo "Cleaning up objects from '${output_cmd}'" local skip_cmd="cat" if [[ -n "${skip_pattern}" ]]; then local temp_skip_cmd=(grep -v "${skip_pattern}") skip_cmd=${temp_skip_cmd[*]} fi local max_retry=10 local count=1 local again= while true; do ids=$($output_cmd | ${skip_cmd} | grep -v -e'---' | grep -iv ' id ' | cut -d'|' -f2) if [[ -n $ids ]]; then for id in $ids; do echo -e "Executing: ${exec_cmd} ${id} ${again}" # don't stop if we get an error executing the delete, and don't print # out anything from stderr set +e ${exec_cmd} "${id}" &> /dev/null set -e done sleep "${delete_sleep_time}" else break fi ((count++)) if [[ "$count" -gt "$max_retry" ]]; then exclaim "${COLOR_RED}WARNING: '$output_cmd' still returning output after ${max_retry} delete attempts${COLOR_NONE}" break fi again="${COLOR_BLUE}(again)${COLOR_NONE}" done } function cmd_clean() { local project_name=${1:-alt_demo} exclaim "Cleaning up project '${COLOR_BLUE}${project_name}${COLOR_NONE}'" # reset any stuck backups mysql_trove "update backups set state='COMPLETED'" # clear out any DS version metadata mysql_trove "delete from datastore_version_metadata" # reset any stuck instances, and clear all replicas mysql_trove "update instances set task_id=2, slave_of_id=null" # reset any stuck clusters mysql_trove "update clusters set task_id=1" # get rid of any extraneous quota usage mysql_trove "delete from quota_usages" # mark all instance modules as deleted mysql_trove "update instance_modules set deleted=1" if [[ ! 
-f "${PATH_DEVSTACK_SRC}"/accrc/${project_name}/admin ]]; then echo "Could not find credentials file for project '${project_name}'" exit 1 fi source "${PATH_DEVSTACK_SRC}"/accrc/${project_name}/admin local cloud_arg=$CLOUD_ADMIN_ARG if [[ $project_name == *"alt"* ]]; then cloud_arg="--os-cloud=devstack-alt-admin" elif [[ $project_name == "demo" ]]; then cloud_arg="--os-cloud=devstack" fi # delete any trove clusters exec_cmd_on_output "trove cluster-list" "trove cluster-delete" 20 # delete any trove instances exec_cmd_on_output "trove list" "trove delete" 10 # delete any backups exec_cmd_on_output "trove backup-list" "trove backup-delete" # clean up any remaining nova instances or cinder volumes exec_cmd_on_output "openstack $cloud_arg server list" "openstack $cloud_arg server delete" 5 exec_cmd_on_output "openstack $cloud_arg volume list" "openstack $cloud_arg volume delete" 1 # delete any config groups since all instances should be gone now exec_cmd_on_output "trove configuration-list" "trove configuration-delete" # delete any modules too exec_cmd_on_output "trove module-list" "trove module-delete" # make sure that security groups are also gone, except the default exec_cmd_on_output "openstack $cloud_arg security group list" "openstack $cloud_arg security group delete" 0 "default" # delete server groups exec_cmd_on_output "openstack $cloud_arg server group list" "openstack $cloud_arg server group delete" } function cmd_kick_start() { local DATASTORE_TYPE=$1 local DATASTORE_VERSION=$2 if [ -z "${DATASTORE_TYPE}" ]; then exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" exit 1 fi exclaim "Running kick-start for $DATASTORE_TYPE" cmd_test_init "${DATASTORE_TYPE}" "${DATASTORE_VERSION}" dump_env } # Start functional test. The guest image should be created and registered in # appropriate datastore before the test, the configuration parameters should # also be loaded as well. DevStack has done all of that. function cmd_gate_tests() { local DATASTORE_TYPE=${1:-'mysql'} local TEST_GROUP=${2:-${DATASTORE_TYPE}} local DATASTORE_VERSION=${3:-'5.7'} local HOST_SCP_USERNAME=${4:-$(whoami)} local GUEST_USERNAME=${5:-'ubuntu'} exclaim "Running cmd_gate_tests ..." export REPORT_DIRECTORY=${REPORT_DIRECTORY:=$HOME/gate-tests-report/} export TROVE_REPORT_DIR=$HOME/gate-tests-report/ export TROVESTACK_DUMP_ENV=true export SSH_DIR=${SSH_DIR:-"$HOME/.ssh"} # The user is used to connect with the db instance during testing. export TROVE_TEST_SSH_USER=${TROVE_TEST_SSH_USER:-"ubuntu"} # This var is used to ssh into the db instance during testing. export TROVE_TEST_SSH_KEY_FILE=${SSH_DIR}/id_rsa cd $TROVESTACK_SCRIPTS # Build and upload guest image, register datastore version. cmd_build_and_upload_image ${DATASTORE_TYPE} cmd_kick_start "${DATASTORE_TYPE}" "${DATASTORE_VERSION}" cmd_int_tests --group=$TEST_GROUP } function cmd_reset_task() { mysql_trove "UPDATE instances SET task_id=1 WHERE id='$1'" } function cmd_clone_projects() { local UPDATE_PROJECTS=$1 local PROJECT_LIST_FILES=${@:2} for project in $(cat $PROJECT_LIST_FILES); do if [ ! -d $PATH_DEVSTACK_OUTPUT/$project ]; then echo "Creating a new clone of $project..." git_clone $GIT_OPENSTACK/"$project".git ${PATH_DEVSTACK_OUTPUT}/$project master else if [ $UPDATE_PROJECTS != "force_update" ]; then echo "$project was already cloned or exists in a shared folder. Ignoring..." else echo "$project was already cloned. Pulling changes to update." cd $PATH_DEVSTACK_OUTPUT/$project git pull fi fi # Switch to a branch if specified. 
The order the variables are checked is: # _BRANCH then PROJECT_CLIENT_BRANCH (if a client) then PROJECT_BRANCH # Note: For the Trove project, only TROVE_BRANCH and PYTHON_TROVECLIENT_BRANCH are used PROJECT_BRANCH_NAME=$(eval echo "${project}_BRANCH" | tr '[:lower:]-' '[:upper:]_') PROJECT_BRANCH_VALUE=${!PROJECT_BRANCH_NAME} # TROVE_BRANCH is defaulted to master if not set, so use the original value here if [[ "$project" = "trove" ]]; then PROJECT_BRANCH_VALUE=${TROVE_BRANCH_ORIG} fi BRANCH_SPECIFIED=$(test -z "${PROJECT_BRANCH_VALUE}${PROJECT_CLIENT_BRANCH}${PROJECT_BRANCH}" || echo 'True') if [[ "${BRANCH_SPECIFIED}" = "True" ]]; then # Set up the default branch and env var names for the project DEFAULT_BRANCH="$PROJECT_BRANCH" ENV_VARS="$PROJECT_BRANCH_NAME' or 'PROJECT_BRANCH" # Don't use 'PROJECT_BRANCH' or 'PROJECT_CLIENT_BRANCH' for the Trove project if [[ "$project" =~ "trove" ]]; then DEFAULT_BRANCH=master ENV_VARS="$PROJECT_BRANCH_NAME" # Use 'PROJECT_CLIENT_BRANCH' first for clients elif [[ "$project" =~ "client" ]]; then DEFAULT_BRANCH="${PROJECT_CLIENT_BRANCH:-$PROJECT_BRANCH}" ENV_VARS="$PROJECT_BRANCH_NAME' or 'PROJECT_CLIENT_BRANCH' or 'PROJECT_BRANCH" fi PROJ_BRANCH=$(get_project_branch $PROJECT_BRANCH_NAME $DEFAULT_BRANCH) git_checkout "$project" "$PATH_DEVSTACK_OUTPUT/$project" "$PROJ_BRANCH" "$ENV_VARS" fi done } function cmd_repl() { INT_TEST_OPTIONS=-i cmd_int_tests_white_box --repl --group=_does_not_exist_ $@ } ############################################################################### # Process the user provided command and run the appropriate command ############################################################################### # Let's not run this as the root user if [ $EUID -eq 0 ]; then echo "You are running this script as root. You need to run as a regular user" exit 1 fi # Set this to exit immediately on error set -o errexit set_http_proxy function print_usage() { echo "Usage: $0 [command]" echo " Commands : --setup environment-- install - Install all the required dependencies and bring up tr-api and tr-tmgr - devstack config can be altered by using a USER_LOCAL_CONF file which will be copied into devstack/local.conf on each 'install' run (defaults to \$HOME/$USER_LOCAL_CONF_NAME) - Set DEVSTACK_BRANCH to switch the branch/commit of devstack (i.e. 'stable/kilo' or '7ef2462') test-init - Configure the test configuration files and add keystone test users build-image - Builds the vm image for the trove guest initialize - Reinitialize the trove database, users, services, and test config --helper for environment-- kick-start - kick start the setup of trove. (trovestack test-init/build-image in one step) - Set REBUILD_IMAGE=True to force rebuild (won't use cached image) --trove dependency services-- start-deps - Start or resume daemons Trove depends on. stop-deps - Kill daemons Trove depends on. --trove services-- start - Start or resume Trove daemons. stop - Kill Trove daemons. restart - Runs stop then start for Trove services. --tests-- unit-tests - Run the unit tests.dependencies int-tests - Runs the integration tests (requires all daemons). See trove/tests/int_tests.py for list of registered groups. 
Examples:
        Run original MySQL tests:        ./trovestack int-tests
        Run all MySQL scenario tests:    ./trovestack int-tests --group=mysql-supported
        Run single Redis scenario tests: ./trovestack int-tests --group=redis-supported-single
        Run specific functional tests:   ./trovestack int-tests --group=module-create --group=configuration-create
          simple-tests    - Runs the simple integration tests (requires all daemons).
          dsvm-gate-tests - Configures and runs the int-tests in a devstack vm-gate environment (legacy Zuul v2 jobs only).
          gate-tests      - Configures and runs the int-tests in a devstack vm-gate environment.
          --tools--
          debug           - Debug this script (shows all commands).
          wipe-logs       - Resets all log files.
          rd-sql          - Opens the Trove MySQL database.
          vagrant-ssh     - Runs a command from the host on the server.
          clear           - Destroys instances and rabbit queues.
          clean           - Cleans up resources created by a failed test run. Takes project_name as an optional parameter (defaults to alt_demo).
          run             - Starts RD but not in a screen.
          run-fake        - Runs the server in fake mode.
          update-projects - Runs git pull on all of the Trove daemon dependencies.
          reset-task      - Sets an instance task to NONE.
          wipe-queues     - Resets RabbitMQ queues.
    "
    exit 1
}

function run_command() {
    # Print the available commands
    if [ $# -lt 1 ]; then
        print_usage
    fi

    case "$1" in
        "build-image" ) shift; cmd_build_image $@;;
        "upload-image" ) shift; cmd_build_and_upload_image $@;;
        "int-tests" ) shift; cmd_int_tests $@;;
        "debug" ) shift; echo "Enabling debugging."; \
            set -o xtrace; TROVESTACK_DUMP_ENV=true; run_command $@;;
        "gate-tests" ) shift; cmd_gate_tests $@;;
        "wipe-queues" ) shift; cmd_wipe_queues $@;;
        * )
            echo "'$1' not a valid command"
            exit 1
    esac
}

run_command $@
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/scripts/trovestack.rc0000644000175000017500000001043100000000000022673 0ustar00coreycorey00000000000000# Paths inside the VM.
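#
# Everything below relies on two shell defaulting idioms so that callers can
# pre-seed values in the environment (or in options.rc) before this file is
# sourced. A minimal sketch of the idioms (FOO/bar are illustrative only;
# get_bool is provided by the surrounding trovestack tooling):
#
#   [ -z $FOO ] && FOO=bar          # assign only if FOO is unset or empty
#   FOO=${FOO:-bar}                 # equivalent parameter-expansion form
#   FLAG=$(get_bool FLAG false)     # normalize an env var to true/false
#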
[ -z $SERVICE_HOST ] && SERVICE_HOST=`get_default_host_ip` [ -z $DEST ] && DEST=/opt/stack [ -z $BRIDGE_IP ] && BRIDGE_IP=172.24.4.1 [ -z $PATH_DEVSTACK_SRC ] && PATH_DEVSTACK_SRC=~/devstack [ -z $TROVE_CONF_DIR ] && TROVE_CONF_DIR=/etc/trove [ -z $MYSQL_HOST ] && MYSQL_HOST=$SERVICE_HOST # Set up the region name # Try REGION_NAME then OS_REGION_NAME then RegionOne (the devstack default) REGION_NAME=${REGION_NAME:-${OS_REGION_NAME:-RegionOne}} # Enable Neutron ENABLE_NEUTRON=$(get_bool ENABLE_NEUTRON true) # Enable Swift ENABLE_SWIFT=$(get_bool ENABLE_SWIFT true) # Enable osprofiler - note: Enables Ceilometer as well ENABLE_PROFILER=$(get_bool ENABLE_PROFILER false) PROFILER_TRACE_SQL=$(get_bool PROFILER_TRACE_SQL false) [ -z $PROFILER_HMAC_KEYS ] && PROFILER_HMAC_KEYS=SECRET_KEY # Enable ceilometer ENABLE_CEILOMETER=$(get_bool ENABLE_CEILOMETER $ENABLE_PROFILER) # Enable Mistral ENABLE_MISTRAL=$(get_bool ENABLE_MISTRAL false) # Don't include certain .rc files in local.conf.d by default USING_VAGRANT=$(get_bool USING_VAGRANT false) USE_KVM=$(get_bool USE_KVM false) USE_UUID_TOKEN=$(get_bool USE_UUID_TOKEN false) # Specify configuration for Ceilometer CEILOMETER_SERVICES_CONF=$(get_bool CEILOMETER_SERVICES_CONF $ENABLE_CEILOMETER) CEILOMETER_CINDER_CONF=$(get_bool CEILOMETER_CINDER_CONF false) CEILOMETER_NOVA_CONF=$(get_bool CEILOMETER_NOVA_CONF false) # Paths for various OpenStack components PATH_DEVSTACK_OUTPUT=$DEST PATH_NOVA=$DEST/nova PATH_KEYSTONE=$DEST/keystone PATH_GLANCE=$DEST/glance PATH_SWIFT=$DEST/swift # PATH_TROVE is set at the top of trovestack PATH_PYTHON_NOVACLIENT=$DEST/python-novaclient PATH_KEYSTONECLIENT=$DEST/python-keystoneclient PATH_OPENSTACKCLIENT=$DEST/python-openstackclient PATH_PYTHON_SWIFTCLIENT=$DEST/python-swiftclient PATH_PYTHON_TROVECLIENT=$DEST/python-troveclient PATH_TROVE_DASHBOARD=$DEST/trove-dashboard # Save the state of TROVE_BRANCH first, since it's used in trovestack TROVE_BRANCH_ORIG=${TROVE_BRANCH} # Devstack and OpenStack git repo source paths, etc. GIT_BASE=${GIT_BASE:-https://opendev.org} GIT_OPENSTACK=${GIT_OPENSTACK:-${GIT_BASE}/openstack} DEVSTACK_REPO=${DEVSTACK_REPO:-${GIT_BASE}/openstack-dev/devstack.git} TROVE_REPO=${TROVE_REPO:-${GIT_OPENSTACK}/trove.git} TROVE_DIR=${TROVE_DIR:-${PATH_TROVE}} TROVE_BRANCH=${TROVE_BRANCH:-master} TROVE_CLIENT_REPO=${TROVE_CLIENT_REPO:-${TROVECLIENT_REPO:-${GIT_OPENSTACK}/python-troveclient.git}} TROVE_CLIENT_DIR=${TROVE_CLIENT_DIR:-${TROVECLIENT_DIR:-${PATH_PYTHON_TROVECLIENT}}} TROVE_CLIENT_BRANCH=${TROVE_CLIENT_BRANCH:-${TROVECLIENT_BRANCH:-master}} TROVE_DASHBOARD_REPO=${TROVE_DASHBOARD_REPO:-${TROVEDASHBOARD_REPO:-${GIT_OPENSTACK}/trove-dashboard.git}} TROVE_DASHBOARD_DIR=${TROVE_DASHBOARD_DIR:-${TROVEDASHBOARD_DIR:-${PATH_TROVE_DASHBOARD}}} TROVE_DASHBOARD_BRANCH=${TROVE_DASHBOARD_BRANCH:-${TROVEDASHBOARD_BRANCH:-master}} # Trove specific networking options TROVE_PRIVATE_NETWORK_NAME=private TROVE_PRIVATE_SUBNET_NAME=private-subnet # Destination for working data DATA_DIR=${DEST}/data # Destination for status files SERVICE_DIR=${DEST}/status # Cinder Volume Group Name VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-${DATA_DIR}/${VOLUME_GROUP}-backing-file} VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-51200M} # Passwords used by devstack. 
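#
# Note: MYSQL_PASSWORD, RABBIT_PASSWORD and SERVICE_TOKEN below are fixed
# development-only values, while ADMIN_PASSWORD and SERVICE_PASSWORD follow
# the same chained-default pattern as REGION_NAME above. A hypothetical
# override, exported before sourcing this file or placed in
# ~/trovestack.options.rc:
#
#   export ADMIN_PASSWORD=my-admin-secret
#   export SERVICE_PASSWORD=my-service-secret
#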
MYSQL_PASSWORD=e1a2c042c828d3566d0a RABBIT_PASSWORD=f7999d1955c5014aa32c SERVICE_TOKEN=be19c524ddc92109a224 ADMIN_PASSWORD=${ADMIN_PASSWORD:-${OS_PASSWORD:-3de4922d8b6ac5a1aad9}} SERVICE_PASSWORD=${SERVICE_PASSWORD:-"secretservice"} # Swift hash used by devstack. SWIFT_HASH=12go358snjw24501 # Swift Disk Image SWIFT_DATA_DIR=${DATA_DIR}/swift SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img # The following values can be used to tweak how devstack sets # up Trove. If not explicitly set, the defaults in the code are used. # To make changes without modifying the repo, add these variables # to options.rc or ~/trovestack.options.rc #export TROVE_MAX_ACCEPTED_VOLUME_SIZE=10 #export TROVE_MAX_INSTANCES_PER_TENANT=10 #export TROVE_MAX_VOLUMES_PER_TENANT=40 #export TROVE_AGENT_CALL_LOW_TIMEOUT=15 #export TROVE_AGENT_CALL_HIGH_TIMEOUT=300 #export TROVE_RESIZE_TIME_OUT=3600 #export TROVE_USAGE_TIMEOUT=1500 #export TROVE_STATE_CHANGE_WAIT_TIME=180 # Image MYSQL_IMAGE_ID=${MYSQL_IMAGE_ID:-""} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6561086 trove-12.1.0.dev92/integration/tests/0000755000175000017500000000000000000000000017634 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7041094 trove-12.1.0.dev92/integration/tests/integration/0000755000175000017500000000000000000000000022157 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/tests/integration/core.test.conf0000644000175000017500000000312400000000000024734 0ustar00coreycorey00000000000000{ "report_directory":"rdli-test-report", "white_box":false, "test_mgmt":false, "use_local_ovz":false, "use_venv":false, "glance_code_root":"/opt/stack/glance", "glance_api_conf":"/vagrant/conf/glance-api.conf", "glance_reg_conf":"/vagrant/conf/glance-reg.conf", "glance_images_directory": "/glance_images", "glance_image": "fakey_fakerson.tar.gz", "instance_flavor_name":"m1.rd-tiny", "instance_bigger_flavor_name":"m1.rd-smaller", "nova_code_root":"/opt/stack/nova", "nova_conf":"/home/vagrant/nova.conf", "keystone_code_root":"/opt/stack/keystone", "keystone_conf":"/etc/keystone/keystone.conf", "trove_code_root":"/opt/stack/trove", "trove_conf":"/tmp/trove.conf", "trove_version":"v1.0", "trove_api_updated":"2012-08-01T00:00:00Z", "trove_must_have_volume":false, "trove_can_have_volume":true, "trove_main_instance_has_volume": true, "trove_max_accepted_volume_size": 1000, "trove_max_instances_per_user": 55, "trove_max_volumes_per_user": 100, "use_reaper":false, "root_removed_from_instance_api": true, "root_timestamp_disabled": false, "openvz_disabled": false, "management_api_disabled": true, "dbaas_image": 1, "dns_driver":"trove.dns.rsdns.driver.RsDnsDriver", "dns_instance_entry_factory":"trove.dns.rsdns.driver.RsDnsInstanceEntryFactory", "databases_page_size": 20, "instances_page_size": 20, "users_page_size": 20, "rabbit_runs_locally":false, "dns_instance_entry_factory":"trove.dns.rsdns.driver.RsDnsInstanceEntryFactory", "sentinel": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/tests/integration/int_tests.py0000644000175000017500000002124600000000000024552 0ustar00coreycorey00000000000000#!/usr/bin/env python # # # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Runs the tests. There are a few initialization issues to deal with. The first is flags, which must be initialized before any imports. The test configuration has the same problem (it was based on flags back when the tests resided outside of the Nova code). The command line is picked apart so that Nose won't see commands it isn't compatible with, such as "--flagfile" or "--group". This script imports all other tests to make them known to Proboscis before passing control to proboscis.TestProgram which itself calls nose, which then call unittest.TestProgram and exits. If "repl" is a command line argument, then the original stdout and stderr is saved and sys.exit is neutralized so that unittest.TestProgram will not exit and instead sys.stdout and stderr are restored so that interactive mode can be used. """ from __future__ import absolute_import import atexit import gettext import os import six import sys import proboscis from nose import config from nose import core from tests.colorizer import NovaTestRunner if os.environ.get("PYDEV_DEBUG", "False") == 'True': from pydev import pydevd pydevd.settrace('10.0.2.2', port=7864, stdoutToServer=True, stderrToServer=True) def add_support_for_localization(): """Adds support for localization in the logging. If ../nova/__init__.py exists, add ../ to Python search path, so that it will override what happens to be installed in /usr/(local/)lib/python... """ path = os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir) possible_topdir = os.path.normpath(path) if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) if six.PY3: gettext.install('nova') else: gettext.install('nova', unicode=True) MAIN_RUNNER = None def initialize_rdl_config(config_file): from trove.common import cfg from oslo_log import log from trove.db import get_db_api conf = cfg.CONF cfg.parse_args(['int_tests'], default_config_files=[config_file]) log.setup(conf, None) try: get_db_api().configure_db(conf) conf_file = conf.find_file(conf.api_paste_config) except RuntimeError as error: import traceback print(traceback.format_exc()) sys.exit("ERROR: %s" % error) def _clean_up(): """Shuts down any services this program has started and shows results.""" from tests.util import report report.update() if MAIN_RUNNER is not None: MAIN_RUNNER.on_exit() from tests.util.services import get_running_services for service in get_running_services(): sys.stderr.write("Stopping service ") for c in service.cmd: sys.stderr.write(c + " ") sys.stderr.write("...\n\r") service.stop() def import_tests(): # The DNS stuff is problematic. Not loading the other tests allow us to # run its functional tests only. 
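    # Importing a test module is all it takes to register its groups with
    # proboscis; a minimal sketch of the pattern used by the modules imported
    # below (the module and group names here are purely illustrative):
    #
    #   from proboscis import test
    #
    #   @test(groups=["dbaas.api.example"])
    #   class ExampleTests(object):
    #       pass
    #
    # Once such a module is imported, its group becomes selectable on the
    # command line via --group=dbaas.api.example.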
ADD_DOMAINS = os.environ.get("ADD_DOMAINS", "False") == 'True' if not ADD_DOMAINS: # F401 unused imports needed for tox tests from trove.tests.api import backups # noqa from trove.tests.api import configurations # noqa from trove.tests.api import databases # noqa from trove.tests.api import datastores # noqa from trove.tests.api import instances as rd_instances # noqa from trove.tests.api import instances_actions as acts # noqa from trove.tests.api import instances_delete # noqa from trove.tests.api import instances_resize # noqa from trove.tests.api import limits # noqa from trove.tests.api.mgmt import datastore_versions # noqa from trove.tests.api.mgmt import instances_actions as mgmt_acts # noqa from trove.tests.api import replication # noqa from trove.tests.api import root # noqa from trove.tests.api import user_access # noqa from trove.tests.api import users # noqa from trove.tests.api import versions # noqa from trove.tests.db import migrations # noqa # Groups that exist as core int-tests are registered from the # trove.tests.int_tests module from trove.tests import int_tests def run_main(test_importer): add_support_for_localization() # Strip non-nose arguments out before passing this to nosetests repl = False nose_args = [] conf_file = "~/test.conf" show_elapsed = True groups = [] print("RUNNING TEST ARGS : " + str(sys.argv)) extra_test_conf_lines = [] rdl_config_file = None nova_flag_file = None index = 0 while index < len(sys.argv): arg = sys.argv[index] if arg[:2] == "-i" or arg == '--repl': repl = True elif arg[:7] == "--conf=": conf_file = os.path.expanduser(arg[7:]) print("Setting TEST_CONF to " + conf_file) os.environ["TEST_CONF"] = conf_file elif arg[:8] == "--group=": groups.append(arg[8:]) elif arg == "--test-config": if index >= len(sys.argv) - 1: print('Expected an argument to follow "--test-conf".') sys.exit() conf_line = sys.argv[index + 1] extra_test_conf_lines.append(conf_line) elif arg[:11] == "--flagfile=": pass elif arg[:14] == "--config-file=": rdl_config_file = arg[14:] elif arg[:13] == "--nova-flags=": nova_flag_file = arg[13:] elif arg.startswith('--hide-elapsed'): show_elapsed = False else: nose_args.append(arg) index += 1 # Many of the test decorators depend on configuration values, so before # start importing modules we have to load the test config followed by the # flag files. from trove.tests.config import CONFIG # Find config file. if not "TEST_CONF" in os.environ: raise RuntimeError("Please define an environment variable named " + "TEST_CONF with the location to a conf file.") file_path = os.path.expanduser(os.environ["TEST_CONF"]) if not os.path.exists(file_path): raise RuntimeError("Could not find TEST_CONF at " + file_path + ".") # Load config file and then any lines we read from the arguments. CONFIG.load_from_file(file_path) for line in extra_test_conf_lines: CONFIG.load_from_line(line) if CONFIG.white_box: # If white-box testing, set up the flags. # Handle loading up RDL's config file madness. initialize_rdl_config(rdl_config_file) # Set up the report, and print out how we're running the tests. 
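    # A hypothetical invocation tying together the argument handling above
    # (the paths and group names are illustrative only):
    #
    #   TEST_CONF=~/test.conf python int_tests.py --group=blackbox \
    #       --test-config "white_box=True" --config-file=/tmp/trove.conf
    #
    # --conf/--test-config feed the test CONFIG, --config-file points at the
    # oslo/RDL config used in white-box mode, --group selections are handed
    # to proboscis, and any unrecognized arguments fall through to nose via
    # nose_args.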
from tests.util import report from datetime import datetime report.log("Trove Integration Tests, %s" % datetime.now()) report.log("Invoked via command: " + str(sys.argv)) report.log("Groups = " + str(groups)) report.log("Test conf file = %s" % os.environ["TEST_CONF"]) if CONFIG.white_box: report.log("") report.log("Test config file = %s" % rdl_config_file) report.log("") report.log("sys.path:") for path in sys.path: report.log("\t%s" % path) # Now that all configurations are loaded its time to import everything test_importer() atexit.register(_clean_up) c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3, plugins=core.DefaultPluginManager()) runner = NovaTestRunner(stream=c.stream, verbosity=c.verbosity, config=c, show_elapsed=show_elapsed, known_bugs=CONFIG.known_bugs) MAIN_RUNNER = runner if repl: # Turn off the following "feature" of the unittest module in case # we want to start a REPL. sys.exit = lambda x: None proboscis.TestProgram(argv=nose_args, groups=groups, config=c, testRunner=MAIN_RUNNER).run_and_exit() sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ if __name__ == "__main__": run_main(import_tests) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/tests/integration/localhost.test.conf0000644000175000017500000000373000000000000025777 0ustar00coreycorey00000000000000{ "include-files":["core.test.conf"], "fake_mode": true, "dbaas_url":"http://localhost:8779/v1.0", "version_url":"http://localhost:8779", "nova_auth_url":"http://localhost:8779/v1.0/auth", "trove_auth_url":"http://localhost:8779/v1.0/auth", "trove_client_insecure":false, "auth_strategy":"fake", "trove_version":"v1.0", "trove_api_updated":"2012-08-01T00:00:00Z", "trove_dns_support":false, "trove_ip_support":false, "nova_client": null, "users": [ { "auth_user":"admin", "auth_key":"password", "tenant":"admin-1000", "requirements": { "is_admin":true, "services": ["trove"] } }, { "auth_user":"jsmith", "auth_key":"password", "tenant":"2500", "requirements": { "is_admin":false, "services": ["trove"] } }, { "auth_user":"hub_cap", "auth_key":"password", "tenant":"3000", "requirements": { "is_admin":false, "services": ["trove"] } } ], "flavors": [ { "id": 1, "name": "m1.tiny", "ram": 512 }, { "id": 2, "name": "m1.small", "ram": 2048 }, { "id": 3, "name": "m1.medium", "ram": 4096 }, { "id": 4, "name": "m1.large", "ram": 8192 }, { "id": 5, "name": "m1.xlarge", "ram": 16384 }, { "id": 6, "name": "tinier", "ram": 506 }, { "id": 7, "name": "m1.rd-tiny", "ram": 512 }, { "id": 8, "name": "m1.rd-smaller", "ram": 768 } ], "sentinel": null } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7041094 trove-12.1.0.dev92/integration/tests/integration/tests/0000755000175000017500000000000000000000000023321 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/tests/integration/tests/__init__.py0000644000175000017500000000165100000000000025435 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`tests` -- Integration / Functional Tests for Nova =================================== .. automodule:: tests :platform: Unix :synopsis: Tests for Nova. .. moduleauthor:: Nirmal Ranganathan .. moduleauthor:: Tim Simpson """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/tests/integration/tests/colorizer.py0000644000175000017500000003770200000000000025714 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Colorizer Code is borrowed from Twisted: # Copyright (c) 2001-2010 Twisted Matrix Laboratories. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Unittest runner for Nova. To run all tests python run_tests.py To run a single test: python run_tests.py test_compute:ComputeTestCase.test_run_terminate To run a single test module: python run_tests.py test_compute or python run_tests.py api.test_wsgi """ import gettext import heapq import logging import os import unittest import six import sys import time if six.PY3: gettext.install('nova') else: gettext.install('nova', unicode=True) from nose import config from nose import core from nose import result from proboscis import case from proboscis import SkipTest class _AnsiColorizer(object): """ A colorizer is an object that loosely wraps around a stream, allowing callers to write text to the stream in a particular color. 
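    A hypothetical direct use (the stream and color name are illustrative):

        colorizer = _AnsiColorizer(sys.stdout)
        if colorizer.supported():
            colorizer.write("ok\n", "green")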
Colorizer classes must implement C{supported()} and C{write(text, color)}. """ _colors = dict(black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37) def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): """ A class method that returns True if the current platform supports coloring terminal output using this method. Returns False otherwise. """ if not stream.isatty(): return False # auto color only on TTYs try: import curses except ImportError: return False else: try: try: return curses.tigetnum("colors") > 2 except curses.error: curses.setupterm() return curses.tigetnum("colors") > 2 except: raise # guess false in case of error return False supported = classmethod(supported) def write(self, text, color): """ Write the given text to the stream in the given color. @param text: Text to be written to the stream. @param color: A string label for a color. e.g. 'red', 'white'. """ color = self._colors[color] self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) class _Win32Colorizer(object): """ See _AnsiColorizer docstring. """ def __init__(self, stream): from win32console import GetStdHandle, STD_OUT_HANDLE, \ FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \ FOREGROUND_INTENSITY red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN, FOREGROUND_BLUE, FOREGROUND_INTENSITY) self.stream = stream self.screenBuffer = GetStdHandle(STD_OUT_HANDLE) self._colors = { 'normal': red | green | blue, 'red': red | bold, 'green': green | bold, 'blue': blue | bold, 'yellow': red | green | bold, 'magenta': red | blue | bold, 'cyan': green | blue | bold, 'white': red | green | blue | bold } def supported(cls, stream=sys.stdout): try: import win32console screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) except ImportError: return False import pywintypes try: screenBuffer.SetConsoleTextAttribute( win32console.FOREGROUND_RED | win32console.FOREGROUND_GREEN | win32console.FOREGROUND_BLUE) except pywintypes.error: return False else: return True supported = classmethod(supported) def write(self, text, color): color = self._colors[color] self.screenBuffer.SetConsoleTextAttribute(color) self.stream.write(text) self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) class _NullColorizer(object): """ See _AnsiColorizer docstring. """ def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): return True supported = classmethod(supported) def write(self, text, color): self.stream.write(text) def get_elapsed_time_color(elapsed_time): if elapsed_time > 1.0: return 'yellow' elif elapsed_time > 0.25: return 'cyan' else: return 'green' class NovaTestResult(case.TestResult): def __init__(self, *args, **kw): self.show_elapsed = kw.pop('show_elapsed') self.known_bugs = kw.pop('known_bugs', {}) super(NovaTestResult, self).__init__(*args, **kw) self.num_slow_tests = 5 self.slow_tests = [] # this is a fixed-sized heap self._last_case = None self.colorizer = None # NOTE(vish): reset stdout for the terminal check stdout = sys.stdout sys.stdout = sys.__stdout__ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: if colorizer.supported(): self.colorizer = colorizer(self.stream) break sys.stdout = stdout # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate # error results in it failing to be initialized later. Otherwise, # _handleElapsedTime will fail, causing the wrong error message to # be outputted. 
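        # The slow_tests list above is maintained as a fixed-size min-heap in
        # _handleElapsedTime so that only the num_slow_tests largest elapsed
        # times survive; a standalone sketch of that idiom:
        #
        #   import heapq
        #   slowest, n = [], 5
        #   for item in [(0.2, "a"), (3.1, "b"), (0.9, "c"), (2.4, "d")]:
        #       if len(slowest) >= n:
        #           heapq.heappushpop(slowest, item)  # evicts current minimum
        #       else:
        #           heapq.heappush(slowest, item)
        #   # slowest now holds the n largest (elapsed, name) pairs.
        #
        # The assignment below is the early initialization described in the
        # note above.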
self.start_time = time.time() def _intercept_known_bugs(self, test, err): name = str(test) excuse = self.known_bugs.get(name, None) if excuse: tracker_id, error_string = excuse if error_string in str(err[1]): skip = SkipTest("KNOWN BUG: %s\n%s" % (tracker_id, str(err[1]))) self.onError(test) super(NovaTestResult, self).addSkip(test, skip) else: result = (RuntimeError, RuntimeError( 'Test "%s" contains known bug %s.\n' 'Expected the following error string:\n%s\n' 'What was seen was the following:\n%s\n' 'If the bug is no longer happening, please change ' 'the test config.' % (name, tracker_id, error_string, str(err))), None) self.onError(test) super(NovaTestResult, self).addError(test, result) return True return False def getDescription(self, test): return str(test) def _handleElapsedTime(self, test): self.elapsed_time = time.time() - self.start_time item = (self.elapsed_time, test) # Record only the n-slowest tests using heap if len(self.slow_tests) >= self.num_slow_tests: heapq.heappushpop(self.slow_tests, item) else: heapq.heappush(self.slow_tests, item) def _writeElapsedTime(self, test): color = get_elapsed_time_color(self.elapsed_time) self.colorizer.write(" %.2f" % self.elapsed_time, color) def _writeResult(self, test, long_result, color, short_result, success): if self.showAll: self.colorizer.write(long_result, color) if self.show_elapsed and success: self._writeElapsedTime(test) self.stream.writeln() elif self.dots: self.stream.write(short_result) self.stream.flush() # NOTE(vish): copied from unittest with edit to add color def addSuccess(self, test): if self._intercept_known_bugs(test, None): return unittest.TestResult.addSuccess(self, test) self._handleElapsedTime(test) self._writeResult(test, 'OK', 'green', '.', True) # NOTE(vish): copied from unittest with edit to add color def addFailure(self, test, err): if self._intercept_known_bugs(test, err): return self.onError(test) unittest.TestResult.addFailure(self, test, err) self._handleElapsedTime(test) self._writeResult(test, 'FAIL', 'red', 'F', False) # NOTE(vish): copied from nose with edit to add color def addError(self, test, err): """Overrides normal addError to add support for errorClasses. If the exception is a registered class, the error will be added to the list for that class, not errors. 
""" if self._intercept_known_bugs(test, err): return self.onError(test) self._handleElapsedTime(test) stream = getattr(self, 'stream', None) ec, ev, tb = err try: exc_info = self._exc_info_to_string(err, test) except TypeError: # 2.3 compat exc_info = self._exc_info_to_string(err) for cls, (storage, label, isfail) in self.errorClasses.items(): if result.isclass(ec) and issubclass(ec, cls): if isfail: test.passed = False storage.append((test, exc_info)) # Might get patched into a streamless result if stream is not None: if self.showAll: message = [label] detail = result._exception_detail(err[1]) if detail: message.append(detail) stream.writeln(": ".join(message)) elif self.dots: stream.write(label[:1]) return self.errors.append((test, exc_info)) test.passed = False if stream is not None: self._writeResult(test, 'ERROR', 'red', 'E', False) @staticmethod def get_doc(cls_or_func): """Grabs the doc abbreviated doc string.""" try: return cls_or_func.__doc__.split("\n")[0].strip() except (AttributeError, IndexError): return None def startTest(self, test): unittest.TestResult.startTest(self, test) self.start_time = time.time() test_name = None try: entry = test.test.__proboscis_case__.entry if entry.method: current_class = entry.method.im_class test_name = self.get_doc(entry.home) or entry.home.__name__ else: current_class = entry.home except AttributeError: current_class = test.test.__class__ if self.showAll: if current_class.__name__ != self._last_case: self.stream.writeln(current_class.__name__) self._last_case = current_class.__name__ try: doc = self.get_doc(current_class) except (AttributeError, IndexError): doc = None if doc: self.stream.writeln(' ' + doc) if not test_name: if hasattr(test.test, 'shortDescription'): test_name = test.test.shortDescription() if not test_name: test_name = test.test._testMethodName self.stream.write('\t%s' % str(test_name).ljust(60)) self.stream.flush() class NovaTestRunner(core.TextTestRunner): def __init__(self, *args, **kwargs): self.show_elapsed = kwargs.pop('show_elapsed') self.known_bugs = kwargs.pop('known_bugs', {}) self.__result = None self.__finished = False self.__start_time = None super(NovaTestRunner, self).__init__(*args, **kwargs) def _makeResult(self): self.__result = NovaTestResult( self.stream, self.descriptions, self.verbosity, self.config, show_elapsed=self.show_elapsed, known_bugs=self.known_bugs) self.__start_time = time.time() return self.__result def _writeSlowTests(self, result_): # Pare out 'fast' tests slow_tests = [item for item in result_.slow_tests if get_elapsed_time_color(item[0]) != 'green'] if slow_tests: slow_total_time = sum(item[0] for item in slow_tests) self.stream.writeln("Slowest %i tests took %.2f secs:" % (len(slow_tests), slow_total_time)) for elapsed_time, test in sorted(slow_tests, reverse=True): time_str = "%.2f" % elapsed_time self.stream.writeln(" %s %s" % (time_str.ljust(10), test)) def on_exit(self): if self.__result is None: print("Exiting before tests even started.") else: if not self.__finished: msg = "Tests aborted, trying to print available results..." 
print(msg) stop_time = time.time() self.__result.printErrors() self.__result.printSummary(self.__start_time, stop_time) self.config.plugins.finalize(self.__result) if self.show_elapsed: self._writeSlowTests(self.__result) def run(self, test): result_ = super(NovaTestRunner, self).run(test) if self.show_elapsed: self._writeSlowTests(result_) self.__finished = True return result_ if __name__ == '__main__': logging.setup() # If any argument looks like a test name but doesn't have "nova.tests" in # front of it, automatically add that so we don't have to type as much show_elapsed = True argv = [] test_fixture = os.getenv("UNITTEST_FIXTURE", "trove") for x in sys.argv: if x.startswith('test_'): argv.append('%s.tests.%s' % (test_fixture, x)) elif x.startswith('--hide-elapsed'): show_elapsed = False else: argv.append(x) testdir = os.path.abspath(os.path.join(test_fixture, "tests")) c = config.Config(stream=sys.stdout, env=os.environ, verbosity=3, workingDir=testdir, plugins=core.DefaultPluginManager()) runner = NovaTestRunner(stream=c.stream, verbosity=c.verbosity, config=c, show_elapsed=show_elapsed) sys.exit(not core.run(config=c, testRunner=runner, argv=argv)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/integration/tests/integration/tests/initialize.py0000644000175000017500000000455400000000000026044 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from proboscis import test from proboscis.asserts import fail from tests.util.services import Service from trove.tests.config import CONFIG def dbaas_url(): return str(CONFIG.values.get("dbaas_url")) def nova_url(): return str(CONFIG.values.get("nova_client")['url']) class Daemon(object): """Starts a daemon.""" def __init__(self, alternate_path=None, conf_file_name=None, extra_cmds=None, service_path_root=None, service_path=None): # The path to the daemon bin if the other one doesn't work. self.alternate_path = alternate_path self.extra_cmds = extra_cmds or [] # The name of a test config value which points to a conf file. self.conf_file_name = conf_file_name # The name of a test config value, which is inserted into the service_path. self.service_path_root = service_path_root # The first path to the daemon bin we try. self.service_path = service_path or "%s" def run(self): # Print out everything to make it print("Looking for config value %s..." 
% self.service_path_root) print(CONFIG.values[self.service_path_root]) path = self.service_path % CONFIG.values[self.service_path_root] print("Path = %s" % path) if not os.path.exists(path): path = self.alternate_path if path is None: fail("Could not find path to %s" % self.service_path_root) conf_path = str(CONFIG.values[self.conf_file_name]) cmds = CONFIG.python_cmd_list() + [path] + self.extra_cmds + \ [conf_path] print("Running cmds: %s" % cmds) self.service = Service(cmds) if not self.service.is_service_alive(): self.service.start() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7041094 trove-12.1.0.dev92/integration/tests/integration/tests/util/0000755000175000017500000000000000000000000024276 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/tests/integration/tests/util/__init__.py0000644000175000017500000000000000000000000026375 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/tests/integration/tests/util/report.py0000644000175000017500000000420300000000000026162 0ustar00coreycorey00000000000000"""Creates a report for the test. """ import os import shutil from os import path from trove.tests.config import CONFIG USE_LOCAL_OVZ = CONFIG.use_local_ovz class Reporter(object): """Saves the logs from a test run.""" def __init__(self, root_path): self.root_path = root_path if not path.exists(self.root_path): os.mkdir(self.root_path) for file in os.listdir(self.root_path): if file.endswith(".log"): os.remove(path.join(self.root_path, file)) def _find_all_instance_ids(self): instances = [] if USE_LOCAL_OVZ: for dir in os.listdir("/var/lib/vz/private"): instances.append(dir) return instances def log(self, msg): with open("%s/report.log" % self.root_path, 'a') as file: file.write(str(msg) + "\n") def _save_syslog(self): try: shutil.copyfile("/var/log/syslog", "host-syslog.log") except (shutil.Error, IOError) as err: self.log("ERROR logging syslog : %s" % (err)) def _update_instance(self, id): root = "%s/%s" % (self.root_path, id) def save_file(path, short_name): if USE_LOCAL_OVZ: try: shutil.copyfile("/var/lib/vz/private/%s/%s" % (id, path), "%s-%s.log" % (root, short_name)) except (shutil.Error, IOError) as err: self.log("ERROR logging %s for instance id %s! : %s" % (path, id, err)) else: #TODO: Can we somehow capture these (maybe SSH to the VM)? pass save_file("/var/log/firstboot", "firstboot") save_file("/var/log/syslog", "syslog") save_file("/var/log/nova/guest.log", "nova-guest") def _update_instances(self): for id in self._find_all_instance_ids(): self._update_instance(id) def update(self): self._update_instances() self._save_syslog() REPORTER = Reporter(CONFIG.report_directory) def log(msg): REPORTER.log(msg) def update(): REPORTER.update() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/tests/integration/tests/util/rpc.py0000644000175000017500000000726200000000000025443 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test utility for RPC checks. Functionality to check for rabbit here depends on having rabbit running on the same machine as the tests, so that the rabbitmqctl commands will function. The functionality is turned on or off by the test config "rabbit_runs_locally". """ import re from trove.tests.config import CONFIG from services import start_proc if CONFIG.values.get('rabbit_runs_locally', False) == True: DIRECT_ACCESS = True class Rabbit(object): def declare_queue(self, topic): """Call this to declare a queue from Python.""" #from trove.rpc.impl_kombu import Connection from trove.openstack.common.rpc import create_connection with create_connection() as conn: consumer = conn.declare_topic_consumer(topic=topic) def get_queue_items(self, queue_name): """Determines if the queue exists and if so the message count. If the queue exists the return value is an integer, otherwise its None. Be careful because queue_name is used in a regex and can't have any unescaped characters. """ proc = start_proc(["/usr/bin/sudo", "rabbitmqctl", "list_queues"], shell=False) for line in iter(proc.stdout.readline, ""): print("LIST QUEUES:" + line) m = re.search(r"%s\s+([0-9]+)" % queue_name, line) if m: return int(m.group(1)) return None @property def is_alive(self): """Calls list_queues, should fail.""" try: stdout, stderr = self.run(0, "rabbitmqctl", "list_queues") for lines in stdout, stderr: for line in lines: if "no_exists" in line: return False return True except Exception: return False def reset(self): out, err = self.run(0, "rabbitmqctl", "reset") print(out) print(err) def run(self, check_exit_code, *cmd): cmds = ["/usr/bin/sudo"] + list(cmd) proc = start_proc(cmds) lines = proc.stdout.readlines() err_lines = proc.stderr.readlines() return lines, err_lines def start(self): print("Calling rabbitmqctl start_app") out = self.run(0, "rabbitmqctl", "start_app") print(out) out, err = self.run(0, "rabbitmqctl", "change_password", "guest", CONFIG.values['rabbit_password']) print(out) print(err) def stop(self): print("Calling rabbitmqctl stop_app") out = self.run(0, "rabbitmqctl", "stop_app") print(out) else: DIRECT_ACCESS = False class Rabbit(object): def __init__(self): raise RuntimeError("rabbit_runs_locally is set to False in the " "test config, so this test cannot be run.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/integration/tests/integration/tests/util/services.py0000644000175000017500000002253100000000000026476 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Functions to initiate and shut down services needed by the tests.""" import os import re import subprocess import time from collections import namedtuple from httplib2 import Http from nose.plugins.skip import SkipTest from proboscis import decorators def _is_web_service_alive(url): """Does a HTTP GET request to see if the web service is up.""" client = Http() try: resp = client.request(url, 'GET') return resp != None except Exception: return False _running_services = [] def get_running_services(): """ Returns the list of services which this program has started.""" return _running_services def start_proc(cmd, shell=False): """Given a command, starts and returns a process.""" env = os.environ.copy() proc = subprocess.Popen( cmd, shell=shell, stdin=subprocess.PIPE, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env ) return proc MemoryInfo = namedtuple("MemoryInfo", ['mapped', 'writeable', 'shared']) class Service(object): """Starts and stops a service under test. The methods to start and stop the service will not actually do anything if they detect the service is already running on this machine. This is because it may be useful for developers to start the services themselves some other way. """ # TODO(tim.simpson): Hard to follow, consider renaming certain attributes. def __init__(self, cmd): """Defines a service to run.""" if not isinstance(cmd, list): raise TypeError() self.cmd = cmd self.do_not_manage_proc = False self.proc = None def __del__(self): if self.is_running: self.stop() def ensure_started(self): """Starts the service if it is not running.""" if not self.is_running: self.start() def find_proc_id(self): """Finds and returns the process id.""" if not self.cmd: return False # The cmd[1] signifies the executable python script. It gets invoked # as python /path/to/executable args, so the entry is # /path/to/executable actual_command = self.cmd[1].split("/")[-1] proc_command = ["/usr/bin/pgrep", "-f", actual_command] proc = start_proc(proc_command, shell=False) # this is to make sure there is only one pid returned from the pgrep has_two_lines = False pid = None for line in iter(proc.stdout.readline, ""): if has_two_lines: raise RuntimeError("Found PID twice.") pid = int(line) has_two_lines = True return pid def get_memory_info(self): """Returns how much memory the process is using according to pmap.""" pid = self.find_proc_id() if not pid: raise RuntimeError("Can't find PID, so can't get memory.") proc = start_proc(["/usr/bin/pmap", "-d", str(pid)], shell=False) for line in iter(proc.stdout.readline, ""): m = re.search(r"mapped\:\s([0-9]+)K\s+" r"writeable/private:\s([0-9]+)K\s+" r"shared:\s+([0-9]+)K", line) if m: return MemoryInfo(int(m.group(1)), int(m.group(2)), int(m.group(3))) raise RuntimeError("Memory info not found.") def get_fd_count_from_proc_file(self): """Returns file descriptors according to /proc//status.""" pid = self.find_proc_id() with open("/proc/%d/status" % pid) as status: for line in status.readlines(): index = line.find(":") name = line[:index] value = line[index + 1:] if name == "FDSize": return int(value) raise RuntimeError("FDSize not found!") def get_fd_count(self): """Returns file descriptors according to /proc//status.""" pid = self.find_proc_id() cmd = "Finding file descriptors..." 
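        # find_proc_id() and get_memory_info() above shell out to pgrep and
        # pmap; a stripped-down sketch of the same probe (the process name is
        # illustrative only):
        #
        #   import re, subprocess
        #   pid = int(subprocess.check_output(
        #       ["/usr/bin/pgrep", "-f", "trove-api"]).split()[0])
        #   out = subprocess.check_output(["/usr/bin/pmap", "-d", str(pid)])
        #   m = re.search(rb"mapped:\s+(\d+)K", out)
        #   mapped_kb = int(m.group(1)) if m else None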
print("CMD" + cmd) proc = start_proc(['ls', '-la', '/proc/%d/fd' % pid], shell=False) count = -3 has_two_lines = False for line in iter(proc.stdout.readline, ""): print("\t" + line) count += 1 if not count: raise RuntimeError("Could not get file descriptors!") return count with open("/proc/%d/fd" % pid) as status: for line in status.readlines(): index = line.find(":") name = line[:index] value = line[index + 1:] if name == "FDSize": return int(value) raise RuntimeError("FDSize not found!") def kill_proc(self): """Kills the process, wherever it may be.""" pid = self.find_proc_id() if pid: start_proc("sudo kill -9 " + str(pid), shell=True) time.sleep(1) if self.is_service_alive(): raise RuntimeError('Cannot kill process, PID=' + str(self.proc.pid)) def is_service_alive(self, proc_name_index=1): """Searches for the process to see if its alive. This function will return true even if this class has not started the service (searches using ps). """ if not self.cmd: return False time.sleep(1) # The cmd[1] signifies the executable python script. It gets invoked # as python /path/to/executable args, so the entry is # /path/to/executable actual_command = self.cmd[proc_name_index].split("/")[-1] print(actual_command) proc_command = ["/usr/bin/pgrep", "-f", actual_command] print(proc_command) proc = start_proc(proc_command, shell=False) line = proc.stdout.readline() print(line) # pgrep only returns a pid. if there is no pid, it'll return nothing return len(line) != 0 @property def is_running(self): """Returns true if the service has already been started. Returns true if this program has started the service or if it previously detected it had started. The main use of this property is to know if the service was already begun by this program- use is_service_alive for a more definitive answer. 
""" return self.proc or self.do_not_manage_proc def restart(self, extra_args): if self.do_not_manage_proc: raise RuntimeError("Can't restart proc as the tests don't own it.") self.stop() time.sleep(2) self.start(extra_args=extra_args) def start(self, time_out=30, extra_args=None): """Starts the service if necessary.""" extra_args = extra_args or [] if self.is_running: raise RuntimeError("Process is already running.") if self.is_service_alive(): self.do_not_manage_proc = True return self.proc = start_proc(self.cmd + extra_args, shell=False) if not self._wait_for_start(time_out=time_out): self.stop() raise RuntimeError("Issued the command successfully but the " "service (" + str(self.cmd + extra_args) + ") never seemed to start.") _running_services.append(self) def stop(self): """Stops the service, but only if this program started it.""" if self.do_not_manage_proc: return if not self.proc: raise RuntimeError("Process was not started.") self.proc.terminate() self.proc.kill() self.proc.wait() self.proc.stdin.close() self.kill_proc() self.proc = None global _running_services _running_services = [svc for svc in _running_services if svc != self] def _wait_for_start(self, time_out): """Waits until time_out (in seconds) for service to appear.""" give_up_time = time.time() + time_out while time.time() < give_up_time: if self.is_service_alive(): return True return False class NativeService(Service): def is_service_alive(self): return super(NativeService, self).is_service_alive(proc_name_index=0) class WebService(Service): """Starts and stops a web service under test.""" def __init__(self, cmd, url): """Defines a service to run.""" Service.__init__(self, cmd) if not isinstance(url, (str, unicode)): raise TypeError() self.url = url self.do_not_manage_proc = self.is_service_alive() def is_service_alive(self): """Searches for the process to see if its alive.""" return _is_web_service_alive(self.url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/lower-constraints.txt0000644000175000017500000000570500000000000020414 0ustar00coreycorey00000000000000alabaster==0.7.10 alembic==0.9.8 amqp==2.2.2 appdirs==1.4.3 asn1crypto==0.24.0 astroid==1.6.5 # LGPLv2.1 Babel==2.3.4 bandit==1.1.0 beautifulsoup4==4.6.0 cachetools==2.0.1 cassandra-driver==2.1.4 certifi==2018.1.18 cffi==1.11.5 chardet==3.0.4 cliff==2.11.0 cmd2==0.8.1 colorama==0.3.9 contextlib2==0.5.5 CouchDB==0.8 coverage==4.0 cryptography==2.1.4 debtcollector==1.19.0 decorator==4.2.1 deprecation==2.0 diskimage-builder==1.1.2 doc8==0.6.0 docutils==0.14 dogpile.cache==0.6.5 dulwich==0.19.0 enum34===1.0.4 enum-compat==0.0.2 eventlet==0.18.2 extras==1.0.0 fasteners==0.14.1 fixtures==3.0.0 future==0.16.0 futurist==1.6.0 gitdb2==2.0.3 GitPython==2.1.8 greenlet==0.4.13 httplib2==0.9.1 idna==2.6 imagesize==1.0.0 iso8601==0.1.11 Jinja2==2.10 jmespath==0.9.3 jsonpatch==1.21 jsonpointer==2.0 jsonschema==2.6.0 keystoneauth1==3.4.0 keystonemiddleware==4.17.0 kombu==4.1.0 linecache2==1.0.0 logilab-common==1.4.1 lxml==3.4.1 Mako==1.0.7 MarkupSafe==1.0 mccabe==0.4.0 mock==2.0.0 monotonic==1.4 mox3==0.25.0 msgpack==0.5.6 munch==2.2.0 netaddr==0.7.18 netifaces==0.10.6 networkx==1.11 nose==1.3.7 nosexcover==1.0.10 openstack.nose-plugin==0.7 openstackdocstheme==1.32.1 openstacksdk==0.12.0 os-api-ref==1.4.0 os-client-config==1.29.0 os-service-types==1.2.0 osc-lib==1.10.0 oslo.cache==1.29.0 oslo.concurrency==3.26.0 oslo.config==5.2.0 oslo.context==2.19.2 oslo.db==4.27.0 oslo.i18n==3.15.3 
oslo.log==3.36.0 oslo.messaging==5.29.0 oslo.middleware==3.31.0 oslo.policy==1.30.0 oslo.serialization==2.18.0 oslo.service==1.24.0 oslo.upgradecheck==0.1.0 oslo.utils==3.33.0 oslotest==3.2.0 osprofiler==1.4.0 packaging==17.1 passlib==1.7.0 Paste==2.0.2 PasteDeploy==1.5.0 pbr==2.0.0 pexpect==3.1 pika-pool==0.1.3 pika==0.10.0 prettytable==0.7.2 proboscis==1.2.5.3 psycopg2==2.6.2 ptyprocess==0.5.2 pycadf==2.7.0 pycparser==2.18 Pygments==2.2.0 pyinotify==0.9.6 pylint==1.9.2 # GPLv2 pymongo==3.0.2 PyMySQL==0.7.6 pyOpenSSL==17.5.0 pyparsing==2.2.0 pyperclip==1.6.0 python-cinderclient==3.3.0 python-dateutil==2.7.0 python-designateclient==2.7.0 python-editor==1.0.3 python-glanceclient==2.8.0 python-heatclient==1.10.0 python-keystoneclient==3.8.0 python-mimeparse==1.6.0 python-mistralclient==3.3.0 python-neutronclient==6.7.0 python-novaclient==9.1.0 python-subunit==1.2.0 python-swiftclient==3.2.0 python-troveclient==2.2.0 pytz==2018.3 PyYAML==3.12 redis==2.10.0 reno==2.5.0 repoze.lru==0.7 requests==2.18.4 requestsexceptions==1.4.0 restructuredtext-lint==1.1.3 rfc3986==1.1.0 Routes==2.3.1 simplejson==3.13.2 six==1.10.0 smmap2==2.0.3 snowballstemmer==1.2.1 Sphinx==1.6.2 sphinxcontrib-websupport==1.0.1 sqlalchemy-migrate==0.11.0 SQLAlchemy==1.0.10 sqlparse==0.2.4 statsd==3.2.2 stestr==1.1.0 stevedore==1.20.0 Tempita==0.5.2 tenacity==4.9.0 termcolor==1.1.0 testresources==2.0.1 testscenarios==0.5.0 testtools==2.2.0 traceback2==1.4.0 unittest2==1.1.0 urllib3==1.22 vine==1.1.4 voluptuous==0.11.1 waitress==1.1.0 warlock==1.3.0 WebOb==1.7.1 WebTest==2.0.27 wrapt==1.10.11 wsgi-intercept==1.4.1 xmltodict==0.10.1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7041094 trove-12.1.0.dev92/playbooks/0000755000175000017500000000000000000000000016152 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7041094 trove-12.1.0.dev92/playbooks/image-build/0000755000175000017500000000000000000000000020331 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/playbooks/image-build/post.yaml0000644000175000017500000000040100000000000022175 0ustar00coreycorey00000000000000- hosts: all name: Copy image tasks: - synchronize: src: "{{ ansible_user_dir }}/images" dest: "{{ zuul.executor.work_root }}/artifacts/" mode: pull verify_host: true rsync_opts: - "--exclude=/*/*/" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/playbooks/image-build/run.yaml0000644000175000017500000000125000000000000022017 0ustar00coreycorey00000000000000- hosts: all tasks: - name: Ensure artifacts/images directory exists file: path: '{{ ansible_user_dir }}/images' state: directory - name: Build Trove guest image shell: >- ./trovestack build-image \ {{ datastore_type }} \ {{ guest_os }} \ {{ guest_os_release }} \ {{ dev_mode }} \ {{ guest_username }} \ {{ ansible_user_dir }}/images/trove-{{ branch }}-{{ datastore_type }}-{{ guest_os }}-{{ guest_os_release }}{{ image_suffix }} args: chdir: "{{ ansible_user_dir }}/src/opendev.org/openstack/trove/integration/scripts" tags: - skip_ansible_lint ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6561086 trove-12.1.0.dev92/playbooks/legacy/0000755000175000017500000000000000000000000017416 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7041094 trove-12.1.0.dev92/playbooks/legacy/grenade-dsvm-trove/0000755000175000017500000000000000000000000023127 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/playbooks/legacy/grenade-dsvm-trove/post.yaml0000644000175000017500000000063300000000000025002 0ustar00coreycorey00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/playbooks/legacy/grenade-dsvm-trove/run.yaml0000644000175000017500000000316000000000000024617 0ustar00coreycorey00000000000000- hosts: all name: Autoconverted job legacy-grenade-dsvm-trove from old job gate-grenade-dsvm-trove tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ https://opendev.org \ openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PROJECTS="openstack/grenade $PROJECTS" export PROJECTS="openstack/trove-dashboard $PROJECTS" export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_FULL=1 export DEVSTACK_GATE_GRENADE=pullup export DEVSTACK_GATE_TROVE=1 export TEMPEST_CONCURRENCY=2 export BRANCH_OVERRIDE=default export DEVSTACK_GATE_USE_PYTHON3=True if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/playbooks/trove-devstack-base.yaml0000644000175000017500000000007600000000000022712 0ustar00coreycorey00000000000000- hosts: all roles: - run-devstack - trove-devstack ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/pylintrc0000644000175000017500000000023100000000000015732 0ustar00coreycorey00000000000000# pylintrc # # For trove we use the defaults, this file is just to shut up an # annoying error message from pylint. # # Don't set pylint options here. 
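# A typical local run that picks up this file (the invocation is
# illustrative; the gate exercises pylint through the project's tox/Zuul
# jobs instead):
#
#   pylint --rcfile=pylintrc trove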
trove-12.1.0.dev92/releasenotes/notes/add-cassandra-log-retrieval-a295f3d0d4c56804.yaml
---
features:
  - Enable database log retrieval on Cassandra instances.

trove-12.1.0.dev92/releasenotes/notes/add-cors-support-fe3ecbecb68f7efd.yaml
---
other:
  - Added CORS support.

trove-12.1.0.dev92/releasenotes/notes/add-designate-v2-dns-driver-8d1be56ab2c71b83.yaml
---
features:
  - |
    Added support for the Designate v2 API with a new DNS driver. To use
    this driver, set dns_driver = trove.dns.designate.driver.DesignateDriverV2

trove-12.1.0.dev92/releasenotes/notes/add-icmp-flag-58937cce344e77d9.yaml
---
features:
  - Add an icmp option for the DB security group. If icmp=True, users
    will be allowed to ping DB instances.

trove-12.1.0.dev92/releasenotes/notes/add-instance-detailed-list-e712dccf6c9091c0.yaml
---
features:
  - |
    Added the ``/instances/detail`` endpoint to fetch a list of instances
    with details.

trove-12.1.0.dev92/releasenotes/notes/add-max-prep-stmts-ac1056e127de7609.yaml
---
features:
  - Add 'max-prepared-stmt-cnt' to the list of valid values which can be
    used in configuration groups for MySQL, Percona, MariaDB and PXC.

trove-12.1.0.dev92/releasenotes/notes/add-new-relic-license-driver-0f314edabb7561c4.yaml
---
features:
  - Added a module driver for New Relic licenses. This allows activation
    of any New Relic software that is installed on the image.
    Bug 1571711
trove-12.1.0.dev92/releasenotes/notes/alter-user-portable-021f4b792e2c129b.yaml
---
fixes:
  - Use SET PASSWORD and RENAME USER queries to update user properties.

trove-12.1.0.dev92/releasenotes/notes/associate-volume-type-datastore-97defb9279b61c1f.yaml
---
features:
  - Added the ability to associate datastore versions with volume types.
    This enables operators to limit the volume types available when
    launching datastores. The associations are set via the trove-manage
    tool commands datastore_version_volume_type_add,
    datastore_version_volume_type_delete, and
    datastore_version_volume_type_list. If a user attempts to create an
    instance with a volume type that is not on the approved list for the
    specified datastore version, they will receive an error.

trove-12.1.0.dev92/releasenotes/notes/avoid-diverged-slave-when-migrating-mariadb-master-37e2429a1ea75913.yaml
---
fixes:
  - |
    MariaDB allows a server to be a master and a slave simultaneously, so
    when migrating masters, if the old master is reactivated before
    attaching the other replicas to the new master, new unexpected GTIDs
    may be created on the old master and synced to some of the other
    replicas by chance, as the other replicas are still connected to the
    old one at that time. After that, these diverged slaves will fail to
    change to the new master. This is fixed by first attaching the other
    replicas to the new master, and then dealing with the old master.
    Fixes #1754539

trove-12.1.0.dev92/releasenotes/notes/cassandra-backup-and-restore-00de234de67ea5ee.yaml
---
features:
  - Support has been added for Cassandra backup and restore using the
    Nodetool utility.

trove-12.1.0.dev92/releasenotes/notes/cassandra-configuration-groups-e6bcf4014a79f14f.yaml
---
features:
  - Implement configuration groups for Cassandra 2.1. You can now manage
    the configuration of Cassandra datastores using the Trove
    configuration groups capability.

trove-12.1.0.dev92/releasenotes/notes/cassandra-user-functions-041abfa4f4baa591.yaml
---
features:
  - |
    This patch set implements the following functionality for the
    Cassandra datastore:
    create/delete/get user
    list users
    change password
    grant/revoke/list access
    update attributes
    create/delete database
    list databases

    Notes on Cassandra users:

    In Cassandra only SUPERUSERS can create other users and grant
    permissions to database resources. Trove uses the 'os_admin'
    superuser to perform its administrative tasks. It proactively removes
    the built-in 'cassandra' superuser on prepare. The users it creates
    are all 'normal' (NOSUPERUSER) accounts. The permissions it can grant
    are also limited to non-superuser operations. This is to prevent
    anybody from creating a new superuser via the Trove API. Updatable
    attributes include username and password.

    The configuration template had to be updated to enable authentication
    and authorization support (the original configuration allowed
    anonymous connections). The default implementations used are:

    authenticator: org.apache.cassandra.auth.PasswordAuthenticator
    authorizer: org.apache.cassandra.auth.CassandraAuthorizer

    The superuser password is set to a random Trove password which is
    then stored in a Trove-read-only file in '~/.cassandra/cqlshrc',
    which is also the default location for client settings.

    Notes on Cassandra keyspaces:

    Cassandra stores replicas on multiple nodes to ensure reliability and
    fault tolerance. All replicas are equally important; there is no
    primary or master. A replication strategy determines the nodes where
    replicas are placed. The total number of replicas across the cluster
    is referred to as the replication factor. The above 'create database'
    implementation uses 'SimpleStrategy' with just a single replica on
    the guest machine. This is a very simplistic configuration, only good
    for the most basic applications and demonstration purposes.
    SimpleStrategy is for a single data center only. The following system
    keyspaces have been included in the default 'ignore_dbs'
    configuration list and are therefore excluded from all database
    operations: 'system', 'system_auth', 'system_traces'.

    Notes on user rename:

    Cassandra does not have a native way of renaming users. The reason
    why Cassandra itself does not implement rename is apparently just
    lack of demand for that feature. We implement it by creating a new
    user, transferring permissions and dropping the old one (which also
    removes its existing permissions). I asked about the sanity of this
    rename approach on the Cassandra mailing list and IRC channel and
    there should not be anything inherently wrong with the proposed
    procedure. This method, however, requires the user to always provide
    a password.

    Additional notes:

    Trove uses the official open-source Python driver for Cassandra to
    connect to the database and execute queries. The connection is
    implemented in CassandraConnection. It is now also used to obtain the
    current database status, as opposed to the original method of parsing
    the output of the client tool. The 'common/operating_system' module
    was extended with two new functions for reading/writing ini-style and
    YAML configuration files to/from Python dicts. Unit tests were added
    to 'guestagent/test_operating_system'. The existing Manager unit
    tests were extended to include the added functionality. Also includes
    some minor improvements to comments and log messages. Used the
    existing operating_system interface to update file ownership. The
    system module was removed and its contents moved to the Application
    class. This is to reduce the number of files and help facilitate
    overriding.
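The note above describes new 'common/operating_system' helpers that move YAML
configuration files in and out of Python dicts. A minimal, self-contained
sketch of that round trip follows; the function names and the file path are
illustrative assumptions, not Trove's actual operating_system API:

.. code-block:: python

   import yaml

   def read_yaml_file(path):
       # Load a YAML config file into a plain Python dict.
       with open(path) as f:
           return yaml.safe_load(f) or {}

   def write_yaml_file(data, path):
       # Serialize the dict back to YAML, one top-level key per line.
       with open(path, 'w') as f:
           yaml.safe_dump(data, f, default_flow_style=False)

   # For example, enabling the authentication/authorization settings
   # mentioned above in a cassandra.yaml-style file:
   conf = read_yaml_file('/etc/cassandra/cassandra.yaml')
   conf['authenticator'] = 'org.apache.cassandra.auth.PasswordAuthenticator'
   conf['authorizer'] = 'org.apache.cassandra.auth.CassandraAuthorizer'
   write_yaml_file(conf, '/etc/cassandra/cassandra.yaml')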
trove-12.1.0.dev92/releasenotes/notes/cluster-configuration-groups-37f7de9e5a343165.yaml
features:
  - Support attaching and detaching of configuration groups on clusters.

trove-12.1.0.dev92/releasenotes/notes/cluster-notifications-fd205f5f0148b052.yaml
---
features:
  - Adds a new field "instance_ids", which contains the ids of the
    cluster instances, to the payloads of two cluster events -
    DBaaSClusterShrink and DBaaSClusterGrow. Moreover, additional end
    notifications after growing and shrinking a cluster have been added.
    This allows better integration with tools for monitoring resource
    usage.

trove-12.1.0.dev92/releasenotes/notes/cluster-volume-type-901329a3b3667cb4.yaml
---
fixes:
  - Allow specification of volume-type on cluster create. (Bug 1623005)

trove-12.1.0.dev92/releasenotes/notes/cluster_list_show_all_ips-3547635440.yaml
---
fixes:
  - The payload for cluster GET now returns ips for all networks, not
    just the first one found for each instance. Bug 1642695

trove-12.1.0.dev92/releasenotes/notes/cluster_restart-bb5abb7372131ee0.yaml
---
features:
  - |
    Add support for cluster restart.

trove-12.1.0.dev92/releasenotes/notes/couchdb-backup-restore-0cc3324c3088f947.yaml
---
features:
  - Support has been added for CouchDB backup and restore.

trove-12.1.0.dev92/releasenotes/notes/couchdb-user-db-functions-fa41ac47fce095cb.yaml
---
features:
  - Support has been added for CouchDB database and user functions.

trove-12.1.0.dev92/releasenotes/notes/datastore-manager-refactor-5aeac4e6bfa6e07b.yaml
---
other:
  - Refactor the datastore guest manager code.

trove-12.1.0.dev92/releasenotes/notes/db2-backup-restore-96ab214cddd15181.yaml
---
features:
  - Support has been added for DB2 Express-C backup and restore.
trove-12.1.0.dev92/releasenotes/notes/db2-configuration-groups-ca2164be741d35f9.yaml
---
features:
  - Add support for configuration group management for DB2 Express-C.

trove-12.1.0.dev92/releasenotes/notes/db2-online-backup-restore-3783afe752562e70.yaml
---
features:
  - Add support for full online backup and restore for DB2 Express-C by
    enabling archive logging.
other:
  - In the Mitaka release, support was added for full offline backup and
    restore using the default circular logging. In this release, the name
    of the strategy for offline backup and restore was changed from
    DB2Backup to DB2OfflineBackup. Hence, to enable offline backups, set
    backup_strategy=DB2OfflineBackup, and for online backups,
    backup_strategy=DB2OnlineBackup. The backup_namespace and
    restore_namespace properties are the same for both types of backup
    and restore.

trove-12.1.0.dev92/releasenotes/notes/dbaas-ceilometer-notifications-5a623d0d6520be72.yaml
---
features:
  - Additional Ceilometer notifications have been provided by Trove,
    including create, end and error notifications for all state-changing
    API calls.

trove-12.1.0.dev92/releasenotes/notes/deprecate-default_neutron_networks-84cd00224d6b7bc1.yaml
---
deprecations:
  - The config option ``default_neutron_networks`` is deprecated and will
    be removed in a future release; use ``management_networks`` instead.

trove-12.1.0.dev92/releasenotes/notes/deprecate-long-query-time-b85af24772e2e7cb.yaml
---
deprecations:
  - |
    Make 'long query time' manageable via configuration groups (see bug
    1542485). Deprecate the global 'guest_log_long_query_time' option in
    preference of datastore-specific configurations:

    MySQL: long_query_time
    Percona: long_query_time
    Percona XtraDB Cluster: long_query_time
    MariaDB: long_query_time
    PostgreSQL: log_min_duration_statement

trove-12.1.0.dev92/releasenotes/notes/disply_module_bools_properly-571cca9a87f28339.yaml
---
fixes:
  - |
    Module list/show now returns boolean values as True/False instead of
    1/0. Bug 1656398

trove-12.1.0.dev92/releasenotes/notes/drop-py-2-7-010fe6df0c10352d.yaml
---
upgrade:
  - |
    Python 2.7 support has been dropped. The last release of Trove to
    support Python 2.7 is OpenStack Train.
    The minimum version of Python now supported by Trove is Python 3.6.

trove-12.1.0.dev92/releasenotes/notes/drop-python-26-support-39dff0c5636edc74.yaml
---
deprecations:
  - Dropping support for Python 2.6.

trove-12.1.0.dev92/releasenotes/notes/fix-apply-configuration-on-prepare-4cff827b7f3c4d33.yaml
---
fixes:
  - If given, apply the configuration overrides in prepare, just before
    creating initial users and/or databases. Failure to apply the given
    configuration should flip the instance into a failed state. The
    default implementation saves the overrides and restarts the database
    service to apply the changes. Datastores that do not require a
    restart may potentially override the base implementation in
    'apply_overrides_on_prepare()'.

trove-12.1.0.dev92/releasenotes/notes/fix-bad-swift-endpoint-in-guestlog-05f7483509dacbbf.yaml
---
fixes:
  - The guest log code raises a non-serializable exception if the given
    Swift endpoint is invalid. This causes an ambiguous "Circular
    reference detected" error on the guest, and a timeout on the caller.
    This case is now caught and the correct exception raised.

trove-12.1.0.dev92/releasenotes/notes/fix-cluster-show-346798b3e3.yaml
---
fixes:
  - Fix a race condition in cluster-show that returned an erroneous
    not-found error. Bug 1643002

trove-12.1.0.dev92/releasenotes/notes/fix-cluster-type-error-71cd846897dfd32e.yaml
---
fixes:
  - This applies to any type of cluster that uses the galera strategy
    when setting the nics on a create call. When we cast the object to
    set() it was a list of lists, and a set cannot hash a list, so this
    was causing an unhashable-type error. The change is to make
    instance_nics a list of strings (what we originally expected) to
    resolve this issue. Bug 1570602.

trove-12.1.0.dev92/releasenotes/notes/fix-deprecated-SafeConfigParse-ca3fd3e9f52a8cc8.yaml
---
fixes:
  - Since SafeConfigParser is deprecated as of Python 3.2, Trove uses
    ConfigParser for versions of Python >= 3.2 and SafeConfigParser for
    earlier versions. (Bug 1618666)
trove-12.1.0.dev92/releasenotes/notes/fix-galera_common-cluster-shrink-e2c80913423772dd.yaml
---
fixes:
  - |
    Fixes an issue in galera_common cluster shrink where the nodes to be
    removed were loaded incorrectly, which could bypass the
    ClusterShrinkMustNotLeaveClusterEmpty exception or run into a
    NotFound error. Bug 1699953

trove-12.1.0.dev92/releasenotes/notes/fix-gtid-parsing-9f60ad6e9e8f173f.yaml
---
fixes:
  - Fixed parsing of GTID references containing a list of GTIDs from the
    xtrabackup_binlog_info file on MySQL replicas.

trove-12.1.0.dev92/releasenotes/notes/fix-module-apply-after-remove-97c84c30fb320a46.yaml
---
fixes:
  - Fixed an issue where module-apply after module-remove caused
    module-query to skip reporting on that module. Bug 1571799

trove-12.1.0.dev92/releasenotes/notes/fix-mongo-cluster-grow-8fa4788af0ce5309.yaml
---
fixes:
  - Fixes bug 1526024, a failure in growing a MongoDB cluster because of
    a problem in the way in which passwords were synchronized with new
    query routers.

trove-12.1.0.dev92/releasenotes/notes/fix-mysql-replication-bf2b131994a5a772.yaml
---
fixes:
  - Fixes an issue with a failure to establish a new replica for MySQL in
    some cases where a replica already exists and some data has been
    inserted into the master. Bug 1563574

trove-12.1.0.dev92/releasenotes/notes/fix-mysql-replication-ca0928069c0bfab8.yaml
---
fixes:
  - Fixed the default configuration template for MySQL to ensure that
    replication uses binlog_format. Bug 1563541.

trove-12.1.0.dev92/releasenotes/notes/fix-postgres-pg-rewind-6eef0afb568439ce.yaml
fixes:
  - Fix PostgreSQL promote (bug 1633515).

trove-12.1.0.dev92/releasenotes/notes/fix-redis-configuration-f0543ede84f8aac3.yaml
---
fixes:
  - Fixes an issue with the Redis configuration; it used a wrong minimum
    value for repl-backlog-size in the validation rules. Bug 1697596
trove-12.1.0.dev92/releasenotes/notes/fix-trove-events-8ce54233504065cf.yaml
---
fixes:
  - Generate Trove events for the current period, and not a future
    period.

trove-12.1.0.dev92/releasenotes/notes/fix_mod_inst_cmd-3a46c7233e3.yaml
---
fixes:
  - The module-instances command now returns a paginated list of
    instances. A --count_only flag was added to the command to return a
    summary of the applied instances based on the MD5 of the module (this
    is most useful for live_update modules, to see which ones haven't
    been updated). Bug 1554900

trove-12.1.0.dev92/releasenotes/notes/fix_module_apply-042fc6e61f721540.yaml
---
fixes:
  - The case where a new instance_modules record was written for each
    apply has been fixed. This issue would have potentially made it
    impossible to delete a module. Bug 1640010

trove-12.1.0.dev92/releasenotes/notes/fix_module_driver_logging-666601f411db784a.yaml
---
fixes:
  - An invalid module driver is now logged correctly. Bug 1579900

trove-12.1.0.dev92/releasenotes/notes/fix_notification_err_msgs-e52771108633c9cf.yaml
---
fixes:
  - Fixed a wrong call in the conductor when reporting a guest
    notification exception. Bug 1577848

trove-12.1.0.dev92/releasenotes/notes/fixes-mariadb-config-groups-b5fa4f44a8ed7b85.yaml
---
fixes:
  - MariaDB historically leveraged the MySQL manager for guest support,
    including the configuration groups implementation. With MariaDB now
    having its own manager class that inherits from MySQL, it needs to
    have validation_rules and a ConfigParser setup. Bug 1532256

trove-12.1.0.dev92/releasenotes/notes/flavor-list-disk-6213c3760e374441.yaml
---
other:
  - Add a disk column in flavor-list. Bug 1617987.
trove-12.1.0.dev92/releasenotes/notes/flavor-list-ephemeral-edf2dc35d5c247b3.yaml
---
other:
  - Add an ephemeral column in flavor-list. (Bug 1617980)

trove-12.1.0.dev92/releasenotes/notes/flavor-list-vcpu-817b0f5715820377.yaml
---
other:
  - Add a vCPUs column in flavor-list. Bug 1261876.

trove-12.1.0.dev92/releasenotes/notes/force_delete-c2b06dbead554726.yaml
features:
  - The reset-status command will set the task and status of an instance
    to ERROR, after which it can be deleted.
  - The force-delete command will allow the deletion of an instance even
    if the instance is stuck in BUILD state.

trove-12.1.0.dev92/releasenotes/notes/grow-cluster-nic-az-0e0fe4083666c300.yaml
---
fixes:
  - Pass the instance nic and az to cluster grow. Add a specific fix for
    MongoDB to use the instance nic and az.

trove-12.1.0.dev92/releasenotes/notes/guest-call-timeout-2781a57ca8feb89a.yaml
fixes:
  - Increased the agent_call_high_timeout config setting to 10 minutes.
    This configures the length of time that the taskmanager will wait for
    an asynchronous guest agent call to complete.

trove-12.1.0.dev92/releasenotes/notes/implement-cassandra-clustering-9f7bc3ae6817c19e.yaml
---
features:
  - OpenStack Trove now supports clustering for Cassandra datastores. You
    can access clustering capabilities through the Trove cluster API.

trove-12.1.0.dev92/releasenotes/notes/implement-cassandra-root-b0870d23dbf1a848.yaml
---
features:
  - OpenStack Trove now supports superuser access for the Cassandra
    datastore via the root-enable and root-disable APIs.

trove-12.1.0.dev92/releasenotes/notes/implement-mariadb-clustering-088ac2f6012689fb.yaml
---
features:
  - OpenStack Trove now supports clustering for MariaDB datastores. You
    can access clustering capabilities through the Trove cluster API.
trove-12.1.0.dev92/releasenotes/notes/implement-redis-root-347b5ee0107debb5.yaml
---
features:
  - OpenStack Trove now supports enabling or disabling authentication for
    the Redis datastore via the root-enable and root-disable APIs.

trove-12.1.0.dev92/releasenotes/notes/improve-mysql-user-list-pagination-71457d934500f817.yaml
---
fixes:
  - Filter ignored users in the original query, before the result gets
    paginated (like in list_databases).

trove-12.1.0.dev92/releasenotes/notes/incremental_backup-1910ded0fc3474a3.yaml
features:
  - The --incremental flag for backup-create adds the ability to create
    an incremental backup based on the last full or incremental backup.
    If no full or incremental backup exists, a new full backup will be
    created.

trove-12.1.0.dev92/releasenotes/notes/instance-show-comp-vol-id-964db9f52a5ac9c1.yaml
---
other:
  - Add Compute ID (server_id) and Volume ID (volume_id) to trove show
    output for admin users. Bug #1633581

trove-12.1.0.dev92/releasenotes/notes/instance-upgrade-7d464f85e025d729.yaml
features:
  - A new instance upgrade API supports upgrading an instance of a
    datastore to a new datastore version. Includes an implementation for
    the MySQL family of databases.

trove-12.1.0.dev92/releasenotes/notes/locality-support-for-clusters-78bb74145d867df2.yaml
---
features:
  - A locality flag was added to the Trove REST API to allow a user to
    specify whether the instances of a cluster should be on the same
    hypervisor (affinity) or on different hypervisors (anti-affinity).

trove-12.1.0.dev92/releasenotes/notes/locality-support-for-replication-01d9b05d27b92d82.yaml
---
features:
  - A locality flag was added to the Trove REST API to allow a user to
    specify whether new replicas should be on the same hypervisor
    (affinity) or on different hypervisors (anti-affinity). (See the
    replica-creation sketch further below.)

trove-12.1.0.dev92/releasenotes/notes/make-password-length-datastore-specific-7cdb1bfeab6e6227.yaml
---
fixes:
  - Make 'default_password_length' a per-datastore property. Bug 1572230
trove-12.1.0.dev92/releasenotes/notes/mariadb-gtid-replication-1ea972bcfe909773.yaml
---
features:
  - Implements replication based on GTIDs for MariaDB. Adds a GTID
    replication strategy for MariaDB. Implements MariaDB-specific GTID
    handling in the guestagent. Configures the MariaDB config template to
    support bin logging. Adds MariaDB helper overrides to eliminate
    configuration group tests from scenario tests.

trove-12.1.0.dev92/releasenotes/notes/mask-configuration-passwords-317ff6d2415b2ca1.yaml
---
security:
  - Configuration show masks any password values.

trove-12.1.0.dev92/releasenotes/notes/module-management-66d3979cc45ed440.yaml
---
features:
  - A new feature called 'module management' has been added to Trove.
    Users can now create, update, list and delete modules. A module is a
    file that is provided to Trove, and when a database instance is
    launched, that file is deposited on the guest instance. This feature
    can be used for depositing files, for example license files, onto
    guest database instances.

trove-12.1.0.dev92/releasenotes/notes/module-ordering-92b6445a8ac3a3bf.yaml
---
features:
  - Modules can now be applied in a consistent order, based on the new
    'priority_apply' and 'apply_order' attributes when creating them.
    Blueprint module-management-ordering
upgrade:
  - For module ordering to work, db_upgrade must be run on the Trove
    database.

trove-12.1.0.dev92/releasenotes/notes/module-support-for-clusters-87b41dd7648275bf.yaml
---
features:
  - Support was added for applying modules to cluster instances through
    cluster-create and cluster-grow. Bug 1578917

trove-12.1.0.dev92/releasenotes/notes/module_reapply-342c0965a4318d4e.yaml
---
features:
  - |
    Support for the new 'reapply' command. This allows a given module to
    be reapplied to all instances to which it had previously been
    applied. Bug 1554903

trove-12.1.0.dev92/releasenotes/notes/module_reapply_update_values-1fb88dc58701368d.yaml
---
fixes:
  - Applying a module again will now reflect the updated name, type,
    datastore and datastore_version values. Bug 1611525
  - Updating a module with all_datastores and all_datastore_versions now
    works correctly. Bug 1612430
trove-12.1.0.dev92/releasenotes/notes/mongo-cluster-create-use-extended-perperties-ced87fde31c6c110.yaml
---
features:
  - |
    Users can specify the number and volume of mongos/configsvr instances
    with the extended_properties argument when creating a MongoDB
    cluster. Currently, the supported parameters are num_configsvr,
    num_mongos, configsvr_volume_size, configsvr_volume_type,
    mongos_volume_size and mongos_volume_type. (A client-side sketch
    follows this group of notes.)

trove-12.1.0.dev92/releasenotes/notes/mongo-cluster-grow-use-az-and-nic-values-207b041113e7b4fb.yaml
---
fixes:
  - MongoDB cluster grow operations were not creating instances with the
    provided az and nic values. These should be used if the caller
    provided them.

trove-12.1.0.dev92/releasenotes/notes/mountpoint-detection-096734f0097eb75a.yaml
---
fixes:
  - Improved mountpoint detection by running it as root. This prevents
    guests that have undiscoverable mount points from failing to unmount.

trove-12.1.0.dev92/releasenotes/notes/multi-region-cd8da560bfe00de5.yaml
features:
  - Adds a region property to the instance model and table. This is the
    first step in multi-region support.

trove-12.1.0.dev92/releasenotes/notes/mysql-config-preserve-types-77b970162bf6df08.yaml
---
fixes:
  - Fix IniCodec to deserialize Python objects. This also brings it in
    line with other codecs. guestagent_utils.to_bytes now returns the
    byte values as ints. See bug 1599656

trove-12.1.0.dev92/releasenotes/notes/mysql-root-fix-35079552e25170ca.yaml
---
fixes:
  - Do not remove the MySQL root user on root-disable so that the proper
    status can be reported on restore. Bug 1549600

trove-12.1.0.dev92/releasenotes/notes/mysql-user-list-pagination-9496c401c180f605.yaml
---
fixes:
  - Fix bug 1537986, which corrects the pagination in the mysql user list
    command. When internal users (ignore_users) are eliminated from the
    list, the pagination was not correctly handled.
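A sketch of the MongoDB cluster creation with extended_properties described
in the mongo-cluster-create note above. The client bootstrap and the
clusters.create keyword names are assumptions based on the
python-troveclient v1 API; the credentials, URLs, flavor and version values
are placeholders:

.. code-block:: python

   from keystoneauth1 import session
   from keystoneauth1.identity import v3
   from troveclient import client

   # Placeholder credentials; substitute your own cloud's values.
   auth = v3.Password(auth_url='http://keystone.example.com:5000/v3',
                      username='demo', password='secret',
                      project_name='demo',
                      user_domain_name='Default',
                      project_domain_name='Default')
   trove = client.Client('1.0', session=session.Session(auth=auth))

   cluster = trove.clusters.create(
       'my-mongo', 'mongodb', '3.2',
       # Three data (replica set) members.
       instances=[{'flavorRef': '10', 'volume': {'size': 5}}] * 3,
       # Shape the query-router / config-server layout of the cluster.
       extended_properties={
           'num_mongos': 2,
           'num_configsvr': 3,
           'mongos_volume_size': 2,
           'configsvr_volume_size': 2,
       })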
trove-12.1.0.dev92/releasenotes/notes/percona-2.3-support-2eab8f12167e44bc.yaml
---
features:
  - Support has been added for Percona XtraBackup version 2.3.
fixes:
  - Fixes bug 1558794. The 2.3 version of Percona XtraBackup performs
    some additional validations of the command line options passed to
    innobackupex. The Trove code now complies with the new validations
    being performed.

trove-12.1.0.dev92/releasenotes/notes/persist-error-message-fb69ddf885bcde84.yaml
---
features:
  - Errors that occur in Trove are now persisted in the database and are
    returned in the standard 'show' command.

trove-12.1.0.dev92/releasenotes/notes/pgsql-incremental-backup-acb4421f7de3ac09.yaml
---
features:
  - Full and incremental backup and restore strategy for PostgreSQL,
    based on pg_basebackup and WAL shipping.

trove-12.1.0.dev92/releasenotes/notes/pgsql-streaming-replication-f4df7e4047988b21.yaml
---
features:
  - Support for standard WAL-based streaming replication for PostgreSQL
    guests. Sets up read-only hot standby servers. (See the
    replica-creation sketch after these notes.)

trove-12.1.0.dev92/releasenotes/notes/post-upgrade-fixes-828811607826d433.yaml
---
fixes:
  - After upgrading, the guestagent was left in an inconsistent state.
    This became apparent after restarting or resizing the instance after
    upgrading.

trove-12.1.0.dev92/releasenotes/notes/postgres-user-list-race-46624dc9e4420e02.yaml
---
fixes:
  - Close the race condition window in the user-list call. Closes-Bug
    1617464

trove-12.1.0.dev92/releasenotes/notes/postgresql-use-proper-guestagent-models-7ba601c7b4c001d6.yaml
---
fixes:
  - Implement Postgres guestagent models for databases and users.
  - Implement a RootController extension for the Postgres datastore.

trove-12.1.0.dev92/releasenotes/notes/pxc-cluster-root-enable-30c366e3b5bcda51.yaml
---
features:
  - Adding the ability to root-enable a PXC cluster.
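A sketch of creating replicas with the replication and locality features
noted above (pgsql-streaming-replication and
locality-support-for-replication). It reuses the authenticated `trove`
client from the cluster sketch above; the keyword names are assumptions
based on the python-troveclient v1 instances API, and the id, flavor and
volume values are placeholders:

.. code-block:: python

   # Create two read-only hot standbys of an existing primary instance,
   # forced onto different hypervisors via the locality flag.
   replicas = trove.instances.create(
       'pg-replica', flavor_id='10', volume={'size': 5},
       replica_of='<primary-instance-id>',  # id (or name) of the primary
       replica_count=2,                     # create both standbys at once
       locality='anti-affinity')            # spread across hypervisors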
trove-12.1.0.dev92/releasenotes/notes/pxc-grow-shrink-0b1ee689cbc77743.yaml
---
features:
  - |
    This adds support for PXC to grow a cluster:

    * api and taskmanager support for shrinking a cluster
    * validate that the networks given are the same for each instance in
      the cluster
    * make sure to add the existing networks on an instance in the
      cluster
    * add a new Error task for grow and shrink
    * the nova client version configuration changed to a string option
      rather than an int option, because the nova microversions change
      the nova api output. This was needed for the network interfaces on
      existing instances.
    * testing for grow and shrink cluster

trove-12.1.0.dev92/releasenotes/notes/quota-management-3792cbc25ebe16bb.yaml
features:
  - New quota management APIs for reviewing and changing the quota for a
    particular tenant. Requires admin privileges. (A client sketch
    follows this group of notes.)

trove-12.1.0.dev92/releasenotes/notes/redis-upgrade-63769ddb1b546cb9.yaml
---
features:
  - Support for upgrading Redis instances.
  - Support for upgrading Redis clusters.

trove-12.1.0.dev92/releasenotes/notes/remove-override-templates-85429da7f66e006a.yaml
---
fixes:
  - Remove unused 'override.config.template' files. Bug 1575852

trove-12.1.0.dev92/releasenotes/notes/remove-support-of-use-nova-server-volume-2a334f57d8213810.yaml
---
fixes:
  - |
    Remove support for creating volumes via Nova. The former
    configuration option "use_nova_server_volume" is no longer used;
    cinderclient is now always used for creating volumes. Fixes bug
    #1673408.

trove-12.1.0.dev92/releasenotes/notes/return-http-204-for-disable-root-api-a818fc41fd6e75eb.yaml
---
fixes:
  - |
    Previously the root-disable API returned an HTTP 200 response without
    any content; a more appropriate HTTP 204 response is returned now.

trove-12.1.0.dev92/releasenotes/notes/reuse-cassandra-connections-092cf2a762a2e796.yaml
---
fixes:
  - Make the guestagent reuse Cassandra connections to eliminate resource
    leaks. Bug 1566946.
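A sketch of the quota management APIs mentioned in the quota-management note
above, again reusing the authenticated `trove` client (admin credentials
required). The manager and method names are assumptions based on the
python-troveclient v1 quota manager, and the tenant id is a placeholder:

.. code-block:: python

   tenant_id = '<tenant-id>'

   # Review the current quotas for the tenant.
   quotas = trove.quota.show(tenant_id)

   # Raise the instance and volume limits for the tenant.
   trove.quota.update(tenant_id, {'instances': 20, 'volumes': 100})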
trove-12.1.0.dev92/releasenotes/notes/secure-mongodb-instances-1e6d7df3febab8f4.yaml
---
security:
  - Fixes bug 1507841; provides a configuration setting to enable Role
    Based Access Control (RBAC) for MongoDB clusters. If
    mongodb.cluster_secure is set to False (the default is True) then
    RBAC will be disabled.

trove-12.1.0.dev92/releasenotes/notes/slo-backups-3c35135316f837e1.yaml
---
fixes:
  - Backups to Swift will now use Static Large Objects for larger
    backups. A new configuration option 'backup_segment_max_size' can be
    set to adjust the segment size of the SLO. Backups that are smaller
    than the segment size will be uploaded as regular objects. This is an
    improvement over the old Dynamic Large Object implementation. Bug
    1489997.

trove-12.1.0.dev92/releasenotes/notes/support-nova-keypair-a2cdb2da5c1511e9.yaml
features:
  - Added a new config option ``nova_keypair`` to specify an existing
    Nova keypair name for database instance creation; the cloud
    administrator is responsible for the keypair management and
    configuration. It's recommended to create the Trove database instance
    in the admin project for security reasons, so only the cloud
    administrator who has the private key can access the database
    instance. With the keypair support, ssh keys are no longer injected
    into the Trove guest agent image at build time.
upgrade:
  - The cloud administrator needs to create a Nova keypair and specify
    the keypair name in the config option ``nova_keypair``; the private
    key is used to ssh into new database instances created. The previous
    private key is still needed to ssh into the existing database
    instances.

trove-12.1.0.dev92/releasenotes/notes/train-01-backup-filtering-90ff6deac7b411e9.yaml
---
features:
  - Support to filter backups by ``instance_id``; additionally, the admin
    user can get backups of all the projects by specifying
    ``all_projects`` in the query string parameters.

trove-12.1.0.dev92/releasenotes/notes/train-02-management-security-group.yaml
---
features:
  - The cloud admin is able to apply a security group to the management
    port (used for communicating with the control plane and for other
    management tasks) of a Trove instance, by setting the
    ``management_security_groups`` config option. The cloud admin is
    responsible for managing the security group rules. The security group
    and its rules need to be created before deploying Trove.
upgrade:
  - The management security group won't affect Trove instances created
    before the upgrade.
trove-12.1.0.dev92/releasenotes/notes/train-03-public-trove-instance-8ec456bed46411e9.yaml
---
features:
  - Users can create a ``public`` Trove instance that has a floating IP
    attached, but with the ability to define which CIDRs may access the
    user's database service. Refer to the `API doc `_ for more details.

trove-12.1.0.dev92/releasenotes/notes/train-04-public-trove-images-127300c0df6c11e9.yaml
---
features:
  - Trove now publishes images of some specific databases on
    http://tarballs.openstack.org/trove/images/ for testing purposes.

trove-12.1.0.dev92/releasenotes/notes/trove-status-upgrade-check-framework-b9d3d3e2463ec26d.yaml
---
prelude: >
    Added new tool ``trove-status upgrade check``.
features:
  - |
    A new framework for the ``trove-status upgrade check`` command is
    added. This framework allows adding various checks which can be run
    before a Trove upgrade to ensure the upgrade can be performed safely.
upgrade:
  - |
    Operators can now use the new CLI tool ``trove-status upgrade check``
    to check if a Trove deployment can be safely upgraded from N-1 to N
    release.

trove-12.1.0.dev92/releasenotes/notes/update-myisam-recover-opt-232b9d680bc362bf.yaml
---
fixes:
  - Replace the deprecated 'myisam-recover' option with its newer
    counterpart 'myisam-recover-options'.

trove-12.1.0.dev92/releasenotes/notes/use-oslo-policy-bbd1b911e6487c36.yaml
---
features:
  - Add RBAC (role-based access control) enforcement on all Trove APIs.
    Allows defining a role-based access rule for every Trove API call
    (rule definitions are available in /etc/trove/policy.json).

trove-12.1.0.dev92/releasenotes/notes/use-osprofiler-options-58263c311617b127.yaml
---
other:
  - Starting with the osprofiler 1.0.0 release, the config options needed
    for it to work are consolidated inside osprofiler itself.

trove-12.1.0.dev92/releasenotes/notes/ussuri-add-service-status-updated.yaml
---
features:
  - A new field named ``service_status_updated`` is added to the instance
    API response, which e.g. could be used to validate whether the
    instance 'HEALTHY' status is stale or not.
trove-12.1.0.dev92/releasenotes/notes/ussuri-admin-clients-a14514a835ae11ea.yaml
---
upgrade:
  - |
    Trove now uses admin clients by default to communicate with Nova,
    Cinder, Neutron and Glance. Deployers who want to stick to the old
    clients need to explicitly configure the following options:

    * remote_nova_client
    * remote_cinder_client
    * remote_neutron_client
    * remote_glance_client

trove-12.1.0.dev92/releasenotes/notes/ussuri-database-instance-healthy.yaml
---
features:
  - A new database service status ``HEALTHY`` is introduced to indicate
    that the service is responsive. ``HEALTHY`` is the final status after
    ``ACTIVE``.
upgrade:
  - Any existing scripts that rely on the database instance ``ACTIVE``
    status should now rely on the ``HEALTHY`` status (see the polling
    sketch after this group of notes).

trove-12.1.0.dev92/releasenotes/notes/ussuri-delete-datastoredad784e2345711ea.yaml
---
features:
  - The admin user can delete a datastore if there are no instances or
    backups associated with it.

trove-12.1.0.dev92/releasenotes/notes/ussuri-service-credential-config.yaml
---
deprecations:
  - |
    The following config options are deprecated in favor of a separate
    configuration section ``service_credentials`` introduced to define
    the Trove service user credentials for communication with other
    OpenStack services.

    .. code-block:: ini

       [DEFAULT]
       trove_auth_url
       os_region_name
       nova_proxy_admin_user
       nova_proxy_admin_pass
       nova_proxy_admin_tenant_id
       nova_proxy_admin_tenant_name
       nova_proxy_admin_user_domain_name
       nova_proxy_admin_project_domain_name

trove-12.1.0.dev92/releasenotes/notes/ussuri-support-xfs-disk-format.yaml
---
features:
  - Add the XFS disk format for the database data volume; the cloud admin
    can configure 'ext3', 'ext4' or 'xfs' in the ``volume_fstype``
    option.

trove-12.1.0.dev92/releasenotes/notes/vertica-configuration-groups-710c892c1e3d6a90.yaml
---
features:
  - Implemented configuration groups capability for Vertica datastores.

trove-12.1.0.dev92/releasenotes/notes/vertica-grow-shrink-cluster-e32d48f5b2e1bfab.yaml
---
features:
  - Implemented grow and shrink for clusters of the Vertica datastore.
    The number of nodes in the cluster must be greater than the number
    required to satisfy the min_ksafety configuration setting.
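A polling sketch for the ``HEALTHY`` status transition described in the
ussuri-database-instance-healthy note above, reusing the authenticated
`trove` client from the earlier sketches; the timeout values are arbitrary:

.. code-block:: python

   import time

   def wait_until_healthy(trove, instance_id, timeout=1800, interval=10):
       # Scripts that used to wait for ACTIVE should now wait for HEALTHY,
       # which is reached after ACTIVE once the service answers health
       # checks.
       deadline = time.time() + timeout
       while time.time() < deadline:
           instance = trove.instances.get(instance_id)
           if instance.status == 'HEALTHY':
               return instance
           if instance.status == 'ERROR':
               raise RuntimeError('instance %s went to ERROR' % instance_id)
           time.sleep(interval)
       raise RuntimeError('instance %s not HEALTHY after %s seconds'
                          % (instance_id, timeout))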
trove-12.1.0.dev92/releasenotes/notes/vertica-load-via-curl-call-4d47c4e0b1b53471.yaml
---
features:
  - Vertica comes with a User Defined Load function that takes a URL as a
    load source. This can be used to load files that are stored in Swift.
    As this is a common use case, it is valuable to enable this by
    default. This can be done in the post-prepare method for Vertica. A
    new UDL_LIBS list has been added that describes any UDLs to be loaded
    into the database. This change only has one entry - the curl
    function.

trove-12.1.0.dev92/releasenotes/source/conf.py
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Trove Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/trove' bug_project = 'trove' bug_tag = '' html_last_updated_fmt = '%Y-%m-%d %H:%M' html_theme = 'openstackdocs' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Trove Release Notes' copyright = u'2015, Trove Developers' # Release notes are version independent. # The short X.Y version. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory.
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'TroveReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'TroveReleaseNotes.tex', u'Trove Release Notes Documentation', u'Trove Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'trovereleasenotes', u'Trove Release Notes Documentation', [u'Trove Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files.
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'TroveReleaseNotes', u'Trove Release Notes Documentation', u'Trove Developers', 'TroveReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/index.rst0000644000175000017500000000027200000000000022002 0ustar00coreycorey00000000000000====================== Trove Release Notes ====================== .. toctree:: :maxdepth: 1 unreleased train stein rocky queens pike ocata newton mitaka ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6561086 trove-12.1.0.dev92/releasenotes/source/locale/0000755000175000017500000000000000000000000021377 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6561086 trove-12.1.0.dev92/releasenotes/source/locale/fr/0000755000175000017500000000000000000000000022006 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7201097 trove-12.1.0.dev92/releasenotes/source/locale/fr/LC_MESSAGES/0000755000175000017500000000000000000000000023573 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000644000175000017500000000450100000000000026624 0ustar00coreycorey00000000000000# Corinne Verheyde , 2016. #zanata # Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Trove Release Notes 8.0.0\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-08-03 04:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 06:43+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "5.0.0" msgstr "5.0.0" msgid "5.0.1" msgstr "5.0.1" msgid "5.1.0" msgstr "5.1.0" msgid "5.1.1" msgstr "5.1.1" msgid "6.0.0" msgstr "6.0.0" msgid "" "A locality flag was added to the trove ReST API to allow a user to specify " "whether new replicas should be on the same hypervisor (affinity) or on " "different hypervisors (anti-affinity)." msgstr "" "Ajout d'un flag de localisation aux API ReST trove pour permettre à " "l'utilisateur de spécifier si les nouveaux replicas doivent être sur le même " "hyperviseur (affinité) ou sur des hyperviseurs différents (ante-affinité)." msgid "An invalid module driver is now logged correctly. Bug 1579900" msgstr "" "Un driver de module invalide est maintenant logué correctement. 
Bug 1579900" msgid "Bug Fixes" msgstr "Résolutions de Bugs" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Dropping support for python 2.6" msgstr "Suppression du support de python 2.6" msgid "" "Fixes an issue with a failure to establish a new replica for MySQL in some " "cases where a replica already exists and some data has been inserted into " "the master. Bug 1563574" msgstr "" "Résout un problème d'échec de mise en place d'un nouveau replica pour MySQL " "dans certains cas où un replica existe déjà et des données ont été insérées " "sur le master. Bug 1563574" msgid "Mitaka Series Release Notes" msgstr "Note de release pour Mitaka" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Other Notes" msgstr "Autres notes" msgid "Security Issues" msgstr "Problèmes de sécurités" msgid "Trove Release Notes" msgstr "Note de release pour Trove" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6561086 trove-12.1.0.dev92/releasenotes/source/locale/ja/0000755000175000017500000000000000000000000021771 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7201097 trove-12.1.0.dev92/releasenotes/source/locale/ja/LC_MESSAGES/0000755000175000017500000000000000000000000023556 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po0000644000175000017500000000167600000000000026621 0ustar00coreycorey00000000000000# Akihiro Motoki , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Trove Release Notes 8.0.0\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-08-03 04:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-11 07:34+0000\n" "Last-Translator: Akihiro Motoki \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "Bug Fixes" msgstr "バグ修正" msgid "Current Series Release Notes" msgstr "開発中バージョンのリリースノート" msgid "Deprecation Notes" msgstr "廃止予定の機能" msgid "Mitaka Series Release Notes" msgstr "Mitaka バージョンのリリースノート" msgid "New Features" msgstr "新機能" msgid "Other Notes" msgstr "その他の注意点" msgid "Security Issues" msgstr "セキュリティー上の問題" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/mitaka.rst0000644000175000017500000000023200000000000022135 0ustar00coreycorey00000000000000=================================== Mitaka Series Release Notes =================================== .. release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/newton.rst0000644000175000017500000000023200000000000022201 0ustar00coreycorey00000000000000=================================== Newton Series Release Notes =================================== .. 
release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/ocata.rst0000644000175000017500000000023000000000000021754 0ustar00coreycorey00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/pike.rst0000644000175000017500000000021700000000000021622 0ustar00coreycorey00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/queens.rst0000644000175000017500000000022300000000000022167 0ustar00coreycorey00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/rocky.rst0000644000175000017500000000022100000000000022014 0ustar00coreycorey00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/stein.rst0000644000175000017500000000022100000000000022007 0ustar00coreycorey00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/train.rst0000644000175000017500000000017600000000000022013 0ustar00coreycorey00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/releasenotes/source/unreleased.rst0000644000175000017500000000016000000000000023016 0ustar00coreycorey00000000000000============================== Current Series Release Notes ============================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/requirements.txt0000644000175000017500000000340200000000000017432 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr!=2.1.0,>=2.0.0 # Apache-2.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT keystonemiddleware>=4.17.0 # Apache-2.0 Routes>=2.3.1 # MIT WebOb>=1.7.1 # MIT PasteDeploy>=1.5.0 # MIT Paste>=2.0.2 # MIT sqlalchemy-migrate>=0.11.0 # Apache-2.0 netaddr>=0.7.18 # BSD httplib2>=0.9.1 # MIT lxml!=3.7.0,>=3.4.1 # BSD passlib>=1.7.0 # BSD python-heatclient>=1.10.0 # Apache-2.0 python-novaclient>=9.1.0 # Apache-2.0 python-cinderclient>=3.3.0 # Apache-2.0 python-keystoneclient>=3.8.0 # Apache-2.0 python-swiftclient>=3.2.0 # Apache-2.0 python-designateclient>=2.7.0 # Apache-2.0 python-neutronclient>=6.7.0 # Apache-2.0 python-glanceclient>=2.8.0 # Apache-2.0 python-troveclient>=2.2.0 # Apache-2.0 iso8601>=0.1.11 # MIT jsonschema>=2.6.0 # MIT Jinja2>=2.10 # BSD License (3 clause) pexpect!=3.3,>=3.1 # ISC License oslo.config>=5.2.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.upgradecheck>=0.1.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0 PyMySQL>=0.7.6 # MIT License Babel!=2.4.0,>=2.3.4 # BSD six>=1.10.0 # MIT stevedore>=1.20.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.db>=4.27.0 # Apache-2.0 xmltodict>=0.10.1 # MIT cryptography>=2.1.4 # BSD/Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 diskimage-builder!=1.6.0,!=1.7.0,!=1.7.1,>=1.1.2 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6561086 trove-12.1.0.dev92/roles/0000755000175000017500000000000000000000000015273 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7201097 trove-12.1.0.dev92/roles/trove-devstack/0000755000175000017500000000000000000000000020234 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/roles/trove-devstack/README0000644000175000017500000000020700000000000021113 0ustar00coreycorey00000000000000Trove devstack ** Role Variables ** .. zuul:rolevar:: trove_test_group :default: mysql The test group for running trovestack. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7201097 trove-12.1.0.dev92/roles/trove-devstack/defaults/0000755000175000017500000000000000000000000022043 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/roles/trove-devstack/defaults/main.yml0000644000175000017500000000022400000000000023510 0ustar00coreycorey00000000000000devstack_base_dir: /opt/stack trove_test_datastore: 'mysql' trove_test_group: 'mysql' trove_test_datastore_version: '5.7' trove_resize_time_out: '' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7201097 trove-12.1.0.dev92/roles/trove-devstack/tasks/0000755000175000017500000000000000000000000021361 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/roles/trove-devstack/tasks/main.yml0000644000175000017500000000057300000000000023035 0ustar00coreycorey00000000000000- name: Run trovestack become: true become_user: stack shell: | export BRIDGE_IP=10.1.0.1 export DEST={{devstack_base_dir}} export PATH_DEVSTACK_SRC=$DEST/devstack export TROVE_RESIZE_TIME_OUT={{trove_resize_time_out}} cd $DEST/trove tox -etrovestack -vv -- gate-tests {{trove_test_datastore}} {{trove_test_group}} {{trove_test_datastore_version}} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/run_tests.py0000644000175000017500000002032000000000000016544 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools import gettext import os import sys import traceback import eventlet from oslo_log import log as logging import proboscis import six from six.moves import urllib import wsgi_intercept from wsgi_intercept.httplib2_intercept import install as wsgi_install from trove.common import cfg from trove.common.rpc import service as rpc_service from trove.common.rpc import version as rpc_version from trove.common import utils from trove import rpc from trove.tests.config import CONFIG from trove.tests import root_logger eventlet.monkey_patch(thread=False) CONF = cfg.CONF original_excepthook = sys.excepthook def add_support_for_localization(): """Adds support for localization in the logging. If ../nova/__init__.py exists, add ../ to Python search path, so that it will override what happens to be installed in /usr/(local/)lib/python... 
""" path = os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir) possible_topdir = os.path.normpath(path) if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) if six.PY3: gettext.install('nova') else: gettext.install('nova', unicode=True) def initialize_trove(config_file): from trove.common import pastedeploy root_logger.DefaultRootLogger() cfg.CONF(args=[], project='trove', default_config_files=[config_file]) logging.setup(CONF, None) topic = CONF.taskmanager_queue rpc.init(CONF) taskman_service = rpc_service.RpcService( CONF.taskmanager_rpc_encr_key, topic=topic, rpc_api_version=rpc_version.RPC_API_VERSION, manager='trove.taskmanager.manager.Manager') taskman_service.start() return pastedeploy.paste_deploy_app(config_file, 'trove', {}) def datastore_init(): # Adds the datastore for mysql (needed to make most calls work). from trove.configuration.models import DatastoreConfigurationParameters from trove.datastore import models models.DBDatastore.create( id=CONFIG.dbaas_datastore_id, name=CONFIG.dbaas_datastore, default_version_id=CONFIG.dbaas_datastore_version_id) models.DBDatastore.create(id=utils.generate_uuid(), name=CONFIG.dbaas_datastore_name_no_versions, default_version_id=None) main_dsv = models.DBDatastoreVersion.create( id=CONFIG.dbaas_datastore_version_id, datastore_id=CONFIG.dbaas_datastore_id, name=CONFIG.dbaas_datastore_version, manager="mysql", image_id='c00000c0-00c0-0c00-00c0-000c000000cc', packages='test packages', active=1) models.DBDatastoreVersion.create( id="d00000d0-00d0-0d00-00d0-000d000000dd", datastore_id=CONFIG.dbaas_datastore_id, name='mysql_inactive_version', manager="mysql", image_id='c00000c0-00c0-0c00-00c0-000c000000cc', packages=None, active=0) def add_parm(name, data_type, max_size, min_size=0, restart_required=0): DatastoreConfigurationParameters.create( datastore_version_id=main_dsv.id, name=name, restart_required=restart_required, max_size=max_size, min_size=0, data_type=data_type, deleted=0, deleted_at=None) add_parm('key_buffer_size', 'integer', 4294967296) add_parm('connect_timeout', 'integer', 65535) add_parm('join_buffer_size', 'integer', 4294967296) add_parm('local_infile', 'integer', 1) add_parm('collation_server', 'string', None, None) add_parm('innodb_buffer_pool_size', 'integer', 57671680, restart_required=1) def initialize_database(): from trove.db import get_db_api from trove.db.sqlalchemy import session db_api = get_db_api() db_api.drop_db(CONF) # Destroys the database, if it exists. db_api.db_sync(CONF) session.configure_db(CONF) datastore_init() db_api.configure_db(CONF) def initialize_fakes(app): # Set up WSGI interceptor. This sets up a fake host that responds each # time httplib tries to communicate to localhost, port 8779. 
def wsgi_interceptor(*args, **kwargs): def call_back(env, start_response): path_info = env.get('PATH_INFO') if path_info: env['PATH_INFO'] = urllib.parse.unquote(path_info) return app.__call__(env, start_response) return call_back wsgi_intercept.add_wsgi_intercept('localhost', CONF.bind_port, wsgi_interceptor) from trove.tests.util import event_simulator event_simulator.monkey_patch() from trove.tests.fakes import taskmanager taskmanager.monkey_patch() def parse_args_for_test_config(): test_conf = 'etc/tests/localhost.test.conf' repl = False new_argv = [] for index in range(len(sys.argv)): arg = sys.argv[index] print(arg) if arg[:14] == "--test-config=": test_conf = arg[14:] elif arg == "--repl": repl = True else: new_argv.append(arg) sys.argv = new_argv return test_conf, repl def run_tests(repl): """Runs all of the tests.""" if repl: # Actually show errors in the repl. sys.excepthook = original_excepthook def no_thanks(exit_code): print("Tests finished with exit code %d." % exit_code) sys.exit = no_thanks proboscis.TestProgram().run_and_exit() if repl: import code code.interact() def import_tests(): # F401 unused imports needed for tox tests from trove.tests.api import backups # noqa from trove.tests.api import configurations # noqa from trove.tests.api import databases # noqa from trove.tests.api import datastores # noqa from trove.tests.api import instances as rd_instances # noqa from trove.tests.api import instances_actions as rd_actions # noqa from trove.tests.api import instances_delete # noqa from trove.tests.api import instances_resize # noqa from trove.tests.api import limits # noqa from trove.tests.api.mgmt import instances_actions as mgmt_actions # noqa from trove.tests.api import replication # noqa from trove.tests.api import root # noqa from trove.tests.api import user_access # noqa from trove.tests.api import users # noqa from trove.tests.api import versions # noqa from trove.tests.db import migrations # noqa def main(import_func): try: wsgi_install() add_support_for_localization() # Load Trove app # Paste file needs absolute path config_file = os.path.realpath('etc/trove/trove.conf.test') # 'etc/trove/test-api-paste.ini' app = initialize_trove(config_file) # Initialize sqlite database. initialize_database() # Swap out WSGI, httplib, and other components with test doubles. initialize_fakes(app) # Initialize the test configuration. test_config_file, repl = parse_args_for_test_config() CONFIG.load_from_file(test_config_file) import_func() from trove.tests.util import event_simulator event_simulator.run_main(functools.partial(run_tests, repl)) except Exception as e: # Printing the error manually like this is necessary due to oddities # with sys.excepthook. 
print("Run tests failed: %s" % e) traceback.print_exc() raise if __name__ == "__main__": main(import_tests) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8081114 trove-12.1.0.dev92/setup.cfg0000644000175000017500000000426500000000000015777 0ustar00coreycorey00000000000000[metadata] name = trove summary = OpenStack DBaaS description-file = README.rst author = OpenStack author-email = openstack-discuss@lists.openstack.org home-page = https://docs.openstack.org/trove/latest/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 [files] data_files = etc/trove = etc/trove/api-paste.ini packages = trove [entry_points] console_scripts = trove-api = trove.cmd.api:main trove-taskmanager = trove.cmd.taskmanager:main trove-mgmt-taskmanager = trove.cmd.taskmanager:mgmt_main trove-conductor = trove.cmd.conductor:main trove-manage = trove.cmd.manage:main trove-guestagent = trove.cmd.guest:main trove-fake-mode = trove.cmd.fakemode:main trove-status = trove.cmd.status:main trove.api.extensions = mgmt = trove.extensions.routes.mgmt:Mgmt mysql = trove.extensions.routes.mysql:Mysql trove.guestagent.module.drivers = ping = trove.guestagent.module.drivers.ping_driver:PingDriver new_relic_license = trove.guestagent.module.drivers.new_relic_license_driver:NewRelicLicenseDriver oslo.messaging.notify.drivers = trove.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver trove.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver trove.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver trove.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver trove.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver oslo.policy.policies = trove = trove.common.policies:list_rules [compile_catalog] directory = trove/locale domain = trove [update_catalog] domain = trove output_dir = trove/locale input_file = trove/locale/trove.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = trove/locale/trove.pot [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/setup.py0000644000175000017500000000200600000000000015657 0ustar00coreycorey00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/test-requirements.txt0000644000175000017500000000201200000000000020403 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. # Hacking already pins down pep8, pyflakes and flake8 hacking>=3.0,<3.1.0 # Apache-2.0 bandit>=1.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 nose>=1.3.7 # LGPL nosexcover>=1.0.10 # BSD openstack.nose-plugin>=0.7 # Apache-2.0 WebTest>=2.0.27 # MIT wsgi-intercept>=1.4.1 # MIT License proboscis>=1.2.5.3 # Apache-2.0 python-troveclient>=2.2.0 # Apache-2.0 mock>=2.0.0 # BSD testtools>=2.2.0 # MIT pymongo!=3.1,>=3.0.2 # Apache-2.0 redis>=2.10.0 # MIT psycopg2>=2.6.2 # LGPL/ZPL cassandra-driver!=3.6.0,>=2.1.4 # Apache-2.0 couchdb>=0.8 # Apache-2.0 stestr>=1.1.0 # Apache-2.0 doc8>=0.6.0 # Apache-2.0 astroid==1.6.5 # LGPLv2.1 pylint==1.9.2 # GPLv2 oslotest>=3.2.0 # Apache-2.0 tenacity>=4.9.0 # Apache-2.0 # Docs building openstackdocstheme>=1.32.1 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7201097 trove-12.1.0.dev92/tools/0000755000175000017500000000000000000000000015307 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/tools/install_venv.py0000755000175000017500000001112600000000000020371 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Installation script for Trove's development virtualenv """ from __future__ import print_function import os import subprocess import sys ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) VENV = os.path.join(ROOT, '.venv') PIP_REQUIRES = os.path.join(ROOT, 'requirements.txt') TEST_REQUIRES = os.path.join(ROOT, 'test-requirements.txt') PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) def die(message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(): if sys.version_info < (2, 7): die("Need Python Version >= 2.7") def run_command(cmd, redirect_output=True, check_exit_code=True): """ Runs a command in an out-of-process shell, returning the output of that command. Working directory is ROOT. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: die('Command "%s" failed.\n%s', ' '.join(cmd), output) return output HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip()) HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip()) def check_dependencies(): """Make sure virtualenv is in the path.""" if not HAS_VIRTUALENV: print('not found.') # Try installing it via easy_install... if HAS_EASY_INSTALL: print('Installing virtualenv via easy_install...'), if not (run_command(['which', 'easy_install']) and run_command(['easy_install', 'virtualenv'])): die('ERROR: virtualenv not found.\nTrove development' ' requires virtualenv, please install it using your' ' favorite package management tool') print('done.') print('done.') def create_virtualenv(venv=VENV): """Creates the virtual environment and installs PIP only into the virtual environment """ print('Creating venv...'), run_command(['virtualenv', '-q', '--no-site-packages', VENV]) print('done.') print('Installing pip in virtualenv...'), if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip(): die("Failed to install pip.") print('done.') def install_dependencies(venv=VENV): print('Installing dependencies with pip (this can take a while)...') # Install greenlet by hand - just listing it in the requires file does not # get it in stalled in the right order run_command(['tools/with_venv.sh', '-E', venv, 'pip', 'install', 'greenlet'], redirect_output=False) for requires in (PIP_REQUIRES, TEST_REQUIRES): run_command(['tools/with_venv.sh', '-E', venv, 'pip', 'install', '-r', requires], redirect_output=False) # Tell the virtual env how to "import trove" pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "trove.pth") f = open(pthfile, 'w') f.write("%s\n" % ROOT) def print_help(): help = """ Trove development environment setup is complete. Trove development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Trove virtualenv for the extent of your current shell session you can run: $ . .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help) def main(argv): check_python_version() check_dependencies() create_virtualenv() install_dependencies() print_help() if __name__ == '__main__': main(sys.argv) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/tools/start-fake-mode.sh0000755000175000017500000000057300000000000020636 0ustar00coreycorey00000000000000#!/usr/bin/env bash # Arguments: Use --pid_file to specify a pid file location. if [ ! -d ".tox/py27" ]; then tox -epy27 fi function run() { .tox/py27/bin/python $@ } run bin/trove-manage \ --config-file=etc/trove/trove.conf.test db_recreate \ trove_test.sqlite mysql fake run bin/trove-fake-mode \ --fork --config-file=etc/trove/trove.conf.test \ $@ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/tools/stop-fake-mode.sh0000755000175000017500000000050500000000000020461 0ustar00coreycorey00000000000000#!/usr/bin/env bash # Arguments: if given the first argument is the location of a pid file. if [ $# -lt 1 ]; then export PID_FILE=".pid" else export PID_FILE=$1 fi if [ -f $PID_FILE ]; then cat $PID_FILE kill `cat $PID_FILE` echo "Stopping server." rm $PID_FILE else echo "pid file not found." fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/tools/trove-policy-generator.conf0000644000175000017500000000010700000000000022574 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/trove/policy.yaml.sample namespace = trove ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/tools/trove-pylint.README0000644000175000017500000001221000000000000020636 0ustar00coreycorey00000000000000trove-pylint ------------ trove-pylint.py is a wrapper around pylint which allows for some custom processing relevant to the trove source tree, and suitable to run as a CI job for trove. The purpose is to perform a lint check on the code and detect obvious (lintable) errors and fix them. How trove-pylint works ---------------------- trove-pylint is driven by a configuration file which is by default, located in tools/trove-pylint.config. This file is a json dump of the configuration. A default configuration file looks like this. { "include": ["*.py"], "folder": "trove", "options": ["--rcfile=./pylintrc", "-E"], "ignored_files": ['trove/tests'], "ignored_codes": [], "ignored_messages": [], "ignored_file_codes": [], "ignored_file_messages": [], "ignored_file_code_messages": [], "always_error_messages": [ "Undefined variable '_'", "Undefined variable '_LE'", "Undefined variable '_LI'", "Undefined variable '_LW'", "Undefined variable '_LC'" ] } include ------- Provide a list of match specs (passed to fnmatch.fnmatch). The default is only "*.py". folder ------ Provide the name of the top level folder to lint. This is a single value. options ------- These are the pylint launch options. The default is to specify an rcfile and only errors. Specifying the rcfile is required, and the file is a dummy, to suppress an annoying warning. ignored_files ------------- This is a list of paths that we wish to ignore. When a file is considered for linting, if the path name begins with any of these provided prefixes, the file will not be linted. We ignore the tests directory because it has a high instance of false positives. 
ignored_codes, ignored_messages, ignored_file_codes, ignored_file_messages, and ignored_file_code_messages ----------------------------------------------------- These settings identify specific failures that are to be ignored. Each is a list; some are lists of single elements, others are lists of lists. ignored_codes and ignored_messages are lists of single elements that are to be ignored. You can specify either the code name or its numeric representation. You must specify the exact message. ignored_file_codes and ignored_file_messages are lists of lists where each element is a code and a message. ignored_file_code_messages is a list of lists where each element consists of a filename, an error code, a message, a line number and a function name. always_error_messages --------------------- This is a list of messages which have a low chance of false positives, which are always flagged as errors. Using trove-pylint ------------------ You can check your code for errors by simply running: tox -e pylint or explicitly as: tox -e pylint check The equivalent result can be obtained by running the command: tools/trove-pylint.py or tools/trove-pylint.py check Running the tool directly may require installing additional pip modules on your machine (such as pylint), so using 'tox' is the preferred method. For example, here is the result from such a run. $ tox -e pylint check ERROR: trove/common/extensions.py 575: E1003 bad-super-call, \ TroveExtensionMiddleware.__init__: Bad first argument \ 'ExtensionMiddleware' given to super() Check failed. 367 files processed, 1 had errors, 1 errors recorded. I wish to ignore this error and keep going. To do this, I rebuild the list of errors to ignore as follows. $ tox -e pylint rebuild Rebuild completed. 367 files processed, 177 exceptions recorded. This caused the tool to add the following two entries to the config file. [ "trove/common/extensions.py", "E1003", "Bad first argument 'ExtensionMiddleware' given to super()", "TroveExtensionMiddleware.__init__" ], [ "trove/common/extensions.py", "bad-super-call", "Bad first argument 'ExtensionMiddleware' given to super()", "TroveExtensionMiddleware.__init__" ], With that done, I can recheck as shown below. $ tox -e pylint Check succeeded. 367 files processed You can review the errors that are currently being ignored by reading the file tools/trove-pylint.config. If you want to fix some of these errors, identify the configuration(s) that are causing those errors to be ignored, remove them and re-run the check. Once you see that the errors are in fact being reported by the tool, go ahead and fix the problem(s) and retest. Known issues ------------ 1. The tool appears to be very sensitive to the version(s) of pylint and astroid. In testing, I've found that if the version of either of these changes, you could either have a failure of the tool (exceptions thrown, ...) or a different set of errors reported. Refer to test-requirements.txt to see the versions currently being used. If you run the tool on your machine and find that there are no errors, but find that either the CI generates errors, or that the tool run through tox generates errors, check what versions of astroid and pylint are being run in each configuration.
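To make the selection rules above concrete, here is a minimal Python sketch (illustrative only, not the tool's real internals) of how the include, folder and ignored_files settings interact when choosing files to lint:

    import fnmatch
    import json
    import os

    def should_lint(path, config):
        # ignored_files entries are path prefixes, per the section above.
        if any(path.startswith(prefix) for prefix in config["ignored_files"]):
            return False
        # include entries are fnmatch specs such as "*.py".
        return any(fnmatch.fnmatch(path, spec) for spec in config["include"])

    with open("tools/trove-pylint.config") as f:
        config = json.load(f)

    for root, _dirs, files in os.walk(config["folder"]):
        for name in files:
            path = os.path.join(root, name)
            if should_lint(path, config):
                print(path)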
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/tools/trove-pylint.config0000644000175000017500000013201700000000000021156 0ustar00coreycorey00000000000000{ "always_error_messages": [ "Undefined variable '_'", "Undefined variable '_LC'", "Undefined variable '_LE'", "Undefined variable '_LI'", "Undefined variable '_LW'" ], "folder": "trove", "ignored_codes": ["not-callable"], "ignored_file_code_messages": [ [ "trove/backup/models.py", "E1101", "Class 'DBBackup' has no 'deleted' member", "Backup.list" ], [ "trove/backup/models.py", "E1101", "Class 'DBBackup' has no 'id' member", "Backup.running" ], [ "trove/backup/models.py", "E1101", "Class 'DBBackup' has no 'instance_id' member", "Backup.fail_for_instance" ], [ "trove/backup/models.py", "E1101", "Class 'DBBackup' has no 'instance_id' member", "Backup.running" ], [ "trove/backup/models.py", "E1101", "Class 'DBBackup' has no 'state' member", "Backup.fail_for_instance" ], [ "trove/backup/models.py", "E1101", "Class 'DBBackup' has no 'state' member", "Backup.running" ], [ "trove/backup/models.py", "E1101", "Class 'DBBackup' has no 'tenant_id' member", "Backup.list" ], [ "trove/backup/models.py", "E1101", "Class 'DBBackup' has no 'updated' member", "Backup._paginate" ], [ "trove/backup/models.py", "E1101", "Class 'DBDatastoreVersion' has no 'datastore_id' member", "Backup.list" ], [ "trove/backup/models.py", "E1101", "Instance of 'BuiltInstance' has no 'validate_can_perform_action' member", "Backup.create._create_resources" ], [ "trove/backup/models.py", "E1101", "Instance of 'DBBackup' has no 'checksum' member", "DBBackup.check_swift_object_exist" ], [ "trove/backup/models.py", "E1101", "Instance of 'DBBackup' has no 'datastore_version_id' member", "DBBackup.datastore" ], [ "trove/backup/models.py", "E1101", "Instance of 'DBBackup' has no 'datastore_version_id' member", "DBBackup.datastore_version" ], [ "trove/backup/models.py", "E1101", "Instance of 'DBBackup' has no 'id' member", "DBBackup.check_swift_object_exist" ], [ "trove/backup/models.py", "E1101", "Instance of 'DBBackup' has no 'location' member", "DBBackup.check_swift_object_exist" ], [ "trove/backup/models.py", "E1101", "Instance of 'DBBackup' has no 'location' member", "DBBackup.filename" ], [ "trove/backup/models.py", "E1101", "Instance of 'DBBackup' has no 'state' member", "DBBackup.is_done" ], [ "trove/backup/models.py", "E1101", "Instance of 'DBBackup' has no 'state' member", "DBBackup.is_done_successfuly" ], [ "trove/backup/models.py", "E1101", "Instance of 'DBBackup' has no 'state' member", "DBBackup.is_running" ], [ "trove/backup/models.py", "no-member", "Class 'DBBackup' has no 'deleted' member", "Backup.list" ], [ "trove/backup/models.py", "no-member", "Class 'DBBackup' has no 'id' member", "Backup.running" ], [ "trove/backup/models.py", "no-member", "Class 'DBBackup' has no 'instance_id' member", "Backup.fail_for_instance" ], [ "trove/backup/models.py", "no-member", "Class 'DBBackup' has no 'instance_id' member", "Backup.running" ], [ "trove/backup/models.py", "no-member", "Class 'DBBackup' has no 'state' member", "Backup.fail_for_instance" ], [ "trove/backup/models.py", "no-member", "Class 'DBBackup' has no 'state' member", "Backup.running" ], [ "trove/backup/models.py", "no-member", "Class 'DBBackup' has no 'tenant_id' member", "Backup.list" ], [ "trove/backup/models.py", "no-member", "Class 'DBBackup' has no 'updated' member", "Backup._paginate" ], [ "trove/backup/models.py", "no-member", 
"Class 'DBDatastoreVersion' has no 'datastore_id' member", "Backup.list" ], [ "trove/backup/models.py", "no-member", "Instance of 'BuiltInstance' has no 'validate_can_perform_action' member", "Backup.create._create_resources" ], [ "trove/backup/models.py", "no-member", "Instance of 'DBBackup' has no 'checksum' member", "DBBackup.check_swift_object_exist" ], [ "trove/backup/models.py", "no-member", "Instance of 'DBBackup' has no 'datastore_version_id' member", "DBBackup.datastore" ], [ "trove/backup/models.py", "no-member", "Instance of 'DBBackup' has no 'datastore_version_id' member", "DBBackup.datastore_version" ], [ "trove/backup/models.py", "no-member", "Instance of 'DBBackup' has no 'id' member", "DBBackup.check_swift_object_exist" ], [ "trove/backup/models.py", "no-member", "Instance of 'DBBackup' has no 'location' member", "DBBackup.check_swift_object_exist" ], [ "trove/backup/models.py", "no-member", "Instance of 'DBBackup' has no 'location' member", "DBBackup.filename" ], [ "trove/backup/models.py", "no-member", "Instance of 'DBBackup' has no 'state' member", "DBBackup.is_done" ], [ "trove/backup/models.py", "no-member", "Instance of 'DBBackup' has no 'state' member", "DBBackup.is_done_successfuly" ], [ "trove/backup/models.py", "no-member", "Instance of 'DBBackup' has no 'state' member", "DBBackup.is_running" ], [ "trove/cmd/manage.py", "E1101", "Class 'Commands' has no 'has' member", "Commands.params_of" ], [ "trove/cmd/manage.py", "no-member", "Class 'Commands' has no 'has' member", "Commands.params_of" ], [ "trove/common/context.py", "E1101", "Module 'inspect' has no 'getfullargspec' member", "TroveContext._remove_incompatible_context_args" ], [ "trove/common/context.py", "no-member", "Module 'inspect' has no 'getfullargspec' member", "TroveContext._remove_incompatible_context_args" ], [ "trove/common/extensions.py", "E1003", "Bad first argument 'ExtensionMiddleware' given to super()", "TroveExtensionMiddleware.__init__" ], [ "trove/common/extensions.py", "E1101", "Module 'lxml.etree' has no 'Element' member", "ExtensionsXMLSerializer._populate_ext" ], [ "trove/common/extensions.py", "E1101", "Module 'lxml.etree' has no 'Element' member", "ExtensionsXMLSerializer.index" ], [ "trove/common/extensions.py", "E1101", "Module 'lxml.etree' has no 'Element' member", "ExtensionsXMLSerializer.show" ], [ "trove/common/extensions.py", "E1101", "Module 'lxml.etree' has no 'SubElement' member", "ExtensionsXMLSerializer._populate_ext" ], [ "trove/common/extensions.py", "E1101", "Module 'lxml.etree' has no 'SubElement' member", "ExtensionsXMLSerializer.index" ], [ "trove/common/extensions.py", "E1101", "Module 'lxml.etree' has no 'tostring' member", "ExtensionsXMLSerializer._to_xml" ], [ "trove/common/extensions.py", "bad-super-call", "Bad first argument 'ExtensionMiddleware' given to super()", "TroveExtensionMiddleware.__init__" ], [ "trove/common/extensions.py", "no-member", "Module 'lxml.etree' has no 'Element' member", "ExtensionsXMLSerializer._populate_ext" ], [ "trove/common/extensions.py", "no-member", "Module 'lxml.etree' has no 'Element' member", "ExtensionsXMLSerializer.index" ], [ "trove/common/extensions.py", "no-member", "Module 'lxml.etree' has no 'Element' member", "ExtensionsXMLSerializer.show" ], [ "trove/common/extensions.py", "no-member", "Module 'lxml.etree' has no 'SubElement' member", "ExtensionsXMLSerializer._populate_ext" ], [ "trove/common/extensions.py", "no-member", "Module 'lxml.etree' has no 'SubElement' member", "ExtensionsXMLSerializer.index" ], [ 
"trove/common/extensions.py", "no-member", "Module 'lxml.etree' has no 'tostring' member", "ExtensionsXMLSerializer._to_xml" ], [ "trove/common/models.py", "E1101", "Instance of 'ModelBase' has no 'id' member", "ModelBase.__eq__" ], [ "trove/common/models.py", "E1101", "Instance of 'ModelBase' has no 'id' member", "ModelBase.__hash__" ], [ "trove/common/models.py", "no-member", "Instance of 'ModelBase' has no 'id' member", "ModelBase.__eq__" ], [ "trove/common/models.py", "no-member", "Instance of 'ModelBase' has no 'id' member", "ModelBase.__hash__" ], [ "trove/common/clients_admin.py", "E0611", "No name 'v1_1' in module 'novaclient'", null ], [ "trove/common/clients_admin.py", "no-name-in-module", "No name 'v1_1' in module 'novaclient'", null ], [ "trove/common/strategies/cluster/experimental/mongodb/api.py", "E1101", "Instance of 'API' has no 'get_key' member", "MongoDbCluster.add_shard" ], [ "trove/common/strategies/cluster/experimental/mongodb/api.py", "E1101", "Instance of 'API' has no 'mongodb_add_shard_cluster' member", "MongoDbCluster.add_shard" ], [ "trove/common/strategies/cluster/experimental/mongodb/api.py", "no-member", "Instance of 'API' has no 'get_key' member", "MongoDbCluster.add_shard" ], [ "trove/common/strategies/cluster/experimental/mongodb/api.py", "no-member", "Instance of 'API' has no 'mongodb_add_shard_cluster' member", "MongoDbCluster.add_shard" ], [ "trove/common/stream_codecs.py", "no-member", "Instance of 'ConfigParser' has no 'read_file' member", "IniCodec.deserialize" ], [ "trove/common/utils.py", "E1127", "Slice index is not an int, None, or instance with __index__", "MethodInspector.optional_args" ], [ "trove/common/utils.py", "E1127", "Slice index is not an int, None, or instance with __index__", "MethodInspector.required_args" ], [ "trove/common/utils.py", "invalid-slice-index", "Slice index is not an int, None, or instance with __index__", "MethodInspector.optional_args" ], [ "trove/common/utils.py", "invalid-slice-index", "Slice index is not an int, None, or instance with __index__", "MethodInspector.required_args" ], [ "trove/common/wsgi.py", "E0102", "class already defined line 43", "Router" ], [ "trove/common/wsgi.py", "E0102", "class already defined line 46", "JSONDictSerializer" ], [ "trove/common/wsgi.py", "function-redefined", "class already defined line 43", "Router" ], [ "trove/common/wsgi.py", "function-redefined", "class already defined line 46", "JSONDictSerializer" ], [ "trove/configuration/models.py", "E1101", "Instance of 'DBConfiguration' has no 'datastore_version_id' member", "DBConfiguration.datastore" ], [ "trove/configuration/models.py", "E1101", "Instance of 'DBConfiguration' has no 'datastore_version_id' member", "DBConfiguration.datastore_version" ], [ "trove/configuration/models.py", "E1101", "Instance of 'DBConfigurationParameter' has no 'configuration_key' member", "DBConfigurationParameter.__hash__" ], [ "trove/configuration/models.py", "no-member", "Instance of 'DBConfiguration' has no 'datastore_version_id' member", "DBConfiguration.datastore" ], [ "trove/configuration/models.py", "no-member", "Instance of 'DBConfiguration' has no 'datastore_version_id' member", "DBConfiguration.datastore_version" ], [ "trove/configuration/models.py", "no-member", "Instance of 'DBConfigurationParameter' has no 'configuration_key' member", "DBConfigurationParameter.__hash__" ], [ "trove/configuration/service.py", "E1101", "Instance of 'BuiltInstance' has no 'update_configuration' member", "ConfigurationsController._refresh_on_all_instances" 
], [ "trove/configuration/service.py", "no-member", "Instance of 'BuiltInstance' has no 'update_configuration' member", "ConfigurationsController._refresh_on_all_instances" ], [ "trove/datastore/models.py", "E1101", "Class 'DBDatastoreVersion' has no 'active' member", "Datastores.load" ], [ "trove/datastore/models.py", "E1101", "Instance of 'BaseCapability' has no 'name' member", "BaseCapability.__repr__" ], [ "trove/datastore/models.py", "no-member", "Class 'DBDatastoreVersion' has no 'active' member", "Datastores.load" ], [ "trove/datastore/models.py", "no-member", "Instance of 'BaseCapability' has no 'name' member", "BaseCapability.__repr__" ], [ "trove/db/sqlalchemy/migrate_repo/versions/007_add_volume_flavor.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/007_add_volume_flavor.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/008_add_instance_fields.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/008_add_instance_fields.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/009_add_deleted_flag_to_instances.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/009_add_deleted_flag_to_instances.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/015_add_service_type.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/015_add_service_type.py", "E1120", "No value for argument 'dml' in method call", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/015_add_service_type.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/015_add_service_type.py", "no-value-for-parameter", "No value for argument 'dml' in method call", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/016_add_datastore_type.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/016_add_datastore_type.py", "E1101", "Instance of 'Table' has no 'drop_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/016_add_datastore_type.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/016_add_datastore_type.py", "no-member", "Instance of 'Table' has no 'drop_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/017_update_datastores.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/017_update_datastores.py", "E1101", "Instance of 'Table' has no 'drop_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/017_update_datastores.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/017_update_datastores.py", "no-member", "Instance of 'Table' has no 'drop_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/020_configurations.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ 
"trove/db/sqlalchemy/migrate_repo/versions/020_configurations.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/022_add_backup_parent_id.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/022_add_backup_parent_id.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/029_add_backup_datastore.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/029_add_backup_datastore.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/030_add_master_slave.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/030_add_master_slave.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/031_add_timestamps_to_configurations.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/031_add_timestamps_to_configurations.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/032_clusters.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/032_clusters.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/039_region.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/039_region.py", "E1120", "No value for argument 'dml' in method call", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/039_region.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/039_region.py", "no-value-for-parameter", "No value for argument 'dml' in method call", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/042_add_cluster_configuration_id.py", "E1101", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migrate_repo/versions/042_add_cluster_configuration_id.py", "no-member", "Instance of 'Table' has no 'create_column' member", "upgrade" ], [ "trove/db/sqlalchemy/migration.py", "E0611", "No name 'exceptions' in module 'migrate.versioning'", null ], [ "trove/db/sqlalchemy/migration.py", "E1120", "No value for argument 'repo_path' in function call", "version_control" ], [ "trove/db/sqlalchemy/migration.py", "no-name-in-module", "No name 'exceptions' in module 'migrate.versioning'", null ], [ "trove/db/sqlalchemy/migration.py", "no-value-for-parameter", "No value for argument 
'repo_path' in function call", "version_control" ], [ "trove/dns/designate/driver.py", "E1101", "Instance of 'Client' has no 'domains' member", "DesignateDriver.get_dns_zones" ], [ "trove/dns/designate/driver.py", "E1101", "Instance of 'Client' has no 'records' member", "DesignateDriver._get_records" ], [ "trove/dns/designate/driver.py", "E1101", "Instance of 'Client' has no 'records' member", "DesignateDriver.create_entry" ], [ "trove/dns/designate/driver.py", "E1101", "Instance of 'Client' has no 'records' member", "DesignateDriver.delete_entry" ], [ "trove/dns/designate/driver.py", "no-member", "Instance of 'Client' has no 'domains' member", "DesignateDriver.get_dns_zones" ], [ "trove/dns/designate/driver.py", "no-member", "Instance of 'Client' has no 'records' member", "DesignateDriver._get_records" ], [ "trove/dns/designate/driver.py", "no-member", "Instance of 'Client' has no 'records' member", "DesignateDriver.create_entry" ], [ "trove/dns/designate/driver.py", "no-member", "Instance of 'Client' has no 'records' member", "DesignateDriver.delete_entry" ], [ "trove/extensions/mgmt/instances/service.py", "E1101", "Instance of 'BuiltInstance' has no 'get_diagnostics' member", "MgmtInstanceController.diagnostics" ], [ "trove/extensions/mgmt/instances/service.py", "E1101", "Instance of 'BuiltInstance' has no 'get_hwinfo' member", "MgmtInstanceController.hwinfo" ], [ "trove/extensions/mgmt/instances/service.py", "E1101", "Instance of 'BuiltInstance' has no 'rpc_ping' member", "MgmtInstanceController.rpc_ping" ], [ "trove/extensions/mgmt/instances/service.py", "no-member", "Instance of 'BuiltInstance' has no 'get_diagnostics' member", "MgmtInstanceController.diagnostics" ], [ "trove/extensions/mgmt/instances/service.py", "no-member", "Instance of 'BuiltInstance' has no 'get_hwinfo' member", "MgmtInstanceController.hwinfo" ], [ "trove/extensions/mgmt/instances/service.py", "no-member", "Instance of 'BuiltInstance' has no 'rpc_ping' member", "MgmtInstanceController.rpc_ping" ], [ "trove/extensions/security_group/models.py", "E1101", "Instance of 'SecurityGroup' has no 'id' member", "SecurityGroup.delete" ], [ "trove/extensions/security_group/models.py", "E1101", "Instance of 'SecurityGroup' has no 'id' member", "SecurityGroup.get_rules" ], [ "trove/extensions/security_group/models.py", "E1101", "Instance of 'SecurityGroup' has no 'id' member", "SecurityGroup.instance_id" ], [ "trove/extensions/security_group/models.py", "E1101", "Instance of 'SecurityGroupInstanceAssociation' has no 'security_group_id' member", "SecurityGroupInstanceAssociation.get_security_group" ], [ "trove/extensions/security_group/models.py", "E1101", "Instance of 'SecurityGroupRule' has no 'group_id' member", "SecurityGroupRule.get_security_group" ], [ "trove/extensions/security_group/models.py", "E1101", "Instance of 'SecurityGroupRule' has no 'id' member", "SecurityGroupRule.delete" ], [ "trove/extensions/security_group/models.py", "no-member", "Instance of 'SecurityGroup' has no 'id' member", "SecurityGroup.delete" ], [ "trove/extensions/security_group/models.py", "no-member", "Instance of 'SecurityGroup' has no 'id' member", "SecurityGroup.get_rules" ], [ "trove/extensions/security_group/models.py", "no-member", "Instance of 'SecurityGroup' has no 'id' member", "SecurityGroup.instance_id" ], [ "trove/extensions/security_group/models.py", "no-member", "Instance of 'SecurityGroupInstanceAssociation' has no 'security_group_id' member", "SecurityGroupInstanceAssociation.get_security_group" ], [ 
"trove/extensions/security_group/models.py", "no-member", "Instance of 'SecurityGroupRule' has no 'group_id' member", "SecurityGroupRule.get_security_group" ], [ "trove/extensions/security_group/models.py", "no-member", "Instance of 'SecurityGroupRule' has no 'id' member", "SecurityGroupRule.delete" ], [ "trove/guestagent/datastore/experimental/cassandra/service.py", "E0611", "No name 'Cluster' in module 'cassandra.cluster'", null ], [ "trove/guestagent/datastore/experimental/cassandra/service.py", "E0611", "No name 'NoHostAvailable' in module 'cassandra.cluster'", null ], [ "trove/guestagent/datastore/experimental/cassandra/service.py", "E1101", "Instance of 'list' has no 'split' member", "CassandraApp.get_seeds" ], [ "trove/guestagent/datastore/experimental/cassandra/service.py", "no-member", "Instance of 'list' has no 'split' member", "CassandraApp.get_seeds" ], [ "trove/guestagent/datastore/experimental/cassandra/service.py", "no-name-in-module", "No name 'Cluster' in module 'cassandra.cluster'", null ], [ "trove/guestagent/datastore/experimental/cassandra/service.py", "no-name-in-module", "No name 'NoHostAvailable' in module 'cassandra.cluster'", null ], [ "trove/guestagent/datastore/experimental/redis/service.py", "E0701", "Bad except clauses order (ConnectionError is an ancestor class of BusyLoadingError)", "RedisAppStatus._get_actual_db_status" ], [ "trove/guestagent/datastore/experimental/redis/service.py", "bad-except-order", "Bad except clauses order (ConnectionError is an ancestor class of BusyLoadingError)", "RedisAppStatus._get_actual_db_status" ], [ "trove/guestagent/strategies/backup/base.py", "E1101", "Instance of 'Popen' has no 'pid' member", "BackupRunner.__exit__" ], [ "trove/guestagent/strategies/backup/base.py", "E1101", "Instance of 'Popen' has no 'pid' member", "BackupRunner._run" ], [ "trove/guestagent/strategies/backup/base.py", "E1101", "Instance of 'Popen' has no 'stdout' member", "BackupRunner.read" ], [ "trove/guestagent/strategies/backup/base.py", "E1101", "Instance of 'Popen' has no 'terminate' member", "BackupRunner.__exit__" ], [ "trove/guestagent/strategies/backup/base.py", "E1101", "Module 'eventlet.green.subprocess' has no 'PIPE' member", "BackupRunner._run" ], [ "trove/guestagent/strategies/backup/base.py", "no-member", "Instance of 'Popen' has no 'pid' member", "BackupRunner.__exit__" ], [ "trove/guestagent/strategies/backup/base.py", "no-member", "Instance of 'Popen' has no 'pid' member", "BackupRunner._run" ], [ "trove/guestagent/strategies/backup/base.py", "no-member", "Instance of 'Popen' has no 'stdout' member", "BackupRunner.read" ], [ "trove/guestagent/strategies/backup/base.py", "no-member", "Instance of 'Popen' has no 'terminate' member", "BackupRunner.__exit__" ], [ "trove/guestagent/strategies/backup/base.py", "no-member", "Module 'eventlet.green.subprocess' has no 'PIPE' member", "BackupRunner._run" ], [ "trove/guestagent/strategies/restore/base.py", "E1101", "Instance of 'Popen' has no 'stdin' member", "RestoreRunner._unpack" ], [ "trove/guestagent/strategies/restore/base.py", "E1101", "Instance of 'RestoreRunner' has no 'base_restore_cmd' member", "RestoreRunner.__init__" ], [ "trove/guestagent/strategies/restore/base.py", "E1101", "Module 'eventlet.green.subprocess' has no 'PIPE' member", "RestoreRunner._unpack" ], [ "trove/guestagent/strategies/restore/base.py", "no-member", "Instance of 'Popen' has no 'stdin' member", "RestoreRunner._unpack" ], [ "trove/guestagent/strategies/restore/base.py", "no-member", "Instance of 'RestoreRunner' 
has no 'base_restore_cmd' member", "RestoreRunner.__init__" ], [ "trove/guestagent/strategies/restore/base.py", "no-member", "Module 'eventlet.green.subprocess' has no 'PIPE' member", "RestoreRunner._unpack" ], [ "trove/guestagent/strategies/restore/experimental/postgresql_impl.py", "E1101", "Instance of 'PgBaseBackup' has no 'pgsql_restore_cmd' member", "PgBaseBackup.write_recovery_file" ], [ "trove/guestagent/strategies/restore/experimental/postgresql_impl.py", "E1101", "Instance of 'Popen' has no 'stdin' member", "PgDump._execute_postgres_restore" ], [ "trove/guestagent/strategies/restore/experimental/postgresql_impl.py", "E1101", "Module 'eventlet.green.subprocess' has no 'PIPE' member", "PgDump._execute_postgres_restore" ], [ "trove/guestagent/strategies/restore/experimental/postgresql_impl.py", "no-member", "Instance of 'PgBaseBackup' has no 'pgsql_restore_cmd' member", "PgBaseBackup.write_recovery_file" ], [ "trove/guestagent/strategies/restore/experimental/postgresql_impl.py", "no-member", "Instance of 'Popen' has no 'stdin' member", "PgDump._execute_postgres_restore" ], [ "trove/guestagent/strategies/restore/experimental/postgresql_impl.py", "no-member", "Module 'eventlet.green.subprocess' has no 'PIPE' member", "PgDump._execute_postgres_restore" ], [ "trove/instance/models.py", "E1101", "Class 'DBInstance' has no 'cluster_id' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBInstance' has no 'id' member", "Instances.load" ], [ "trove/instance/models.py", "E1101", "Class 'DBInstance' has no 'id' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBInstance' has no 'tenant_id' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBInstanceModule' has no 'deleted' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBInstanceModule' has no 'instance_id' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBInstanceModule' has no 'md5' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBInstanceModule' has no 'module_id' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBInstanceModule' has no 'updated' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBModule' has no 'id' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBModule' has no 'md5' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'DBModule' has no 'name' member", "module_instance_count" ], [ "trove/instance/models.py", "E1101", "Class 'InstanceStatus' has no 'LOGGING' member", "SimpleInstance.status" ], [ "trove/instance/models.py", "E1101", "Instance of 'DBInstance' has no 'encrypted_key' member", "DBInstance.key" ], [ "trove/instance/models.py", "no-member", "Class 'DBInstance' has no 'cluster_id' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'DBInstance' has no 'id' member", "Instances.load" ], [ "trove/instance/models.py", "no-member", "Class 'DBInstance' has no 'id' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'DBInstance' has no 'tenant_id' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'DBInstanceModule' has no 'deleted' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'DBInstanceModule' has no 'instance_id' member", "module_instance_count" 
], [ "trove/instance/models.py", "no-member", "Class 'DBInstanceModule' has no 'md5' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'DBInstanceModule' has no 'module_id' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'DBInstanceModule' has no 'updated' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'DBModule' has no 'id' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'DBModule' has no 'md5' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'DBModule' has no 'name' member", "module_instance_count" ], [ "trove/instance/models.py", "no-member", "Class 'InstanceStatus' has no 'LOGGING' member", "SimpleInstance.status" ], [ "trove/instance/models.py", "no-member", "Instance of 'DBInstance' has no 'encrypted_key' member", "DBInstance.key" ], [ "trove/instance/service.py", "E1101", "Instance of 'BuiltInstance' has no 'get_default_configuration_template' member", "InstanceController.configuration" ], [ "trove/instance/service.py", "no-member", "Instance of 'BuiltInstance' has no 'get_default_configuration_template' member", "InstanceController.configuration" ], [ "trove/module/models.py", "E1101", "Class 'DBModule' has no 'datastore_id' member", "Modules.add_datastore_filter" ], [ "trove/module/models.py", "E1101", "Class 'DBModule' has no 'datastore_version_id' member", "Modules.add_ds_version_filter" ], [ "trove/module/models.py", "E1101", "Class 'DBModule' has no 'id' member", "Modules.load_by_ids" ], [ "trove/module/models.py", "E1101", "Class 'DBModule' has no 'tenant_id' member", "Modules.add_tenant_filter" ], [ "trove/module/models.py", "E1101", "Class 'DBModule' has no 'tenant_id' member", "Modules.load" ], [ "trove/module/models.py", "no-member", "Class 'DBModule' has no 'datastore_id' member", "Modules.add_datastore_filter" ], [ "trove/module/models.py", "no-member", "Class 'DBModule' has no 'datastore_version_id' member", "Modules.add_ds_version_filter" ], [ "trove/module/models.py", "no-member", "Class 'DBModule' has no 'id' member", "Modules.load_by_ids" ], [ "trove/module/models.py", "no-member", "Class 'DBModule' has no 'tenant_id' member", "Modules.add_tenant_filter" ], [ "trove/module/models.py", "no-member", "Class 'DBModule' has no 'tenant_id' member", "Modules.load" ], [ "trove/quota/quota.py", "E1101", "Class 'Enum' has no 'COMMITTED' member", "DbQuotaDriver.commit" ], [ "trove/quota/quota.py", "E1101", "Class 'Enum' has no 'RESERVED' member", "DbQuotaDriver.reserve" ], [ "trove/quota/quota.py", "E1101", "Class 'Enum' has no 'ROLLEDBACK' member", "DbQuotaDriver.rollback" ], [ "trove/quota/quota.py", "no-member", "Class 'Enum' has no 'COMMITTED' member", "DbQuotaDriver.commit" ], [ "trove/quota/quota.py", "no-member", "Class 'Enum' has no 'RESERVED' member", "DbQuotaDriver.reserve" ], [ "trove/quota/quota.py", "no-member", "Class 'Enum' has no 'ROLLEDBACK' member", "DbQuotaDriver.rollback" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'BuiltInstance' has no 'create_backup' member", "Manager.create_backup" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'BuiltInstance' has no 'detach_replica' member", "Manager.detach_replica" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'BuiltInstance' has no 'migrate' member", "Manager.migrate" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'BuiltInstance' has no 'reboot' member", "Manager.reboot" ], [ 
"trove/taskmanager/manager.py", "E1101", "Instance of 'BuiltInstance' has no 'resize_flavor' member", "Manager.resize_flavor" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'BuiltInstance' has no 'resize_volume' member", "Manager.resize_volume" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'BuiltInstance' has no 'restart' member", "Manager.restart" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'BuiltInstance' has no 'upgrade' member", "Manager.upgrade" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'FreshInstance' has no 'create_instance' member", "Manager._create_instance" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'FreshInstance' has no 'create_instance' member", "Manager._create_replication_slave" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'FreshInstance' has no 'get_replication_master_snapshot' member", "Manager._create_replication_slave" ], [ "trove/taskmanager/manager.py", "E1101", "Instance of 'FreshInstance' has no 'wait_for_instance' member", "Manager._create_instance" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'BuiltInstance' has no 'create_backup' member", "Manager.create_backup" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'BuiltInstance' has no 'detach_replica' member", "Manager.detach_replica" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'BuiltInstance' has no 'migrate' member", "Manager.migrate" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'BuiltInstance' has no 'reboot' member", "Manager.reboot" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'BuiltInstance' has no 'resize_flavor' member", "Manager.resize_flavor" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'BuiltInstance' has no 'resize_volume' member", "Manager.resize_volume" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'BuiltInstance' has no 'restart' member", "Manager.restart" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'BuiltInstance' has no 'upgrade' member", "Manager.upgrade" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'FreshInstance' has no 'create_instance' member", "Manager._create_instance" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'FreshInstance' has no 'create_instance' member", "Manager._create_replication_slave" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'FreshInstance' has no 'get_replication_master_snapshot' member", "Manager._create_replication_slave" ], [ "trove/taskmanager/manager.py", "no-member", "Instance of 'FreshInstance' has no 'wait_for_instance' member", "Manager._create_instance" ], [ "trove/taskmanager/models.py", "E1101", "Instance of 'BuiltInstance' has no 'backup_required_for_replication' member", "FreshInstanceTasks.get_replication_master_snapshot" ], [ "trove/taskmanager/models.py", "E1101", "Instance of 'BuiltInstance' has no 'get_replication_snapshot' member", "FreshInstanceTasks.get_replication_master_snapshot" ], [ "trove/taskmanager/models.py", "E1101", "Instance of 'ResizeActionBase' has no '_assert_nova_action_was_successful' member", "ResizeActionBase._perform_nova_action" ], [ "trove/taskmanager/models.py", "E1101", "Instance of 'ResizeActionBase' has no '_initiate_nova_action' member", "ResizeActionBase._perform_nova_action" ], [ "trove/taskmanager/models.py", "E1101", "Instance of 'ResizeActionBase' has no '_record_action_success' member", "ResizeActionBase._perform_nova_action" ], [ 
"trove/taskmanager/models.py", "E1101", "Instance of 'ResizeActionBase' has no '_start_datastore' member", "ResizeActionBase._assert_datastore_is_ok" ], [ "trove/taskmanager/models.py", "E1101", "Instance of 'str' has no 'render' member", "FreshInstanceTasks._create_server_volume_heat" ], [ "trove/taskmanager/models.py", "E1123", "Unexpected keyword argument 'recover_func' in method call", "ResizeVolumeAction._resize_active_volume" ], [ "trove/taskmanager/models.py", "no-member", "Instance of 'BuiltInstance' has no 'backup_required_for_replication' member", "FreshInstanceTasks.get_replication_master_snapshot" ], [ "trove/taskmanager/models.py", "no-member", "Instance of 'BuiltInstance' has no 'get_replication_snapshot' member", "FreshInstanceTasks.get_replication_master_snapshot" ], [ "trove/taskmanager/models.py", "no-member", "Instance of 'ResizeActionBase' has no '_assert_nova_action_was_successful' member", "ResizeActionBase._perform_nova_action" ], [ "trove/taskmanager/models.py", "no-member", "Instance of 'ResizeActionBase' has no '_initiate_nova_action' member", "ResizeActionBase._perform_nova_action" ], [ "trove/taskmanager/models.py", "no-member", "Instance of 'ResizeActionBase' has no '_record_action_success' member", "ResizeActionBase._perform_nova_action" ], [ "trove/taskmanager/models.py", "no-member", "Instance of 'ResizeActionBase' has no '_start_datastore' member", "ResizeActionBase._assert_datastore_is_ok" ], [ "trove/taskmanager/models.py", "no-member", "Instance of 'str' has no 'render' member", "FreshInstanceTasks._create_server_volume_heat" ], [ "trove/taskmanager/models.py", "unexpected-keyword-arg", "Unexpected keyword argument 'recover_func' in method call", "ResizeVolumeAction._resize_active_volume" ], [ "trove/common/context.py", "E1101", "Instance of 'TroveContext' has no 'notification' member", "TroveContext.to_dict" ] ], "ignored_file_codes": [], "ignored_file_messages": [], "ignored_files": [ "trove/tests" ], "ignored_messages": [], "include": [ "*.py" ], "options": [ "--rcfile=./pylintrc", "-E" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/tools/trove-pylint.py0000755000175000017500000002630600000000000020347 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2016 Tesora, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
---- trove-12.1.0.dev92/tools/trove-pylint.py ----

#!/usr/bin/env python

# Copyright 2016 Tesora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import print_function

from collections import OrderedDict
import fnmatch
import json
import os
import re
import sys

from pylint import lint
from pylint.reporters import text
import six
from six.moves import cStringIO as csio

DEFAULT_CONFIG_FILE = "tools/trove-pylint.config"
DEFAULT_IGNORED_FILES = ['trove/tests']
DEFAULT_IGNORED_CODES = []
DEFAULT_IGNORED_MESSAGES = []
DEFAULT_ALWAYS_ERROR = [
    "Undefined variable '_'",
    "Undefined variable '_LE'",
    "Undefined variable '_LI'",
    "Undefined variable '_LW'",
    "Undefined variable '_LC'"]

MODE_CHECK = "check"
MODE_REBUILD = "rebuild"


class Config(object):
    def __init__(self, filename=DEFAULT_CONFIG_FILE):
        self.default_config = {
            "include": ["*.py"],
            "folder": "trove",
            "options": ["--rcfile=./pylintrc", "-E"],
            "ignored_files": DEFAULT_IGNORED_FILES,
            "ignored_codes": DEFAULT_IGNORED_CODES,
            "ignored_messages": DEFAULT_IGNORED_MESSAGES,
            "ignored_file_codes": [],
            "ignored_file_messages": [],
            "ignored_file_code_messages": [],
            "always_error_messages": DEFAULT_ALWAYS_ERROR
        }

        self.config = self.default_config

    def sort_config(self):
        sorted_config = OrderedDict()
        for key in sorted(self.config.keys()):
            value = self.get(key)
            if isinstance(value, list) and not isinstance(
                    value, six.string_types):
                sorted_config[key] = sorted(value)
            else:
                sorted_config[key] = value

        return sorted_config

    def save(self, filename=DEFAULT_CONFIG_FILE):
        if os.path.isfile(filename):
            os.rename(filename, "%s~" % filename)

        with open(filename, 'w') as fp:
            # NOTE: json.dump() has no "encoding" keyword on Python 3;
            # plain text output keeps this working on both runtimes.
            json.dump(self.sort_config(), fp,
                      indent=2, separators=(',', ': '))

    def load(self, filename=DEFAULT_CONFIG_FILE):
        with open(filename) as fp:
            self.config = json.load(fp)

    def get(self, attribute):
        return self.config[attribute]

    def is_file_ignored(self, f):
        if any(f.startswith(i)
               for i in self.config['ignored_files']):
            return True

        return False

    def is_file_included(self, f):
        if any(fnmatch.fnmatch(f, wc) for wc in self.config['include']):
            return True

        return False

    def is_always_error(self, message):
        if message in self.config['always_error_messages']:
            return True

        return False

    def ignore(self, filename, code, codename, message):
        # the high priority checks
        if self.is_file_ignored(filename):
            return True

        # never ignore messages
        if self.is_always_error(message):
            return False

        if code in self.config['ignored_codes']:
            return True

        if codename in self.config['ignored_codes']:
            return True

        if message and any(message.startswith(ignore_message)
                           for ignore_message
                           in self.config['ignored_messages']):
            return True

        if filename and message and (
                [filename, message] in self.config['ignored_file_messages']):
            return True

        if filename and code and (
                [filename, code] in self.config['ignored_file_codes']):
            return True

        if filename and codename and (
                [filename, codename] in self.config['ignored_file_codes']):
            return True

        for fcm in self.config['ignored_file_code_messages']:
            if filename != fcm[0]:
                # This ignore rule is for a different file.
                continue

            if fcm[1] not in (code, codename):
                # This ignore rule is for a different code or codename.
                continue

            if message.startswith(fcm[2]):
                return True

        return False

    def ignore_code(self, c):
        _c = set(self.config['ignored_codes'])
        _c.add(c)
        self.config['ignored_codes'] = list(_c)

    def ignore_files(self, f):
        _c = set(self.config['ignored_files'])
        _c.add(f)
        self.config['ignored_files'] = list(_c)

    def ignore_message(self, m):
        _c = set(self.config['ignored_messages'])
        _c.add(m)
        self.config['ignored_messages'] = list(_c)

    def ignore_file_code(self, f, c):
        _c = set(self.config['ignored_file_codes'])
        _c.add((f, c))
        self.config['ignored_file_codes'] = list(_c)

    def ignore_file_message(self, f, m):
        _c = set(self.config['ignored_file_messages'])
        _c.add((f, m))
        self.config['ignored_file_messages'] = list(_c)

    def ignore_file_code_message(self, f, c, m, fn):
        _c = set(self.config['ignored_file_code_messages'])
        _c.add((f, c, m, fn))
        self.config['ignored_file_code_messages'] = list(_c)
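# NOTE: an illustrative sketch (not part of the original module) of how the
# include/ignore checks above combine: ignores match on path prefixes,
# includes on fnmatch wildcards against the bare filename.

def _example_path_filtering():
    cfg = Config()
    # "trove/tests" is an ignored prefix in the default configuration.
    assert cfg.is_file_ignored("trove/tests/unittests/foo.py")
    # Only "*.py" files are included by default.
    assert cfg.is_file_included("models.py")
    assert not cfg.is_file_included("README.rst")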
def main():
    if len(sys.argv) == 1 or sys.argv[1] == "check":
        return check()
    elif sys.argv[1] == "rebuild":
        return rebuild()
    elif sys.argv[1] == "initialize":
        return initialize()
    else:
        return usage()


def usage():
    print("Usage: %s [check|rebuild|initialize]" % sys.argv[0])
    print("\tUse this tool to perform a lint check of the trove project.")
    print("\t check: perform the lint check.")
    print("\t rebuild: rebuild the list of exceptions to ignore.")
    print("\t initialize: write a fresh default configuration file.")
    return 0


class ParseableTextReporter(text.TextReporter):
    name = 'parseable'
    line_format = '{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'

    # that's it folks


class LintRunner(object):
    def __init__(self):
        self.config = Config()
        self.idline = re.compile("^[*]* Module .*")
        self.detail = re.compile(r"(\S+):(\d+): \[(\S+)\((\S+)\),"
                                 r" (\S+)?] (.*)")

    def dolint(self, filename):
        exceptions = set()

        buffer = csio()
        reporter = ParseableTextReporter(output=buffer)
        options = list(self.config.get('options'))
        options.append(filename)
        lint.Run(options, reporter=reporter, exit=False)

        output = buffer.getvalue()
        buffer.close()

        for line in output.splitlines():
            if self.idline.match(line):
                continue

            if self.detail.match(line):
                mo = self.detail.search(line)
                tokens = mo.groups()
                fn = tokens[0]
                ln = tokens[1]
                code = tokens[2]
                codename = tokens[3]
                func = tokens[4]
                message = tokens[5]

                if not self.config.ignore(fn, code, codename, message):
                    exceptions.add((fn, ln, code, codename, func, message))

        return exceptions

    def process(self, mode=MODE_CHECK):
        files_processed = 0
        files_with_errors = 0
        errors_recorded = 0
        exceptions_recorded = 0
        all_exceptions = []

        for (root, dirs, files) in os.walk(self.config.get('folder')):
            # if we shouldn't even bother about this part of the
            # directory structure, we can punt quietly
            if self.config.is_file_ignored(root):
                continue

            # since we are walking top down, let's clean up the dirs
            # that we will walk by eliminating any dirs that will
            # end up getting ignored; rebinding the slice (rather than
            # removing entries while iterating) keeps os.walk() from
            # silently skipping sibling directories.
            dirs[:] = [d for d in dirs
                       if not self.config.is_file_ignored(
                           os.path.join(root, d))]

            # check if we can ignore the file and process if not
            for f in files:
                p = os.path.join(root, f)
                if self.config.is_file_ignored(p):
                    continue

                if not self.config.is_file_included(f):
                    continue

                files_processed += 1
                exceptions = self.dolint(p)
                file_had_errors = 0

                for e in exceptions:
                    # what we do with this exception depends on the
                    # kind of exception, and the mode
                    if self.config.is_always_error(e[5]):
                        all_exceptions.append(e)
                        errors_recorded += 1
                        file_had_errors += 1
                    elif mode == MODE_REBUILD:
                        # parameters to ignore_file_code_message are
                        # filename, code, message and function
                        self.config.ignore_file_code_message(
                            e[0], e[2], e[-1], e[4])
                        self.config.ignore_file_code_message(
                            e[0], e[3], e[-1], e[4])
                        exceptions_recorded += 1
                    elif mode == MODE_CHECK:
                        all_exceptions.append(e)
                        errors_recorded += 1
                        file_had_errors += 1

                if file_had_errors:
                    files_with_errors += 1

        for e in sorted(all_exceptions):
            print("ERROR: %s %s: %s %s, %s: %s" %
                  (e[0], e[1], e[2], e[3], e[4], e[5]))

        return (files_processed, files_with_errors, errors_recorded,
                exceptions_recorded)

    def rebuild(self):
        self.initialize()
        (files_processed, files_with_errors, errors_recorded,
         exceptions_recorded) = self.process(mode=MODE_REBUILD)

        if files_with_errors > 0:
            print("Rebuild failed. %s files processed, %s had errors, "
                  "%s errors recorded." %
                  (files_processed, files_with_errors, errors_recorded))
            return 1

        self.config.save()
        print("Rebuild completed. %s files processed, %s exceptions recorded."
              % (files_processed, exceptions_recorded))
        return 0

    def check(self):
        self.config.load()
        (files_processed, files_with_errors, errors_recorded,
         exceptions_recorded) = self.process(mode=MODE_CHECK)

        if files_with_errors > 0:
            print("Check failed. %s files processed, %s had errors, "
                  "%s errors recorded." %
                  (files_processed, files_with_errors, errors_recorded))
            return 1

        print("Check succeeded. %s files processed" % files_processed)
        return 0

    def initialize(self):
        self.config.save()
        return 0
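# NOTE: an illustrative sketch (not part of the original module) of the
# 'parseable' line format and the groups LintRunner.detail extracts from it.
# The sample finding below is made up for demonstration.

def _example_detail_parsing():
    detail = re.compile(r"(\S+):(\d+): \[(\S+)\((\S+)\),"
                        r" (\S+)?] (.*)")
    sample = ("trove/common/models.py:45: [E1101(no-member), "
              "ModelBase.__eq__] Instance of 'ModelBase' has no 'id' member")
    fn, ln, code, codename, func, message = detail.match(sample).groups()
    assert (fn, ln, code, codename) == (
        "trove/common/models.py", "45", "E1101", "no-member")
    assert func == "ModelBase.__eq__"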
def check():
    exit(LintRunner().check())


def rebuild():
    exit(LintRunner().rebuild())


def initialize():
    exit(LintRunner().initialize())


if __name__ == "__main__":
    main()
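# NOTE: a standalone, illustrative sketch (not from the original archive) of
# driving the checker from another script and acting on its exit-code
# contract (0 = clean, 1 = new findings). The path is an assumption.

import subprocess
import sys

result = subprocess.run([sys.executable, "tools/trove-pylint.py", "check"])
if result.returncode != 0:
    sys.exit("trove-pylint reported new errors; see output above.")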
---- trove-12.1.0.dev92/tools/with_venv.sh ----

#!/bin/bash
set -e

me=${0##*/}

dir="$(dirname $0)"

function print_usage() {
    cat >&2 <

[...]

---- trove-12.1.0.dev92/trove/backup/models.py (fragment) ----

            ... last_backup.updated:
                last_backup = backup
        return last_backup

    @classmethod
    def fail_for_instance(cls, instance_id):
        query = DBBackup.query()
        query = query.filter(DBBackup.instance_id == instance_id,
                             DBBackup.state.in_(BackupState.RUNNING_STATES))
        query = query.filter_by(deleted=False)
        for backup in query.all():
            backup.state = BackupState.FAILED
            backup.save()

    @classmethod
    def delete(cls, context, backup_id):
        """
        update Backup table on deleted flag for given Backup
        :param cls:
        :param context: context containing the tenant id and token
        :param backup_id: Backup uuid
        :return:
        """

        # Recursively delete all children and grandchildren of this backup.
        query = DBBackup.query()
        query = query.filter_by(parent_id=backup_id, deleted=False)
        for child in query.all():
            try:
                cls.delete(context, child.id)
            except exception.NotFound:
                # log the id of the child that could not be found
                LOG.exception("Backup %s cannot be found.", child.id)

        def _delete_resources():
            backup = cls.get_by_id(context, backup_id)
            if backup.is_running:
                msg = _("Backup %s cannot be deleted because it is running.")
                raise exception.UnprocessableEntity(msg % backup_id)
            cls.verify_swift_auth_token(context)
            api.API(context).delete_backup(backup_id)

        return run_with_quotas(context.project_id,
                               {'backups': -1},
                               _delete_resources)

    @classmethod
    def verify_swift_auth_token(cls, context):
        try:
            client = clients.create_swift_client(context)
            client.get_account()
        except ClientException:
            raise exception.SwiftAuthError(tenant_id=context.project_id)
        except exception.NoServiceEndpoint:
            raise exception.SwiftNotFound(tenant_id=context.project_id)
        except ConnectionError:
            raise exception.SwiftConnectionError()


def persisted_models():
    return {'backups': DBBackup}


class DBBackup(DatabaseModelBase):
    """A table for Backup records."""
    _data_fields = ['name', 'description', 'location', 'backup_type',
                    'size', 'tenant_id', 'state', 'instance_id',
                    'checksum', 'backup_timestamp', 'deleted', 'created',
                    'updated', 'deleted_at', 'parent_id',
                    'datastore_version_id']
    _table_name = 'backups'

    @property
    def is_running(self):
        return self.state in BackupState.RUNNING_STATES

    @property
    def is_done(self):
        return self.state in BackupState.END_STATES

    @property
    def is_done_successfuly(self):
        return self.state == BackupState.COMPLETED

    @property
    def filename(self):
        if self.location:
            last_slash = self.location.rfind("/")
            if last_slash < 0:
                raise ValueError(_("Bad location for backup object: %s")
                                 % self.location)
            return self.location[last_slash + 1:]
        else:
            return None

    @property
    def datastore(self):
        if self.datastore_version_id:
            return datastore_models.Datastore.load(
                self.datastore_version.datastore_id)

    @property
    def datastore_version(self):
        if self.datastore_version_id:
            return datastore_models.DatastoreVersion.load_by_uuid(
                self.datastore_version_id)

    def check_swift_object_exist(self, context, verify_checksum=False):
        try:
            parts = self.location.split('/')
            obj = parts[-1]
            container = parts[-2]
            client = clients.create_swift_client(context)
            LOG.debug("Checking if backup exists in %s", self.location)
            resp = client.head_object(container, obj)
            if verify_checksum:
                LOG.debug("Checking if backup checksum matches swift "
                          "for backup %s", self.id)
                # swift returns etag in double quotes
                # e.g. '"dc3b0827f276d8d78312992cc60c2c3f"'
                swift_checksum = resp['etag'].strip('"')
                if self.checksum != swift_checksum:
                    raise exception.RestoreBackupIntegrityError(
                        backup_id=self.id)
            return True
        except ClientException as e:
            if e.http_status == 404:
                return False
            else:
                raise exception.SwiftAuthError(tenant_id=context.project_id)
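# NOTE: a standalone sketch (not from the original archive) of the location
# parsing used by DBBackup.filename and check_swift_object_exist above: the
# last path segment is the object, the one before it the container. The
# sample URL is made up.

location = ("http://swift.example.com/v1/AUTH_abc/"
            "database_backups/b9c8a3f8.xbstream.gz.enc")
parts = location.split('/')
obj, container = parts[-1], parts[-2]
assert container == "database_backups"
assert obj == "b9c8a3f8.xbstream.gz.enc"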
---- trove-12.1.0.dev92/trove/backup/service.py ----

# Copyright 2013 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from oslo_utils import strutils

from trove.backup.models import Backup
from trove.backup import views
from trove.common import apischema
from trove.common import notification
from trove.common.notification import StartNotification
from trove.common import pagination
from trove.common import policy
from trove.common import wsgi

LOG = logging.getLogger(__name__)


class BackupController(wsgi.Controller):
    """
    Controller for accessing backups in the OpenStack API.
    """
    schemas = apischema.backup

    def index(self, req, tenant_id):
        """
        Return all backups information for a tenant ID.
        """
        LOG.debug("Listing backups for tenant %s", tenant_id)
        datastore = req.GET.get('datastore')
        instance_id = req.GET.get('instance_id')
        all_projects = strutils.bool_from_string(req.GET.get('all_projects'))
        context = req.environ[wsgi.CONTEXT_KEY]
        if all_projects:
            policy.authorize_on_tenant(context, 'backup:index:all_projects')
        else:
            policy.authorize_on_tenant(context, 'backup:index')
        backups, marker = Backup.list(
            context,
            datastore=datastore,
            instance_id=instance_id,
            all_projects=all_projects
        )
        view = views.BackupViews(backups)
        paged = pagination.SimplePaginatedDataView(req.url, 'backups', view,
                                                   marker)
        return wsgi.Result(paged.data(), 200)

    def show(self, req, tenant_id, id):
        """Return a single backup."""
        LOG.debug("Showing a backup for tenant %(tenant_id)s ID: '%(id)s'",
                  {'tenant_id': tenant_id, 'id': id})
        context = req.environ[wsgi.CONTEXT_KEY]
        backup = Backup.get_by_id(context, id)
        policy.authorize_on_target(context, 'backup:show',
                                   {'tenant': backup.tenant_id})
        return wsgi.Result(views.BackupView(backup).data(), 200)

    def create(self, req, body, tenant_id):
        LOG.info("Creating a backup for tenant %s", tenant_id)
        context = req.environ[wsgi.CONTEXT_KEY]
        policy.authorize_on_tenant(context, 'backup:create')
        data = body['backup']
        instance = data['instance']
        name = data['name']
        desc = data.get('description')
        parent = data.get('parent_id')
        incremental = data.get('incremental')

        context.notification = notification.DBaaSBackupCreate(
            context, request=req)
        with StartNotification(context, name=name, instance_id=instance,
                               description=desc, parent_id=parent):
            backup = Backup.create(context, instance, name, desc,
                                   parent_id=parent, incremental=incremental)
        return wsgi.Result(views.BackupView(backup).data(), 202)

    def delete(self, req, tenant_id, id):
        LOG.info('Deleting backup for tenant %(tenant_id)s '
                 'ID: %(backup_id)s',
                 {'tenant_id': tenant_id, 'backup_id': id})
        context = req.environ[wsgi.CONTEXT_KEY]
        backup = Backup.get_by_id(context, id)
        policy.authorize_on_target(context, 'backup:delete',
                                   {'tenant': backup.tenant_id})
        context.notification = notification.DBaaSBackupDelete(
            context, request=req)
        with StartNotification(context, backup_id=id):
            Backup.delete(context, id)
        return wsgi.Result(None, 202)
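# NOTE: an illustrative sketch (not from the original archive) of the request
# body BackupController.create() unpacks; only "instance" and "name" are
# mandatory, the remaining keys fall back to None via .get(). The identifiers
# are sample values.

body = {
    "backup": {
        "instance": "44b277eb-39be-4921-be31-3d61b43651d7",
        "name": "nightly-snapshot",
        "description": "pre-upgrade safety backup",  # optional
        "parent_id": None,                           # optional incremental chain
        "incremental": 0,                            # optional
    }
}

data = body["backup"]
assert data["instance"] and data["name"]
assert data.get("missing_key") is None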
---- trove-12.1.0.dev92/trove/backup/state.py ----

# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#


class BackupState(object):
    NEW = "NEW"
    BUILDING = "BUILDING"
    SAVING = "SAVING"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
    DELETE_FAILED = "DELETE_FAILED"
    RUNNING_STATES = [NEW, BUILDING, SAVING]
    END_STATES = [COMPLETED, FAILED, DELETE_FAILED]

---- trove-12.1.0.dev92/trove/backup/views.py ----

# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class BackupView(object):

    def __init__(self, backup):
        self.backup = backup

    def data(self):
        result = {
            "backup": {
                "id": self.backup.id,
                "name": self.backup.name,
                "description": self.backup.description,
                "locationRef": self.backup.location,
                "instance_id": self.backup.instance_id,
                "created": self.backup.created,
                "updated": self.backup.updated,
                "size": self.backup.size,
                "status": self.backup.state,
                "parent_id": self.backup.parent_id,
            }
        }
        if self.backup.datastore_version_id:
            result['backup']['datastore'] = {
                "type": self.backup.datastore.name,
                "version": self.backup.datastore_version.name,
                "version_id": self.backup.datastore_version.id
            }
        return result


class BackupViews(object):

    def __init__(self, backups):
        self.backups = backups

    def data(self):
        backups = []
        for b in self.backups:
            backups.append(BackupView(b).data()["backup"])
        return {"backups": backups}

---- trove-12.1.0.dev92/trove/cluster/__init__.py (empty) ----
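# NOTE: a standalone sketch (not from the original archive) of the JSON
# document BackupView.data() produces for a backup without a datastore
# version, in which case no "datastore" sub-dict is attached. Field values
# are samples.

expected = {
    "backup": {
        "id": "5f2d0b4e-...",
        "name": "nightly-snapshot",
        "description": None,
        "locationRef": "http://swift.example.com/v1/AUTH_abc/db/obj.gz",
        "instance_id": "44b277eb-...",
        "created": "2020-04-10T12:00:00",
        "updated": "2020-04-10T12:05:00",
        "size": 2.4,
        "status": "COMPLETED",
        "parent_id": None,
    }
}
assert "datastore" not in expected["backup"]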
---- trove-12.1.0.dev92/trove/cluster/models.py ----

# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six
from oslo_log import log as logging
from neutronclient.common import exceptions as neutron_exceptions
from novaclient import exceptions as nova_exceptions

from trove.cluster.tasks import ClusterTask
from trove.cluster.tasks import ClusterTasks
from trove.common import cfg
from trove.common import clients
from trove.common import exception
from trove.common.i18n import _
from trove.common.notification import (
    DBaaSClusterAttachConfiguration, DBaaSClusterDetachConfiguration,
    DBaaSClusterGrow, DBaaSClusterShrink, DBaaSClusterResetStatus,
    DBaaSClusterRestart)
from trove.common.notification import DBaaSClusterUpgrade
from trove.common.notification import DBaaSInstanceAttachConfiguration
from trove.common.notification import DBaaSInstanceDetachConfiguration
from trove.common.notification import EndNotification
from trove.common.notification import StartNotification
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import strategy
from trove.common import utils
from trove.configuration import models as config_models
from trove.datastore import models as datastore_models
from trove.db import models as dbmodels
from trove.instance import models as inst_models
from trove.instance.tasks import InstanceTasks
from trove.taskmanager import api as task_api


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def persisted_models():
    return {
        'clusters': DBCluster,
    }


class DBCluster(dbmodels.DatabaseModelBase):
    _data_fields = ['created', 'updated', 'name', 'task_id',
                    'tenant_id', 'datastore_version_id', 'deleted',
                    'deleted_at', 'configuration_id']
    _table_name = 'clusters'

    def __init__(self, task_status, **kwargs):
        """
        Creates a new persistable entity of the cluster.
        :param task_status: the current task of the cluster.
        :type task_status: trove.cluster.tasks.ClusterTask
        """
        kwargs["task_id"] = task_status.code
        kwargs["deleted"] = False
        super(DBCluster, self).__init__(**kwargs)
        self.task_status = task_status

    def _validate(self, errors):
        if ClusterTask.from_code(self.task_id) is None:
            errors['task_id'] = "Not valid."
        if self.task_status is None:
            errors['task_status'] = "Cannot be None."

    @property
    def task_status(self):
        return ClusterTask.from_code(self.task_id)

    @task_status.setter
    def task_status(self, task_status):
        self.task_id = task_status.code
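# NOTE: an illustrative sketch (not part of the original module) of the
# task_status property pair above: the model persists only the integer task
# code, and the property re-hydrates the ClusterTask object on read.

def _example_task_status_round_trip(db_cluster):
    """db_cluster is any DBCluster instance."""
    db_cluster.task_status = ClusterTasks.NONE  # stores ClusterTasks.NONE.code
    assert db_cluster.task_id == ClusterTasks.NONE.code
    assert db_cluster.task_status.code == ClusterTask.from_code(
        db_cluster.task_id).code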
class Cluster(object):

    DEFAULT_LIMIT = CONF.clusters_page_size

    def __init__(self, context, db_info, datastore=None,
                 datastore_version=None):
        self.context = context
        self.db_info = db_info
        self.ds = datastore
        self.ds_version = datastore_version
        if self.ds_version is None:
            self.ds_version = (datastore_models.DatastoreVersion.
                               load_by_uuid(
                                   self.db_info.datastore_version_id))
        if self.ds is None:
            self.ds = (datastore_models.Datastore.
                       load(self.ds_version.datastore_id))
        self._db_instances = None
        self._server_group = None
        self._server_group_loaded = False
        self._locality = None

    @classmethod
    def get_guest(cls, instance):
        return clients.create_guest_client(
            instance.context,
            instance.db_info.id,
            instance.datastore_version.manager)

    @classmethod
    def load_all(cls, context, tenant_id):
        db_infos = DBCluster.find_all(tenant_id=tenant_id,
                                      deleted=False)
        limit = utils.pagination_limit(context.limit, Cluster.DEFAULT_LIMIT)
        data_view = DBCluster.find_by_pagination('clusters', db_infos, "foo",
                                                 limit=limit,
                                                 marker=context.marker)
        next_marker = data_view.next_page_marker
        ret = [cls(context, db_info) for db_info in data_view.collection]
        return ret, next_marker

    @classmethod
    def load(cls, context, cluster_id, clazz=None):
        try:
            db_info = DBCluster.find_by(context=context, id=cluster_id,
                                        deleted=False)
        except exception.ModelNotFoundError:
            raise exception.ClusterNotFound(cluster=cluster_id)
        if not clazz:
            ds_version = (datastore_models.DatastoreVersion.
                          load_by_uuid(db_info.datastore_version_id))
            manager = ds_version.manager
            clazz = strategy.load_api_strategy(manager).cluster_class
        return clazz(context, db_info)

    def update_db(self, **values):
        self.db_info = DBCluster.find_by(id=self.id, deleted=False)
        for key in values:
            setattr(self.db_info, key, values[key])
        self.db_info.save()

    def reset_task(self):
        LOG.info("Setting task to NONE on cluster %s", self.id)
        self.update_db(task_status=ClusterTasks.NONE)

    def reset_status(self):
        LOG.info("Resetting status to NONE on cluster %s", self.id)
        self.reset_task()
        instances = inst_models.DBInstance.find_all(cluster_id=self.id,
                                                    deleted=False).all()
        for inst in instances:
            instance = inst_models.load_any_instance(self.context, inst.id)
            instance.reset_status()

    @property
    def id(self):
        return self.db_info.id

    @property
    def created(self):
        return self.db_info.created

    @property
    def updated(self):
        return self.db_info.updated

    @property
    def name(self):
        return self.db_info.name

    @property
    def task_id(self):
        return self.db_info.task_status.code

    @property
    def task_name(self):
        return self.db_info.task_status.name

    @property
    def task_description(self):
        return self.db_info.task_status.description

    @property
    def tenant_id(self):
        return self.db_info.tenant_id

    @property
    def datastore(self):
        return self.ds

    @property
    def datastore_version(self):
        return self.ds_version

    @property
    def deleted(self):
        return self.db_info.deleted

    @property
    def deleted_at(self):
        return self.db_info.deleted_at

    @property
    def configuration_id(self):
        return self.db_info.configuration_id

    @property
    def db_instances(self):
        """DBInstance objects are persistent, therefore cacheable."""
        if not self._db_instances:
            self._db_instances = inst_models.DBInstance.find_all(
                cluster_id=self.id, deleted=False).all()
        return self._db_instances

    @property
    def instances(self):
        return inst_models.Instances.load_all_by_cluster_id(self.context,
                                                            self.db_info.id)

    @property
    def instances_without_server(self):
        return inst_models.Instances.load_all_by_cluster_id(
            self.context, self.db_info.id, load_servers=False)

    @property
    def server_group(self):
        # The server group could be empty, so we need a flag to cache it
        if not self._server_group_loaded and self.instances:
            self._server_group = None
            # Not all the instances may have the server group loaded, so
            # check them all
            for instance in self.instances:
                if instance.server_group:
                    self._server_group = instance.server_group
                    break
            self._server_group_loaded = True
        return self._server_group
    @property
    def locality(self):
        if not self._locality:
            if self.server_group:
                self._locality = srv_grp.ServerGroup.get_locality(
                    self._server_group)
        return self._locality

    @locality.setter
    def locality(self, value):
        """This is to facilitate the fact that the server group may not be
        set up before the create command returns.
        """
        self._locality = value

    @classmethod
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality, configuration):
        locality = srv_grp.ServerGroup.build_scheduler_hint(
            context, locality, name)
        api_strategy = strategy.load_api_strategy(datastore_version.manager)
        return api_strategy.cluster_class.create(context, name, datastore,
                                                 datastore_version, instances,
                                                 extended_properties,
                                                 locality, configuration)

    def validate_cluster_available(self, valid_states=[ClusterTasks.NONE]):
        if self.db_info.task_status not in valid_states:
            log_fmt = ("This action cannot be performed on the cluster while "
                       "the current cluster task is '%s'.")
            exc_fmt = _("This action cannot be performed on the cluster "
                        "while the current cluster task is '%s'.")
            LOG.error(log_fmt, self.db_info.task_status.name)
            raise exception.UnprocessableEntity(
                exc_fmt % self.db_info.task_status.name)

    def delete(self):
        self.validate_cluster_available([ClusterTasks.NONE,
                                         ClusterTasks.DELETING])
        db_insts = inst_models.DBInstance.find_all(cluster_id=self.id,
                                                   deleted=False).all()
        self.update_db(task_status=ClusterTasks.DELETING)
        # we force the server-group delete here since we need to load the
        # group while the instances still exist. Also, since the instances
        # take a while to be removed they might not all be gone even if we
        # do it after the delete.
        srv_grp.ServerGroup.delete(self.context, self.server_group,
                                   force=True)
        for db_inst in db_insts:
            instance = inst_models.load_any_instance(self.context,
                                                     db_inst.id)
            instance.delete()
        task_api.API(self.context).delete_cluster(self.id)
    def action(self, context, req, action, param):
        if action == 'grow':
            context.notification = DBaaSClusterGrow(context, request=req)
            with StartNotification(context, cluster_id=self.id):
                instances = []
                for node in param:
                    instance = {
                        'flavor_id':
                            utils.get_id_from_href(node['flavorRef'])
                    }
                    if 'name' in node:
                        instance['name'] = node['name']
                    if 'volume' in node:
                        instance['volume_size'] = int(node['volume']['size'])
                    if 'modules' in node:
                        instance['modules'] = node['modules']
                    if 'nics' in node:
                        instance['nics'] = node['nics']
                    if 'availability_zone' in node:
                        instance['availability_zone'] = (
                            node['availability_zone'])
                    if 'type' in node:
                        instance_type = node['type']
                        if isinstance(instance_type, six.string_types):
                            instance_type = instance_type.split(',')
                        instance['instance_type'] = instance_type
                    instances.append(instance)
                return self.grow(instances)
        elif action == 'shrink':
            context.notification = DBaaSClusterShrink(context, request=req)
            instance_ids = [instance['id'] for instance in param]
            with StartNotification(context, cluster_id=self.id,
                                   instance_ids=instance_ids):
                return self.shrink(instance_ids)
        elif action == "reset-status":
            context.notification = DBaaSClusterResetStatus(context,
                                                           request=req)
            with StartNotification(context, cluster_id=self.id):
                return self.reset_status()
        elif action == 'restart':
            context.notification = DBaaSClusterRestart(context, request=req)
            with StartNotification(context, cluster_id=self.id):
                return self.restart()
        elif action == 'upgrade':
            context.notification = DBaaSClusterUpgrade(context, request=req)
            dv_id = param['datastore_version']
            dv = datastore_models.DatastoreVersion.load(self.datastore,
                                                        dv_id)
            with StartNotification(context, cluster_id=self.id,
                                   datastore_version=dv.id):
                self.upgrade(dv)
                self.update_db(datastore_version_id=dv.id)
        elif action == 'configuration_attach':
            configuration_id = param['configuration_id']
            context.notification = DBaaSClusterAttachConfiguration(
                context, request=req)
            with StartNotification(context, cluster_id=self.id,
                                   configuration_id=configuration_id):
                return self.configuration_attach(configuration_id)
        elif action == 'configuration_detach':
            context.notification = DBaaSClusterDetachConfiguration(
                context, request=req)
            with StartNotification(context, cluster_id=self.id):
                return self.configuration_detach()
        else:
            raise exception.BadRequest(_("Action %s not supported") % action)

    def grow(self, instances):
        raise exception.BadRequest(_("Action 'grow' not supported"))

    def shrink(self, instance_ids):
        raise exception.BadRequest(_("Action 'shrink' not supported"))

    def rolling_restart(self):
        self.validate_cluster_available()
        self.db_info.update(task_status=ClusterTasks.RESTARTING_CLUSTER)
        try:
            cluster_id = self.db_info.id
            task_api.load(self.context, self.ds_version.manager
                          ).restart_cluster(cluster_id)
        except Exception:
            self.db_info.update(task_status=ClusterTasks.NONE)
            raise

        return self.__class__(self.context, self.db_info,
                              self.ds, self.ds_version)

    def rolling_upgrade(self, datastore_version):
        """Upgrades a cluster to a new datastore version."""
        LOG.debug("Upgrading cluster %s.", self.id)

        self.validate_cluster_available()
        self.db_info.update(task_status=ClusterTasks.UPGRADING_CLUSTER)
        try:
            cluster_id = self.db_info.id
            ds_ver_id = datastore_version.id
            task_api.load(self.context, self.ds_version.manager
                          ).upgrade_cluster(cluster_id, ds_ver_id)
        except Exception:
            self.db_info.update(task_status=ClusterTasks.NONE)
            raise

        return self.__class__(self.context, self.db_info,
                              self.ds, self.ds_version)

    def restart(self):
        raise exception.BadRequest(_("Action 'restart' not supported"))

    def upgrade(self, datastore_version):
        raise exception.BadRequest(_("Action 'upgrade' not supported"))

    def configuration_attach(self, configuration_id):
        raise exception.BadRequest(
            _("Action 'configuration_attach' not supported"))
# The configuration can be safely detached at this point. self.update_db(configuration_id=configuration_id) LOG.debug("Applying runtime configuration changes.") if instances[0].apply_configuration(configuration): LOG.debug( "Runtime changes have been applied successfully to the " "first node.") remaining_nodes = instances[1:] if apply_on_all: LOG.debug( "Applying the changes to the remaining nodes.") for instance in remaining_nodes: instance.apply_configuration(configuration) else: LOG.debug( "Releasing restart-required task on the remaining " "nodes.") for instance in remaining_nodes: instance.update_db(task_status=InstanceTasks.NONE) finally: self.update_db(task_status=ClusterTasks.NONE) return self.__class__(self.context, self.db_info, self.ds, self.ds_version) def configuration_detach(self): raise exception.BadRequest( _("Action 'configuration_detach' not supported")) def rolling_configuration_remove(self, apply_on_all=True): cluster_notification = self.context.notification request_info = cluster_notification.serialize(self.context) self.validate_cluster_available() self.db_info.update(task_status=ClusterTasks.UPDATING_CLUSTER) try: instances = [inst_models.Instance.load(self.context, instance.id) for instance in self.instances] LOG.debug("Removing changes from cluster nodes.") for instance in instances: if instance.configuration: self.context.notification = ( DBaaSInstanceDetachConfiguration(self.context, **request_info)) with StartNotification(self.context, instance_id=instance.id): with EndNotification(self.context): instance.delete_configuration() else: LOG.debug( "Node '%s' has no configuration attached.", instance.id) # The cluster is in a consistent state with all nodes # requiring restart. # New configuration can be safely attached at this point. configuration_id = self.configuration_id self.update_db(configuration_id=None) LOG.debug("Applying runtime configuration changes.") if instances[0].reset_configuration(configuration_id): LOG.debug( "Runtime changes have been applied successfully to the " "first node.") remaining_nodes = instances[1:] if apply_on_all: LOG.debug( "Applying the changes to the remaining nodes.") for instance in remaining_nodes: instance.reset_configuration(configuration_id) else: LOG.debug( "Releasing restart-required task on the remaining " "nodes.") for instance in remaining_nodes: instance.update_db(task_status=InstanceTasks.NONE) finally: self.update_db(task_status=ClusterTasks.NONE) return self.__class__(self.context, self.db_info, self.ds, self.ds_version) @staticmethod def load_instance(context, cluster_id, instance_id): return inst_models.load_instance_with_info( inst_models.DetailInstance, context, instance_id, cluster_id) @staticmethod def manager_from_cluster_id(context, cluster_id): db_info = DBCluster.find_by(context=context, id=cluster_id, deleted=False) ds_version = (datastore_models.DatastoreVersion. 
                      load_by_uuid(db_info.datastore_version_id))
        return ds_version.manager


def is_cluster_deleting(context, cluster_id):
    cluster = Cluster.load(context, cluster_id)
    return (cluster.db_info.task_status == ClusterTasks.DELETING or
            cluster.db_info.task_status == ClusterTasks.SHRINKING_CLUSTER)


def validate_instance_flavors(context, instances,
                              volume_enabled, ephemeral_enabled):
    """Validate flavors for given instance definitions."""
    nova_cli_cache = dict()
    for instance in instances:
        region_name = instance.get('region_name')
        flavor_id = instance['flavor_id']
        try:
            if region_name in nova_cli_cache:
                nova_client = nova_cli_cache[region_name]
            else:
                nova_client = clients.create_nova_client(
                    context, region_name)
                nova_cli_cache[region_name] = nova_client

            flavor = nova_client.flavors.get(flavor_id)
            if (not volume_enabled and
                    (ephemeral_enabled and flavor.ephemeral == 0)):
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)


def get_required_volume_size(instances, volume_enabled):
    """Calculate the total Trove volume size for given instances."""
    volume_sizes = [instance['volume_size'] for instance in instances
                    if instance.get('volume_size', None)]

    if volume_enabled:
        if len(volume_sizes) != len(instances):
            raise exception.ClusterVolumeSizeRequired()

        total_volume_size = 0
        for volume_size in volume_sizes:
            validate_volume_size(volume_size)
            total_volume_size += volume_size

        return total_volume_size

    if len(volume_sizes) > 0:
        raise exception.VolumeNotSupported()

    return None


def assert_homogeneous_cluster(instances, required_flavor=None,
                               required_volume_size=None):
    """Verify that all instances have the same flavor and volume size
    (volume size = 0 if there should be no Trove volumes).
    """
    assert_same_instance_flavors(instances, required_flavor=required_flavor)
    assert_same_instance_volumes(instances,
                                 required_size=required_volume_size)


def assert_same_instance_flavors(instances, required_flavor=None):
    """Verify that all instances have the same flavor.

    :param required_flavor: The flavor all instances should have, or
                            None if no specific flavor is required.
    :type required_flavor:  flavor_id
    """
    flavors = {instance['flavor_id'] for instance in instances}
    if len(flavors) != 1 or (required_flavor is not None and
                             required_flavor not in flavors):
        raise exception.ClusterFlavorsNotEqual()


def assert_same_instance_volumes(instances, required_size=None):
    """Verify that all instances have the same volume size
    (size = 0 if there is no Trove volume for the instance).

    :param required_size: Size in GB all instances' volumes should have,
                          or 0 if there should be no attached volumes.
                          None if no particular size is required.
    :type required_size:  int
    """
    sizes = {instance.get('volume_size', 0) for instance in instances}
    if len(sizes) != 1 or (required_size is not None and
                           required_size not in sizes):
        raise exception.ClusterVolumeSizesNotEqual()


def validate_volume_size(size):
    """Verify the volume size is within the maximum limit for Trove
    volumes.
    """
    if size is None:
        raise exception.VolumeSizeNotSpecified()
    max_size = CONF.max_accepted_volume_size
    if int(size) > max_size:
        msg = ("Volume 'size' cannot exceed the maximum "
               "of %d GB; %s cannot be accepted."
% (max_size, size)) raise exception.VolumeQuotaExceeded(msg) def validate_instance_nics(context, instances): """Checking networks are same for the cluster.""" instance_nics = [] for instance in instances: nics = instance.get('nics') if nics: instance_nics.append(nics[0].get('net-id')) if len(set(instance_nics)) > 1: raise exception.ClusterNetworksNotEqual() if not instance_nics: return instance_nic = instance_nics[0] try: neutron_client = clients.create_neutron_client(context) neutron_client.find_resource('network', instance_nic) except neutron_exceptions.NotFound: raise exception.NetworkNotFound(uuid=instance_nic) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cluster/service.py0000644000175000017500000002403200000000000021002 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config.cfg import NoSuchOptError from oslo_log import log as logging from trove.cluster import models from trove.cluster import views from trove.common import apischema from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import notification from trove.common.notification import StartNotification from trove.common import pagination from trove.common import policy from trove.common import utils from trove.common import wsgi from trove.datastore import models as datastore_models CONF = cfg.CONF LOG = logging.getLogger(__name__) class ClusterController(wsgi.Controller): """Controller for cluster functionality.""" schemas = apischema.cluster.copy() @classmethod def authorize_cluster_action(cls, context, cluster_rule_name, cluster): policy.authorize_on_target(context, 'cluster:%s' % cluster_rule_name, {'tenant': cluster.tenant_id}) @classmethod def get_action_schema(cls, body, action_schema): action_type = list(body.keys())[0] return action_schema.get(action_type, {}) @classmethod def get_schema(cls, action, body): action_schema = super(ClusterController, cls).get_schema(action, body) if action == 'action': action_schema = cls.get_action_schema(body, action_schema) return action_schema def action(self, req, body, tenant_id, id): LOG.debug(("Committing Action Against Cluster for " "Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\nid : '%(id)s'\n\n"), {"req": req, "id": id, "tenant_id": tenant_id}) if not body: raise exception.BadRequest(_("Invalid request body.")) if len(body) != 1: raise exception.BadRequest(_("Action request should have exactly" " one action specified in body")) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.Cluster.load(context, id) if ('reset-status' in body and 'force_delete' not in body['reset-status']): self.authorize_cluster_action(context, 'reset-status', cluster) elif ('reset-status' in body and 'force_delete' in body['reset-status']): self.authorize_cluster_action(context, 'force_delete', cluster) else: self.authorize_cluster_action(context, 'action', 
cluster) cluster.action(context, req, *next(iter(body.items()))) view = views.load_view(cluster, req=req, load_servers=False) wsgi_result = wsgi.Result(view.data(), 202) return wsgi_result def show(self, req, tenant_id, id): """Return a single cluster.""" LOG.debug(("Showing a Cluster for Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\nid : '%(id)s'\n\n"), {"req": req, "id": id, "tenant_id": tenant_id}) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.Cluster.load(context, id) self.authorize_cluster_action(context, 'show', cluster) return wsgi.Result(views.load_view(cluster, req=req).data(), 200) def show_instance(self, req, tenant_id, cluster_id, instance_id): """Return a single instance belonging to a cluster.""" LOG.debug(("Showing an Instance in a Cluster for Tenant " "'%(tenant_id)s'\n" "req : '%(req)s'\n\n" "cluster_id : '%(cluster_id)s'\n\n" "instance_id : '%(instance_id)s;\n\n"), {"req": req, "tenant_id": tenant_id, "cluster_id": cluster_id, "instance_id": instance_id}) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.Cluster.load(context, cluster_id) self.authorize_cluster_action(context, 'show_instance', cluster) instance = models.Cluster.load_instance(context, cluster.id, instance_id) return wsgi.Result(views.ClusterInstanceDetailView( instance, req=req).data(), 200) def delete(self, req, tenant_id, id): """Delete a cluster.""" LOG.debug(("Deleting a Cluster for Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\nid : '%(id)s'\n\n"), {"req": req, "id": id, "tenant_id": tenant_id}) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.Cluster.load(context, id) self.authorize_cluster_action(context, 'delete', cluster) context.notification = notification.DBaaSClusterDelete(context, request=req) with StartNotification(context, cluster_id=id): cluster.delete() return wsgi.Result(None, 202) def index(self, req, tenant_id): """Return a list of clusters.""" LOG.debug(("Showing a list of clusters for Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n"), {"req": req, "tenant_id": tenant_id}) context = req.environ[wsgi.CONTEXT_KEY] # This theoretically allows the Admin tenant list clusters for # only one particular tenant as opposed to listing all clusters for # for all tenants. # * As far as I can tell this is the only call which actually uses the # passed-in 'tenant_id' for anything. if not context.is_admin and context.project_id != tenant_id: raise exception.TroveOperationAuthError( tenant_id=context.project_id ) # The rule checks that the currently authenticated tenant can perform # the 'cluster-list' action. 
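        # NOTE(editor): a hedged sketch of how this tenant-level check
        # differs from the per-cluster checks used elsewhere in this
        # controller; both calls mirror authorize_cluster_action() above
        # and the statement below (rule names shown are illustrative):
        #
        #   policy.authorize_on_tenant(context, 'cluster:index')
        #   policy.authorize_on_target(context, 'cluster:show',
        #                              {'tenant': cluster.tenant_id})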
policy.authorize_on_tenant(context, 'cluster:index') # load all clusters and instances for the tenant clusters, marker = models.Cluster.load_all(context, tenant_id) view = views.ClustersView(clusters, req=req) paged = pagination.SimplePaginatedDataView(req.url, 'clusters', view, marker) return wsgi.Result(paged.data(), 200) def create(self, req, body, tenant_id): LOG.debug(("Creating a Cluster for Tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\nbody : '%(body)s'\n\n"), {"tenant_id": tenant_id, "req": req, "body": body}) context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'cluster:create') name = body['cluster']['name'] datastore_args = body['cluster'].get('datastore', {}) datastore, datastore_version = ( datastore_models.get_datastore_version(**datastore_args)) extended_properties = body['cluster'].get('extended_properties', {}) try: clusters_enabled = (CONF.get(datastore_version.manager) .get('cluster_support')) except NoSuchOptError: clusters_enabled = False if not clusters_enabled: raise exception.ClusterDatastoreNotSupported( datastore=datastore.name, datastore_version=datastore_version.name) nodes = body['cluster']['instances'] instances = [] for node in nodes: flavor_id = utils.get_id_from_href(node['flavorRef']) volume_size = volume_type = nics = availability_zone = None modules = None if 'volume' in node: volume_size = int(node['volume']['size']) volume_type = node['volume'].get('type') if 'nics' in node: nics = node['nics'] if 'availability_zone' in node: availability_zone = node['availability_zone'] if 'modules' in node: modules = node['modules'] instances.append({"flavor_id": flavor_id, "volume_size": volume_size, "volume_type": volume_type, "nics": nics, "availability_zone": availability_zone, 'region_name': node.get('region_name'), "modules": modules}) locality = body['cluster'].get('locality') if locality: locality_domain = ['affinity', 'anti-affinity'] locality_domain_msg = ("Invalid locality '%s'. " "Must be one of ['%s']" % (locality, "', '".join(locality_domain))) if locality not in locality_domain: raise exception.BadRequest(message=locality_domain_msg) configuration = body['cluster'].get('configuration') context.notification = notification.DBaaSClusterCreate(context, request=req) with StartNotification(context, name=name, datastore=datastore.name, datastore_version=datastore_version.name): cluster = models.Cluster.create(context, name, datastore, datastore_version, instances, extended_properties, locality, configuration) cluster.locality = locality view = views.load_view(cluster, req=req, load_servers=False) return wsgi.Result(view.data(), 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cluster/tasks.py0000644000175000017500000000516400000000000020474 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
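# NOTE(editor): a hedged usage sketch for the task registry defined below.
# Every ClusterTask instance registers itself in a code->task lookup table,
# so persisted integer codes can be mapped back to task objects:
#
#   task = ClusterTask.from_code(0x03)
#   assert task == ClusterTasks.DELETING
#   str(task)   # "(3 DELETING Deleting the cluster.)"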
class ClusterTask(object):
    """
    Stores the different kinds of tasks being performed by a cluster.
    """
    _lookup = {}

    def __init__(self, code, name, description):
        self._code = int(code)
        self._name = name
        self._description = description
        ClusterTask._lookup[self._code] = self

    @property
    def code(self):
        return self._code

    @property
    def name(self):
        return self._name

    @property
    def description(self):
        return self._description

    def __eq__(self, other):
        if not isinstance(other, ClusterTask):
            return False
        return self._code == other._code

    @classmethod
    def from_code(cls, code):
        if code not in cls._lookup:
            return None
        return cls._lookup[code]

    def __str__(self):
        return "(%d %s %s)" % (self._code, self._name, self._description)

    def __repr__(self):
        return "ClusterTask.%s (%s)" % (self._name, self._description)


class ClusterTasks(object):
    NONE = ClusterTask(0x01, 'NONE',
                       'No tasks for the cluster.')
    BUILDING_INITIAL = ClusterTask(
        0x02, 'BUILDING', 'Building the initial cluster.')
    DELETING = ClusterTask(0x03, 'DELETING',
                           'Deleting the cluster.')
    ADDING_SHARD = ClusterTask(
        0x04, 'ADDING_SHARD', 'Adding a shard to the cluster.')
    GROWING_CLUSTER = ClusterTask(
        0x05, 'GROWING_CLUSTER', 'Increasing the size of the cluster.')
    SHRINKING_CLUSTER = ClusterTask(
        0x06, 'SHRINKING_CLUSTER', 'Decreasing the size of the cluster.')
    UPGRADING_CLUSTER = ClusterTask(
        0x07, 'UPGRADING_CLUSTER', 'Upgrading the cluster to a new version.')
    RESTARTING_CLUSTER = ClusterTask(
        0x08, 'RESTARTING_CLUSTER', 'Restarting the cluster.')
    UPDATING_CLUSTER = ClusterTask(
        0x09, 'UPDATING_CLUSTER', 'Updating cluster configuration.')


# Dissuade further additions at run-time.
ClusterTask.__init__ = None
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cluster/views.py0000644000175000017500000001173100000000000020501 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
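# NOTE(editor): a hedged usage sketch for the view classes defined below;
# load_view() selects the datastore-specific view class and data() renders
# the REST payload (variable names are illustrative):
#
#   view = load_view(cluster, req=req, load_servers=False)
#   body = view.data()   # {"cluster": {"id": ..., "instances": [...], ...}}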
from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.cluster import strategy from trove.common.views import create_links from trove.instance.views import InstanceDetailView LOG = logging.getLogger(__name__) CONF = cfg.CONF class ClusterView(object): def __init__(self, cluster, req=None, load_servers=True): self.cluster = cluster self.req = req self.load_servers = load_servers def data(self): instances, ip_list = self.build_instances() cluster_dict = { "id": self.cluster.id, "name": self.cluster.name, "task": {"id": self.cluster.task_id, "name": self.cluster.task_name, "description": self.cluster.task_description}, "created": self.cluster.created, "updated": self.cluster.updated, "links": self._build_links(), "datastore": {"type": self.cluster.datastore.name, "version": self.cluster.datastore_version.name}, "instances": instances } if ip_list: cluster_dict["ip"] = ip_list extended_properties = self.get_extended_properties() if extended_properties: cluster_dict["extended_properties"] = extended_properties if self.cluster.locality: cluster_dict['locality'] = self.cluster.locality if self.cluster.configuration_id: cluster_dict['configuration'] = self.cluster.configuration_id LOG.debug(cluster_dict) return {"cluster": cluster_dict} def _build_links(self): return create_links("clusters", self.req, self.cluster.id) def _build_instances(self, ip_to_be_published_for=[], instance_dict_to_be_published_for=[]): instances = [] ip_list = [] if self.load_servers: cluster_instances = self.cluster.instances else: cluster_instances = self.cluster.instances_without_server for instance in cluster_instances: instance_dict = { "id": instance.id, "name": instance.name, "type": instance.type, "links": create_links("instances", self.req, instance.id) } if instance.shard_id: instance_dict["shard_id"] = instance.shard_id if self.load_servers: instance_dict["status"] = instance.status if CONF.get(instance.datastore_version.manager).volume_support: instance_dict["volume"] = {"size": instance.volume_size} instance_dict["flavor"] = self._build_flavor_info( instance.flavor_id) instance_ips = instance.get_visible_ip_addresses() if self.load_servers and instance_ips: instance_dict["ip"] = instance_ips if instance.type in ip_to_be_published_for: ip_list.extend(instance_ips) if instance.type in instance_dict_to_be_published_for: instances.append(instance_dict) ip_list.sort() return instances, ip_list def build_instances(self): raise NotImplementedError() def get_extended_properties(self): return None def _build_flavor_info(self, flavor_id): return { "id": flavor_id, "links": create_links("flavors", self.req, flavor_id) } class ClusterInstanceDetailView(InstanceDetailView): def __init__(self, instance, req): super(ClusterInstanceDetailView, self).__init__(instance, req=req) def data(self): result = super(ClusterInstanceDetailView, self).data() return result class ClustersView(object): def __init__(self, clusters, req=None): self.clusters = clusters self.req = req def data(self): data = [] for cluster in self.clusters: data.append(self.data_for_cluster(cluster)) return {'clusters': data} def data_for_cluster(self, cluster): view = load_view(cluster, req=self.req, load_servers=False) return view.data()['cluster'] def load_view(cluster, req, load_servers=True): manager = cluster.datastore_version.manager return strategy.load_api_strategy(manager).cluster_view_class( cluster, req, load_servers) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 
mtime=1586541819.73211 trove-12.1.0.dev92/trove/cmd/0000755000175000017500000000000000000000000016051 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cmd/__init__.py0000644000175000017500000000202300000000000020157 0ustar00coreycorey00000000000000# Copyright 2015 Tesora, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file implements eventlet monkey patching according to the OpenStack # guidelines and best practices found at (note the multi-line URL) # http://specs.openstack.org/openstack/ # openstack-specs/specs/eventlet-best-practices.html # # It is not safe to leave monkey patching till later. import os if not os.environ.get('NO_EVENTLET_MONKEYPATCH'): import eventlet eventlet.monkey_patch(all=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cmd/api.py0000644000175000017500000000260600000000000017200 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils from trove.cmd.common import with_initialize from trove.common import profile @with_initialize def main(CONF): from trove.common import cfg from trove.common import notification from trove.common import wsgi from trove.instance import models as inst_models notification.DBaaSAPINotification.register_notify_callback( inst_models.persist_instance_fault) cfg.set_api_config_defaults() profile.setup_profiler('api', CONF.host) conf_file = CONF.find_file(CONF.api_paste_config) workers = CONF.trove_api_workers or processutils.get_worker_count() launcher = wsgi.launch('trove', CONF.bind_port, conf_file, host=CONF.bind_host, workers=workers) launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cmd/app.wsgi0000644000175000017500000000256400000000000017533 0ustar00coreycorey00000000000000# Copyright 2017 Amrith Kumar. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Used for deploying Trove API through mod-wsgi """ from oslo_log import log as logging from trove.cmd.common import with_initialize from trove.common import pastedeploy from trove.common import profile LOG = logging.getLogger('trove.cmd.app') @with_initialize def wsgimain(CONF): from trove.common import cfg from trove.common import notification from trove.instance import models as inst_models notification.DBaaSAPINotification.register_notify_callback( inst_models.persist_instance_fault) cfg.set_api_config_defaults() profile.setup_profiler('api', CONF.host) conf_file = CONF.find_file(CONF.api_paste_config) LOG.debug("Trove started on %s", CONF.host) return pastedeploy.paste_deploy_app(conf_file, 'trove', {}) application = wsgimain() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cmd/common.py0000644000175000017500000000364500000000000017723 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def initialize(extra_opts=None, pre_logging=None): # Import only the modules necessary to initialize logging and determine if # debug_utils are enabled. import sys from oslo_log import log as logging from trove.common import cfg from trove.common import debug_utils conf = cfg.CONF if extra_opts: conf.register_cli_opts(extra_opts) cfg.parse_args(sys.argv) if pre_logging: pre_logging(conf) logging.setup(conf, None) debug_utils.setup() # rpc module must be loaded after decision about thread monkeypatching # because if thread module is not monkeypatched we can't use eventlet # executor from oslo_messaging library. from trove import rpc rpc.init(conf) # Initialize Trove database. from trove.db import get_db_api get_db_api().configure_db(conf) return conf # May be used by other scripts def with_initialize(main_function=None, **kwargs): """ Decorates a script main function to make sure that dependency imports and initialization happens correctly. """ def apply(main_function): def run(): conf = initialize(**kwargs) return main_function(conf) return run if main_function: return apply(main_function) else: return apply ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cmd/conductor.py0000644000175000017500000000320200000000000020420 0ustar00coreycorey00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils from oslo_service import service as openstack_service from trove.cmd.common import with_initialize from trove.conductor import api as conductor_api @with_initialize def main(conf): from trove.common import notification from trove.common.rpc import conductor_host_serializer as sz from trove.common.rpc import service as rpc_service from trove.instance import models as inst_models notification.DBaaSAPINotification.register_notify_callback( inst_models.persist_instance_fault) topic = conf.conductor_queue server = rpc_service.RpcService( key=None, manager=conf.conductor_manager, topic=topic, rpc_api_version=conductor_api.API.API_LATEST_VERSION, secure_serializer=sz.ConductorHostSerializer) workers = conf.trove_conductor_workers or processutils.get_worker_count() launcher = openstack_service.launch(conf, server, workers=workers, restart_method='mutate') launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cmd/fakemode.py0000644000175000017500000000427600000000000020207 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
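# NOTE(editor): a hedged invocation sketch exercising the CLI options
# registered below (--fork/-f, --pid-file, --override-logfile); the
# console-script name is an assumption, not defined in this module:
#
#   trove-fake-mode --fork --pid-file=/tmp/trove-fake.pid \
#       --override-logfile=/tmp/trove-fake.log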
import os from oslo_concurrency import processutils from oslo_config import cfg as openstack_cfg from trove.cmd.common import with_initialize opts = [ openstack_cfg.BoolOpt('fork', short='f', default=False, dest='fork'), openstack_cfg.StrOpt('pid-file', default='.pid'), openstack_cfg.StrOpt('override-logfile', default=None), ] def setup_logging(conf): if conf.override_logfile: conf.use_stderr = False conf.log_file = conf.override_logfile @with_initialize(extra_opts=opts, pre_logging=setup_logging) def main(conf): if conf.fork: pid = os.fork() if pid == 0: start_server(conf) else: print("Starting server:%s" % pid) pid_file = conf.pid_file with open(pid_file, 'w') as f: f.write(str(pid)) else: start_server(conf) def start_fake_taskmanager(conf): topic = conf.taskmanager_queue from trove.common.rpc import service as rpc_service from trove.common.rpc import version as rpc_version taskman_service = rpc_service.RpcService( key='', topic=topic, rpc_api_version=rpc_version.RPC_API_VERSION, manager='trove.taskmanager.manager.Manager') taskman_service.start() def start_server(conf): from trove.common import wsgi conf_file = conf.find_file(conf.api_paste_config) workers = conf.trove_api_workers or processutils.get_worker_count() launcher = wsgi.launch('trove', conf.bind_port or 8779, conf_file, workers=workers) start_fake_taskmanager(conf) launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/cmd/guest.py0000644000175000017500000000502100000000000017550 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg as openstack_cfg from oslo_log import log as logging from oslo_service import service as openstack_service from trove.common import cfg from trove.common import debug_utils from trove.common.i18n import _ from trove.guestagent import api as guest_api CONF = cfg.CONF # The guest_id opt definition must match the one in common/cfg.py CONF.register_opts([openstack_cfg.StrOpt('guest_id', default=None, help="ID of the Guest Instance."), openstack_cfg.StrOpt('instance_rpc_encr_key', help=('Key (OpenSSL aes_cbc) for ' 'instance RPC encryption.'))]) def main(): cfg.parse_args(sys.argv) logging.setup(CONF, None) debug_utils.setup() from trove.guestagent import dbaas manager = dbaas.datastore_registry().get(CONF.datastore_manager) if not manager: msg = (_("Manager class not registered for datastore manager %s") % CONF.datastore_manager) raise RuntimeError(msg) if not CONF.guest_id: msg = (_("The guest_id parameter is not set. guest_info.conf " "was not injected into the guest or not read by guestagent")) raise RuntimeError(msg) # rpc module must be loaded after decision about thread monkeypatching # because if thread module is not monkeypatched we can't use eventlet # executor from oslo_messaging library. 
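    # NOTE(editor): an illustrative ordering sketch of why the import sits
    # here rather than at module scope; the calls mirror this function,
    # names unchanged:
    #
    #   cfg.parse_args(sys.argv)   # decide on monkey patching first
    #   logging.setup(CONF, None)
    #   from trove import rpc      # only now is it safe to pull in rpc
    #   rpc.init(CONF)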
from trove import rpc rpc.init(CONF) from trove.common.rpc import service as rpc_service server = rpc_service.RpcService( key=CONF.instance_rpc_encr_key, topic="guestagent.%s" % CONF.guest_id, manager=manager, host=CONF.guest_id, rpc_api_version=guest_api.API.API_LATEST_VERSION) launcher = openstack_service.launch(CONF, server, restart_method='mutate') launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cmd/manage.py0000644000175000017500000003135000000000000017655 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import sys from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.configuration import models as config_models from trove.datastore import models as datastore_models from trove.db import get_db_api CONF = cfg.CONF class Commands(object): def __init__(self): self.db_api = get_db_api() def db_sync(self, repo_path=None): self.db_api.db_sync(CONF, repo_path=repo_path) def db_upgrade(self, version=None, repo_path=None): self.db_api.db_upgrade(CONF, version, repo_path=repo_path) def db_downgrade(self, version, repo_path=None): raise SystemExit(_("Database downgrade is no longer supported.")) def execute(self): exec_method = getattr(self, CONF.action.name) args = inspect.getargspec(exec_method) args.args.remove('self') kwargs = {} for arg in args.args: kwargs[arg] = getattr(CONF.action, arg) exec_method(**kwargs) def datastore_update(self, datastore_name, default_version): try: datastore_models.update_datastore(datastore_name, default_version) print("Datastore '%s' updated." % datastore_name) except exception.DatastoreVersionNotFound as e: print(e) def datastore_version_update(self, datastore, version_name, manager, image_id, packages, active): try: datastore_models.update_datastore_version(datastore, version_name, manager, image_id, packages, active) print("Datastore version '%s' updated." % version_name) except exception.DatastoreNotFound as e: print(e) def db_recreate(self, repo_path): """Drops the database and recreates it.""" self.db_api.drop_db(CONF) self.db_sync(repo_path) def db_load_datastore_config_parameters(self, datastore, datastore_version, config_file_location): print("Loading config parameters for datastore (%s) version (%s)" % (datastore, datastore_version)) config_models.load_datastore_configuration_parameters( datastore, datastore_version, config_file_location) def datastore_version_flavor_add(self, datastore_name, datastore_version_name, flavor_ids): """Adds flavors for a given datastore version id.""" try: dsmetadata = datastore_models.DatastoreVersionMetadata dsmetadata.add_datastore_version_flavor_association( datastore_name, datastore_version_name, flavor_ids.split(",")) print("Added flavors '%s' to the '%s' '%s'." 
                  % (flavor_ids, datastore_name, datastore_version_name))
        except exception.DatastoreVersionNotFound as e:
            print(e)

    def datastore_version_flavor_delete(self, datastore_name,
                                        datastore_version_name, flavor_id):
        """Deletes a flavor's association with a given datastore."""
        try:
            dsmetadata = datastore_models.DatastoreVersionMetadata
            dsmetadata.delete_datastore_version_flavor_association(
                datastore_name, datastore_version_name, flavor_id)
            print("Deleted flavor '%s' from '%s' '%s'." %
                  (flavor_id, datastore_name, datastore_version_name))
        except exception.DatastoreVersionNotFound as e:
            print(e)

    def datastore_version_volume_type_add(self, datastore_name,
                                          datastore_version_name,
                                          volume_type_ids):
        """Adds volume type association for a given datastore version id."""
        try:
            dsmetadata = datastore_models.DatastoreVersionMetadata
            dsmetadata.add_datastore_version_volume_type_association(
                datastore_name, datastore_version_name,
                volume_type_ids.split(","))
            print("Added volume type '%s' to the '%s' '%s'." %
                  (volume_type_ids, datastore_name, datastore_version_name))
        except exception.DatastoreVersionNotFound as e:
            print(e)

    def datastore_version_volume_type_delete(self, datastore_name,
                                             datastore_version_name,
                                             volume_type_id):
        """Deletes a volume type association with a given datastore."""
        try:
            dsmetadata = datastore_models.DatastoreVersionMetadata
            dsmetadata.delete_datastore_version_volume_type_association(
                datastore_name, datastore_version_name, volume_type_id)
            print("Deleted volume type '%s' from '%s' '%s'." %
                  (volume_type_id, datastore_name, datastore_version_name))
        except exception.DatastoreVersionNotFound as e:
            print(e)

    def datastore_version_volume_type_list(self, datastore_name,
                                           datastore_version_name):
        """Lists volume type associations with a given datastore."""
        try:
            dsmetadata = datastore_models.DatastoreVersionMetadata
            vtlist = dsmetadata.list_datastore_volume_type_associations(
                datastore_name, datastore_version_name)
            if vtlist.count() > 0:
                for volume_type in vtlist:
                    print("Datastore: %s, Version: %s, Volume Type: %s" %
                          (datastore_name, datastore_version_name,
                           volume_type.value))
            else:
                print("No Volume Type Associations found for Datastore: %s, "
                      "Version: %s." %
                      (datastore_name, datastore_version_name))
        except exception.DatastoreVersionNotFound as e:
            print(e)

    def params_of(self, command_name):
        if Commands.has(command_name):
            return utils.MethodInspector(getattr(self, command_name))


def main():

    def actions(subparser):
        repo_path_help = 'SQLAlchemy Migrate repository path.'

        parser = subparser.add_parser(
            'db_sync', description='Populate the database structure')
        parser.add_argument('--repo_path', help=repo_path_help)

        parser = subparser.add_parser(
            'db_upgrade', description='Upgrade the database to the '
            'specified version.')
        parser.add_argument(
            '--version', help='Target version. Defaults to the '
            'latest version.')
        parser.add_argument('--repo_path', help=repo_path_help)

        parser = subparser.add_parser(
            'datastore_update', description='Add or update a datastore. '
            'If the datastore already exists, the default version will be '
            'updated.')
        parser.add_argument(
            'datastore_name', help='Name of the datastore.')
        parser.add_argument(
            'default_version', help='Name or ID of an existing datastore '
            'version to set as the default. When adding a new datastore, use '
            'an empty string.')

        parser = subparser.add_parser(
            'datastore_version_update', description='Add or update a '
            'datastore version.
If the datastore version already exists, all ' 'values except the datastore name and version will be updated.') parser.add_argument('datastore', help='Name of the datastore.') parser.add_argument( 'version_name', help='Name of the datastore version.') parser.add_argument( 'manager', help='Name of the manager that will administer the ' 'datastore version.') parser.add_argument( 'image_id', help='ID of the image used to create an instance of ' 'the datastore version.') parser.add_argument( 'packages', help='Packages required by the datastore version that ' 'are installed on the guest image.') parser.add_argument( 'active', type=int, help='Whether the datastore version is active or not. ' 'Accepted values are 0 and 1.') parser = subparser.add_parser( 'db_recreate', description='Drop the database and recreate it.') parser.add_argument('--repo_path', help=repo_path_help) parser = subparser.add_parser( 'db_load_datastore_config_parameters', description='Loads configuration group parameter validation rules ' 'for a datastore version into the database.') parser.add_argument( 'datastore', help='Name of the datastore.') parser.add_argument( 'datastore_version', help='Name of the datastore version.') parser.add_argument( 'config_file_location', help='Fully qualified file path to the configuration group ' 'parameter validation rules.') parser = subparser.add_parser( 'datastore_version_flavor_add', help='Adds flavor association to ' 'a given datastore and datastore version.') parser.add_argument('datastore_name', help='Name of the datastore.') parser.add_argument('datastore_version_name', help='Name of the ' 'datastore version.') parser.add_argument('flavor_ids', help='Comma separated list of ' 'flavor ids.') parser = subparser.add_parser( 'datastore_version_flavor_delete', help='Deletes a flavor ' 'associated with a given datastore and datastore version.') parser.add_argument('datastore_name', help='Name of the datastore.') parser.add_argument('datastore_version_name', help='Name of the ' 'datastore version.') parser.add_argument('flavor_id', help='The flavor to be deleted for ' 'a given datastore and datastore version.') parser = subparser.add_parser( 'datastore_version_volume_type_add', help='Adds volume_type ' 'association to a given datastore and datastore version.') parser.add_argument('datastore_name', help='Name of the datastore.') parser.add_argument('datastore_version_name', help='Name of the ' 'datastore version.') parser.add_argument('volume_type_ids', help='Comma separated list of ' 'volume_type ids.') parser = subparser.add_parser( 'datastore_version_volume_type_delete', help='Deletes a volume_type ' 'associated with a given datastore and datastore version.') parser.add_argument('datastore_name', help='Name of the datastore.') parser.add_argument('datastore_version_name', help='Name of the ' 'datastore version.') parser.add_argument('volume_type_id', help='The volume_type to be ' 'deleted for a given datastore and datastore ' 'version.') parser = subparser.add_parser( 'datastore_version_volume_type_list', help='Lists the volume_types ' 'associated with a given datastore and datastore version.') parser.add_argument('datastore_name', help='Name of the datastore.') parser.add_argument('datastore_version_name', help='Name of the ' 'datastore version.') cfg.custom_parser('action', actions) cfg.parse_args(sys.argv) try: logging.setup(CONF, None) Commands().execute() sys.exit(0) except TypeError as e: print(_("Possible wrong number of arguments supplied %s.") % e) sys.exit(2) except Exception: 
print(_("Command failed, please check log for more info.")) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cmd/status.py0000644000175000017500000000477200000000000017760 0ustar00coreycorey00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_upgradecheck import upgradecheck from trove.common.i18n import _ from trove import db from trove.instance.models import DBInstance from trove.instance.tasks import InstanceTasks class Checks(upgradecheck.UpgradeCommands): """Various upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ def _check_instances_with_running_tasks(self): """Finds Trove instances with running tasks. Such instances need to communicate with Trove control plane to report status. This may rise issues if Trove services are unavailable, e.g. Trove guest agent may be left in a failed state due to communication issues. """ db_api = db.get_db_api() db_api.configure_db(cfg.CONF) query = DBInstance.query() query = query.filter(DBInstance.task_status != InstanceTasks.NONE) query = query.filter_by(deleted=False) instances_with_tasks = query.count() if instances_with_tasks: return upgradecheck.Result( upgradecheck.Code.WARNING, _("Instances with running tasks exist.")) return upgradecheck.Result(upgradecheck.Code.SUCCESS) # The format of the check functions is to return an # oslo_upgradecheck.upgradecheck.Result # object with the appropriate # oslo_upgradecheck.upgradecheck.Code and details set. # If the check hits warnings or failures then those should be stored # in the returned Result's "details" attribute. The # summary will be rolled up at the end of the check() method. _upgrade_checks = ( (_("instances_with_running_tasks"), _check_instances_with_running_tasks), ) def main(): return upgradecheck.main( cfg.CONF, project="trove", upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/cmd/taskmanager.py0000644000175000017500000000340100000000000020716 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg as openstack_cfg from oslo_service import service as openstack_service from trove.cmd.common import with_initialize extra_opts = [openstack_cfg.StrOpt('taskmanager_manager')] def startup(conf, topic): from trove.common import notification from trove.common.rpc import service as rpc_service from trove.instance import models as inst_models from trove.taskmanager import api as task_api notification.DBaaSAPINotification.register_notify_callback( inst_models.persist_instance_fault) if conf.enable_secure_rpc_messaging: key = conf.taskmanager_rpc_encr_key else: key = None server = rpc_service.RpcService( key=key, manager=conf.taskmanager_manager, topic=topic, rpc_api_version=task_api.API.API_LATEST_VERSION) launcher = openstack_service.launch(conf, server, restart_method='mutate') launcher.wait() @with_initialize(extra_opts=extra_opts) def main(conf): startup(conf, conf.taskmanager_queue) @with_initialize(extra_opts=extra_opts) def mgmt_main(conf): startup(conf, "mgmt-taskmanager") ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.73611 trove-12.1.0.dev92/trove/common/0000755000175000017500000000000000000000000016576 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/__init__.py0000644000175000017500000000000000000000000020675 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/common/api.py0000644000175000017500000003431500000000000017727 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
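# NOTE(editor): a hedged summary of part of the routing table built below;
# each entry corresponds to a mapper.connect() call in this module:
#
#   GET    /{tenant_id}/clusters        -> ClusterController.index
#   POST   /{tenant_id}/clusters/{id}   -> ClusterController.action
#   DELETE /{tenant_id}/clusters/{id}   -> ClusterController.delete
#   GET    /{tenant_id}/instances/{id}  -> InstanceController.show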
import routes from trove.backup.service import BackupController from trove.cluster.service import ClusterController from trove.common import wsgi from trove.configuration.service import ConfigurationsController from trove.configuration.service import ParametersController from trove.datastore.service import DatastoreController from trove.flavor.service import FlavorController from trove.instance.service import InstanceController from trove.limits.service import LimitsController from trove.module.service import ModuleController from trove.versions import VersionsController from trove.volume_type.service import VolumeTypesController class API(wsgi.Router): """Defines the API routes.""" def __init__(self): mapper = routes.Mapper() super(API, self).__init__(mapper) self._instance_router(mapper) self._cluster_router(mapper) self._datastore_router(mapper) self._flavor_router(mapper) self._volume_type_router(mapper) self._versions_router(mapper) self._limits_router(mapper) self._backups_router(mapper) self._configurations_router(mapper) self._modules_router(mapper) def _versions_router(self, mapper): versions_resource = VersionsController().create_resource() mapper.connect("/", controller=versions_resource, action="show", conditions={'method': ['GET']}) def _datastore_router(self, mapper): datastore_resource = DatastoreController().create_resource() mapper.resource("datastore", "/{tenant_id}/datastores", controller=datastore_resource) mapper.connect("/{tenant_id}/datastores/{datastore}/versions", controller=datastore_resource, action="version_index") mapper.connect("/{tenant_id}/datastores/{datastore}/versions/{id}", controller=datastore_resource, action="version_show") mapper.connect( "/{tenant_id}/datastores/{datastore}/versions/" "{version_id}/flavors", controller=datastore_resource, action="list_associated_flavors", conditions={'method': ['GET']} ) mapper.connect( "/{tenant_id}/datastores/{datastore}/versions/" "{version_id}/volume-types", controller=datastore_resource, action="list_associated_volume_types", conditions={'method': ['GET']} ) mapper.connect("/{tenant_id}/datastores/versions/{uuid}", controller=datastore_resource, action="version_show_by_uuid") def _instance_router(self, mapper): instance_resource = InstanceController().create_resource() mapper.connect("/{tenant_id}/instances", controller=instance_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/detail", controller=instance_resource, action="detail", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances", controller=instance_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/instances/{id}", controller=instance_resource, action="show", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/{id}/action", controller=instance_resource, action="action", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/instances/{id}", controller=instance_resource, action="update", conditions={'method': ['PUT']}) mapper.connect("/{tenant_id}/instances/{id}", controller=instance_resource, action="edit", conditions={'method': ['PATCH']}) mapper.connect("/{tenant_id}/instances/{id}", controller=instance_resource, action="delete", conditions={'method': ['DELETE']}) mapper.connect("/{tenant_id}/instances/{id}/backups", controller=instance_resource, action="backups", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/{id}/configuration", controller=instance_resource, action="configuration", 
conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/{id}/log", controller=instance_resource, action="guest_log_list", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/{id}/log", controller=instance_resource, action="guest_log_action", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/instances/{id}/modules", controller=instance_resource, action="module_list", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/instances/{id}/modules", controller=instance_resource, action="module_apply", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/instances/{id}/modules/{module_id}", controller=instance_resource, action="module_remove", conditions={'method': ['DELETE']}) def _cluster_router(self, mapper): cluster_resource = ClusterController().create_resource() mapper.connect("/{tenant_id}/clusters", controller=cluster_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/clusters/{id}", controller=cluster_resource, action="show", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/clusters", controller=cluster_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/clusters/{id}", controller=cluster_resource, action="action", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/clusters/{cluster_id}/instances/" "{instance_id}", controller=cluster_resource, action="show_instance", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/clusters/{id}", controller=cluster_resource, action="delete", conditions={'method': ['DELETE']}) def _flavor_router(self, mapper): flavor_resource = FlavorController().create_resource() mapper.connect("/{tenant_id}/flavors", controller=flavor_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/flavors/{id}", controller=flavor_resource, action="show", conditions={'method': ['GET']}) def _volume_type_router(self, mapper): volume_type_resource = VolumeTypesController().create_resource() mapper.connect("/{tenant_id}/volume-types", controller=volume_type_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/volume-types/{id}", controller=volume_type_resource, action="show", conditions={'method': ['GET']}) def _limits_router(self, mapper): limits_resource = LimitsController().create_resource() mapper.connect("/{tenant_id}/limits", controller=limits_resource, action="index", conditions={'method': ['GET']}) def _backups_router(self, mapper): backups_resource = BackupController().create_resource() mapper.connect("/{tenant_id}/backups", controller=backups_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/backups", controller=backups_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/backups/{id}", controller=backups_resource, action="show", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/backups/{id}", controller=backups_resource, action="delete", conditions={'method': ['DELETE']}) def _modules_router(self, mapper): modules_resource = ModuleController().create_resource() mapper.resource("modules", "/{tenant_id}/modules", controller=modules_resource) mapper.connect("/{tenant_id}/modules", controller=modules_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/modules", controller=modules_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/{tenant_id}/modules/{id}", controller=modules_resource, action="show", 
conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/modules/{id}", controller=modules_resource, action="update", conditions={'method': ['PUT']}) mapper.connect("/{tenant_id}/modules/{id}", controller=modules_resource, action="delete", conditions={'method': ['DELETE']}) mapper.connect("/{tenant_id}/modules/{id}/instances", controller=modules_resource, action="instances", conditions={'method': ['GET']}) mapper.connect("/{tenant_id}/modules/{id}/instances", controller=modules_resource, action="reapply", conditions={'method': ['PUT']}) def _configurations_router(self, mapper): parameters_resource = ParametersController().create_resource() path = '/{tenant_id}/datastores/versions/{version}/parameters' mapper.connect(path, controller=parameters_resource, action='index_by_version', conditions={'method': ['GET']}) path = '/{tenant_id}/datastores/versions/{version}/parameters/{name}' mapper.connect(path, controller=parameters_resource, action='show_by_version', conditions={'method': ['GET']}) path = '/{tenant_id}/datastores/{datastore}/versions/{id}' mapper.connect(path + '/parameters', controller=parameters_resource, action='index', conditions={'method': ['GET']}) mapper.connect(path + '/parameters/{name}', controller=parameters_resource, action='show', conditions={'method': ['GET']}) configuration_resource = ConfigurationsController().create_resource() mapper.connect('/{tenant_id}/configurations', controller=configuration_resource, action='index', conditions={'method': ['GET']}) mapper.connect('/{tenant_id}/configurations', controller=configuration_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/{tenant_id}/configurations/{id}', controller=configuration_resource, action='show', conditions={'method': ['GET']}) mapper.connect('/{tenant_id}/configurations/{id}/instances', controller=configuration_resource, action='instances', conditions={'method': ['GET']}) mapper.connect('/{tenant_id}/configurations/{id}', controller=configuration_resource, action='edit', conditions={'method': ['PATCH']}) mapper.connect('/{tenant_id}/configurations/{id}', controller=configuration_resource, action='update', conditions={'method': ['PUT']}) mapper.connect('/{tenant_id}/configurations/{id}', controller=configuration_resource, action='delete', conditions={'method': ['DELETE']}) def app_factory(global_conf, **local_conf): return API() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/apischema.py0000644000175000017500000006110600000000000021106 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
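# A minimal, self-contained sketch of how the Routes mapper assembled by
# trove.common.api.API above resolves an incoming request. The controller
# value here is an illustrative string rather than one of the real resource
# objects, and the tenant/instance ids are invented:
import routes

_example_mapper = routes.Mapper()
_example_mapper.connect("/{tenant_id}/instances/{id}",
                        controller="instance_resource", action="show",
                        conditions={'method': ['GET']})

# Routes matches on the path template and, through ``conditions``, on the
# HTTP method taken from the WSGI environ:
_example_match = _example_mapper.match(
    "/tenant-a/instances/abc123", environ={'REQUEST_METHOD': 'GET'})
# _example_match == {'controller': 'instance_resource', 'action': 'show',
#                    'tenant_id': 'tenant-a', 'id': 'abc123'}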
# url_ref = { "type": "string", "minLength": 8, "pattern": r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]' r'|(?:%[0-9a-fA-F][0-9a-fA-F]))+' } boolean_string = { "type": "integer", "minimum": 0, "maximum": 1 } non_empty_string = { "type": "string", "minLength": 1, "maxLength": 255, "pattern": "^.*[0-9a-zA-Z]+.*$" } configuration_data_types = { "type": "string", "minLength": 1, "pattern": "integer|string" } configuration_integer_size = { "type": "string", "maxLength": 40, "pattern": "[0-9]+" } configuration_positive_integer = { "type": "string", "maxLength": 40, "minLength": 1, "pattern": "^0*[1-9]+[0-9]*$" } configuration_non_empty_string = { "type": "string", "minLength": 1, "maxLength": 128, "pattern": "^.*[0-9a-zA-Z]+.*$" } flavorref = { 'oneOf': [ non_empty_string, { "type": "integer" }] } volume_size = { "oneOf": [ { "type": "integer", "minimum": 1 }, configuration_positive_integer] } number_of_nodes = { "oneOf": [ { "type": "integer", "minimum": 1 }, configuration_positive_integer] } host_string = { "type": "string", "minLength": 1, "pattern": r"^[%]?[\w(-).]*[%]?$" } name_string = { "type": "string", "minLength": 1, "pattern": "^.*[0-9a-zA-Z]+.*$" } uuid = { "type": "string", "minLength": 1, "maxLength": 64, "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}" "-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$" } volume = { "type": "object", "required": ["size"], "properties": { "size": volume_size, "type": { "oneOf": [ non_empty_string, {"type": "null"} ] } } } nics = { "type": "array", "maxItems": 1, "items": { "type": "object", "additionalProperties": False, "properties": { "net-id": uuid } } } databases_ref_list = { "type": "array", "minItems": 0, "uniqueItems": True, "items": { "type": "object", "required": ["name"], "additionalProperties": True, "properties": { "name": non_empty_string } } } databases_ref_list_required = { "type": "array", "minItems": 0, "uniqueItems": True, "items": { "type": "object", "required": ["name"], "additionalProperties": True, "properties": { "name": non_empty_string } } } databases_ref = { "type": "object", "required": ["databases"], "additionalProperties": True, "properties": { "databases": databases_ref_list_required } } databases_def = { "type": "array", "minItems": 0, "items": { "type": "object", "required": ["name"], "additionalProperties": True, "properties": { "name": non_empty_string, "character_set": non_empty_string, "collate": non_empty_string } } } user_attributes = { "type": "object", "additionalProperties": True, "minProperties": 1, "properties": { "name": name_string, "password": non_empty_string, "host": host_string } } users_list = { "type": "array", "minItems": 0, "items": { "type": "object", "required": ["name", "password"], "additionalProperties": True, "properties": { "name": name_string, "password": non_empty_string, "host": host_string, "databases": databases_ref_list } } } null_configuration_id = { "type": "null" } configuration_id = { 'oneOf': [ uuid, null_configuration_id ] } module_list = { "type": "array", "minItems": 0, "items": { "type": "object", "required": ["id"], "additionalProperties": True, "properties": { "id": uuid, } } } cluster = { "create": { "type": "object", "required": ["cluster"], "additionalProperties": True, "properties": { "cluster": { "type": "object", "required": ["name", "datastore", "instances"], "additionalProperties": True, "properties": { "name": non_empty_string, "datastore": { "type": "object", "required": ["type", "version"], "additionalProperties": True, "properties": { "type": 
non_empty_string, "version": non_empty_string } }, "instances": { "type": "array", "items": { "type": "object", "required": ["flavorRef"], "additionalProperties": True, "properties": { "flavorRef": flavorref, "volume": volume, "nics": nics, "availability_zone": non_empty_string, "modules": module_list, "region_name": non_empty_string } } }, "locality": non_empty_string, "extended_properties": { "type": "object", "additionalProperties": True, "properties": { "num_configsvr": number_of_nodes, "num_mongos": number_of_nodes, "configsvr_volume_size": volume_size, "configsvr_volume_type": non_empty_string, "mongos_volume_size": volume_size, "mongos_volume_type": non_empty_string } } } } } }, "action": { "add_shard": { "type": "object", "required": ["add_shard"], "additionalProperties": True, "properties": { "add_shard": { "type": "object" } } }, "grow": { "type": "object", "required": ["grow"], "additionalProperties": True, "properties": { "grow": { "type": "array", "items": { "type": "object", "required": ["flavorRef"], "additionalProperties": True, "properties": { "name": non_empty_string, "flavorRef": flavorref, "volume": volume, "nics": nics, "availability_zone": non_empty_string, "modules": module_list, "related_to": non_empty_string, "type": non_empty_string, "region_name": non_empty_string } } } } }, "shrink": { "type": "object", "required": ["shrink"], "additionalProperties": True, "properties": { "shrink": { "type": "array", "items": { "type": "object", "required": ["id"], "additionalProperties": True, "properties": { "id": uuid } } } } }, "upgrade": { "type": "object", "required": ["upgrade"], "additionalProperties": True, "properties": { "upgrade": { "type": "object", "required": ["datastore_version"], "additionalProperties": True, "properties": { "datastore_version": non_empty_string } } } } } } instance = { "create": { "type": "object", "required": ["instance"], "additionalProperties": True, "properties": { "instance": { "type": "object", "required": ["name", "flavorRef"], "additionalProperties": True, "properties": { "name": non_empty_string, "configuration_id": configuration_id, "flavorRef": flavorref, "volume": volume, "databases": databases_def, "users": users_list, "restorePoint": { "type": "object", "required": ["backupRef"], "additionalProperties": True, "properties": { "backupRef": uuid } }, "availability_zone": non_empty_string, "datastore": { "type": "object", "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } }, "nics": nics, "modules": module_list, "region_name": non_empty_string, "locality": non_empty_string, "access": { "type": "object", "properties": { "is_public": {"type": "boolean"}, "allowed_cidrs": { "type": "array", "uniqueItems": True, "items": { "type": "string", "pattern": "^([0-9]{1,3}\.){3}[0-9]{1,3}" "(\/([0-9]|[1-2][0-9]|3[0-2]))?" 
"$" } } } } } } } }, "edit": { "name": "instance:edit", "type": "object", "required": ["instance"], "properties": { "instance": { "type": "object", "required": [], "additionalProperties": False, "properties": { "slave_of": {}, "replica_of": {}, "name": non_empty_string, "configuration": configuration_id, "datastore_version": non_empty_string, } } } }, "action": { "resize": { "volume": { "type": "object", "required": ["resize"], "additionalProperties": True, "properties": { "resize": { "type": "object", "required": ["volume"], "additionalProperties": True, "properties": { "volume": volume } } } }, 'flavorRef': { "type": "object", "required": ["resize"], "additionalProperties": True, "properties": { "resize": { "type": "object", "required": ["flavorRef"], "additionalProperties": True, "properties": { "flavorRef": flavorref } } } } }, "restart": { "type": "object", "required": ["restart"], "additionalProperties": True, "properties": { "restart": { "type": "object" } } } } } mgmt_cluster = { "action": { 'reset-task': { "type": "object", "required": ["reset-task"], "additionalProperties": True, "properties": { "reset-task": { "type": "object" } } } } } mgmt_instance = { "action": { 'migrate': { "type": "object", "required": ["migrate"], "additionalProperties": True, "properties": { "migrate": { "type": "object" } } }, "reboot": { "type": "object", "required": ["reboot"], "additionalProperties": True, "properties": { "reboot": { "type": "object" } } }, "stop": { "type": "object", "required": ["stop"], "additionalProperties": True, "properties": { "stop": { "type": "object" } } } } } user = { "create": { "name": "users:create", "type": "object", "required": ["users"], "properties": { "users": users_list } }, "update_all": { "users": { "type": "object", "required": ["users"], "additionalProperties": True, "properties": { "users": users_list } }, "databases": databases_ref }, "update": { "type": "object", "required": ["user"], "additionalProperties": True, "properties": { "user": user_attributes } } } dbschema = { "create": { "type": "object", "required": ["databases"], "additionalProperties": True, "properties": { "databases": databases_def } } } backup = { "create": { "name": "backup:create", "type": "object", "required": ["backup"], "properties": { "backup": { "type": "object", "required": ["instance", "name"], "properties": { "description": non_empty_string, "instance": uuid, "name": non_empty_string, "parent_id": uuid, "incremental": boolean_string } } } } } guest_log = { "action": { "name": "guest_log:action", "type": "object", "required": ["name"], "properties": { "name": non_empty_string, "enable": boolean_string, "disable": boolean_string, "publish": boolean_string, "discard": boolean_string } } } module_contents = { "type": "string", "minLength": 1, "maxLength": 4294967295, "pattern": "^.*.+.*$" } module_apply_order = { "type": "integer", "minimum": 0, "maximum": 9, } module = { "create": { "name": "module:create", "type": "object", "required": ["module"], "properties": { "module": { "type": "object", "required": ["name", "module_type", "contents"], "additionalProperties": True, "properties": { "name": non_empty_string, "module_type": non_empty_string, "contents": module_contents, "description": non_empty_string, "datastore": { "type": "object", "properties": { "type": non_empty_string, "version": non_empty_string } }, "auto_apply": boolean_string, "all_tenants": boolean_string, "visible": boolean_string, "live_update": boolean_string, "priority_apply": boolean_string, "apply_order": 
module_apply_order, "full_access": boolean_string, } } } }, "update": { "name": "module:update", "type": "object", "required": ["module"], "properties": { "module": { "type": "object", "required": [], "additionalProperties": True, "properties": { "name": non_empty_string, "type": non_empty_string, "contents": module_contents, "description": non_empty_string, "datastore": { "type": "object", "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } }, "auto_apply": boolean_string, "all_tenants": boolean_string, "all_datastores": boolean_string, "all_datastore_versions": boolean_string, "visible": boolean_string, "live_update": boolean_string, "priority_apply": boolean_string, "apply_order": module_apply_order, "full_access": boolean_string, } } } }, "apply": { "name": "module:apply", "type": "object", "required": ["modules"], "properties": { "modules": module_list, } }, "list": { "name": "module:list", "type": "object", "required": [], "properties": { "module": uuid, "from_guest": boolean_string, "include_contents": boolean_string } }, } configuration = { "create": { "name": "configuration:create", "type": "object", "required": ["configuration"], "properties": { "configuration": { "type": "object", "required": ["values", "name"], "properties": { "description": non_empty_string, "values": { "type": "object", }, "name": non_empty_string, "datastore": { "type": "object", "additionalProperties": True, "properties": { "type": non_empty_string, "version": non_empty_string } } } } } }, "update": { "name": "configuration:update", "type": "object", "required": ["configuration"], "properties": { "configuration": { "type": "object", "required": [], "properties": { "description": non_empty_string, "values": { "type": "object", }, "name": non_empty_string } } } }, "edit": { "name": "configuration:edit", "type": "object", "required": ["configuration"], "properties": { "configuration": { "type": "object", "required": [], "properties": { "values": { "type": "object", } } } } } } mgmt_configuration = { "create": { "name": "configuration_parameter:create", "type": "object", "required": ["configuration-parameter"], "properties": { "configuration-parameter": { "type": "object", "required": ["name", "restart_required", "data_type"], "properties": { "name": configuration_non_empty_string, "data_type": configuration_data_types, "restart_required": boolean_string, "max": configuration_integer_size, "min": configuration_integer_size, } } } }, "update": { "name": "configuration_parameter:update", "type": "object", "required": ["configuration-parameter"], "properties": { "configuration-parameter": { "type": "object", "required": ["name", "restart_required", "data_type"], "properties": { "name": configuration_non_empty_string, "data_type": configuration_data_types, "restart_required": boolean_string, "max": configuration_integer_size, "min": configuration_integer_size, } } } }, } account = { 'create': { "type": "object", "name": "users", "required": ["users"], "additionalProperties": True, "properties": { "users": users_list } } } upgrade = { "create": { "type": "object", "required": ["upgrade"], "additionalProperties": True, "properties": { "upgrade": { "type": "object", "required": [], "additionalProperties": True, "properties": { "instance_version": non_empty_string, "location": non_empty_string, "metadata": {} } } } } } package_list = { "type": "array", "minItems": 0, "uniqueItems": True, "items": { "type": "string", "minLength": 1, "maxLength": 255, "pattern": 
"^.*[0-9a-zA-Z]+.*$" } } mgmt_datastore_version = { "create": { "name": "mgmt_datastore_version:create", "type": "object", "required": ["version"], "properties": { "version": { "type": "object", "required": ["name", "datastore_name", "image", "active"], "additionalProperties": True, "properties": { "name": non_empty_string, "datastore_name": non_empty_string, "datastore_manager": non_empty_string, "packages": package_list, "image": uuid, "active": {"enum": [True, False]}, "default": {"enum": [True, False]} } } } }, "edit": { "name": "mgmt_datastore_version:edit", "type": "object", "required": [], "additionalProperties": True, "properties": { "datastore_manager": non_empty_string, "packages": package_list, "image": uuid, "active": {"enum": [True, False]}, "default": {"enum": [True, False]}, } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/auth.py0000644000175000017500000000666400000000000020125 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo_log import log as logging from oslo_utils import strutils import webob.exc from trove.common import exception from trove.common.i18n import _ from trove.common.utils import req_to_text from trove.common import wsgi LOG = logging.getLogger(__name__) class AuthorizationMiddleware(wsgi.Middleware): def __init__(self, application, auth_providers, **local_config): self.auth_providers = auth_providers LOG.debug("Auth middleware providers: %s", auth_providers) super(AuthorizationMiddleware, self).__init__(application, **local_config) def process_request(self, request): roles = request.headers.get('X_ROLE', '').split(',') LOG.debug("Processing auth request with roles: %s", roles) tenant_id = request.headers.get('X-Tenant-Id', None) LOG.debug("Processing auth request with tenant_id: %s", tenant_id) for provider in self.auth_providers: provider.authorize(request, tenant_id, roles) @classmethod def factory(cls, global_config, **local_config): def _factory(app): LOG.debug("Created auth middleware with config: %s", local_config) return cls(app, [TenantBasedAuth()], **local_config) return _factory class TenantBasedAuth(object): # The paths differ from melange, so the regex must differ as well, # trove starts with a tenant_id tenant_scoped_url = re.compile("/(?P.*?)/.*") def authorize(self, request, tenant_id, roles): match_for_tenant = self.tenant_scoped_url.match(request.path_info) if (match_for_tenant and tenant_id == match_for_tenant.group('tenant_id')): LOG.debug(strutils.mask_password( ("Authorized tenant '%(tenant_id)s' request: " "%(request)s") % {'tenant_id': tenant_id, 'request': req_to_text(request)})) return True log_fmt = "User with tenant id %s cannot access this resource." 
exc_fmt = _("User with tenant id %s cannot access this resource.") LOG.error(log_fmt, tenant_id) raise webob.exc.HTTPForbidden(exc_fmt % tenant_id) def admin_context(f): """ Verify that the current context has administrative access, or throw an exception. Trove API functions typically take the form function(self, req), or function(self, req, id). """ def wrapper(*args, **kwargs): try: req = args[1] context = req.environ.get('trove.context') except Exception: raise exception.TroveError("Cannot load request context.") if not context.is_admin: raise exception.Forbidden("User does not have admin privileges.") return f(*args, **kwargs) return wrapper ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/base_exception.py0000644000175000017500000000625700000000000022142 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exceptions common to OpenStack projects """ from oslo_log import log as logging _FATAL_EXCEPTION_FORMAT_ERRORS = False LOG = logging.getLogger(__name__) class Error(Exception): def __init__(self, message=None): super(Error, self).__init__(message) class ApiError(Error): def __init__(self, message='Unknown', code='Unknown'): self.message = message self.code = code super(ApiError, self).__init__('%s: %s' % (code, message)) class NotFound(Error): pass class UnknownScheme(Error): msg = "Unknown scheme '%s' found in URI" def __init__(self, scheme): msg = self.__class__.msg % scheme super(UnknownScheme, self).__init__(msg) class BadStoreUri(Error): msg = "The Store URI %s was malformed. Reason: %s" def __init__(self, uri, reason): msg = self.__class__.msg % (uri, reason) super(BadStoreUri, self).__init__(msg) class Duplicate(Error): pass class NotAuthorized(Error): pass class NotEmpty(Error): pass class Invalid(Error): pass class BadInputError(Exception): """Error resulting from a client sending bad input to a server""" pass class MissingArgumentError(Error): pass class DatabaseMigrationError(Error): pass class ClientConnectionError(Exception): """Error resulting from a client connecting to a server""" pass def wrap_exception(f): def _wrap(*args, **kw): try: return f(*args, **kw) except Exception as e: if not isinstance(e, Error): LOG.exception('Uncaught exception') raise Error(str(e)) raise # Use the Python 3 function attribute; ``func_name`` only existed on # Python 2 and raises AttributeError here. _wrap.__name__ = f.__name__ return _wrap class OpenstackException(Exception): """ Base Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
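For example (an illustrative subclass, not one defined in this module): class InstanceUnavailable(OpenstackException): message = "Instance %(uuid)s is not available" raise InstanceUnavailable(uuid='abc-123')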
""" message = "An unknown exception occurred" def __init__(self, **kwargs): try: self._error_string = self.message % kwargs except Exception: if _FATAL_EXCEPTION_FORMAT_ERRORS: raise else: # at least get the core message out if something happened self._error_string = self.message def __str__(self): return self._error_string class MalformedRequestBody(OpenstackException): message = "Malformed message body: %(reason)s" class InvalidContentType(OpenstackException): message = "Invalid content type %(content_type)s" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/base_wsgi.py0000644000175000017500000006627200000000000021130 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for working with WSGI servers.""" from __future__ import print_function import eventlet eventlet.patcher.monkey_patch(all=False, socket=True) import datetime import errno import socket import sys import time import eventlet.wsgi from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_service import service from oslo_service import sslutils import routes import routes.middleware import webob.dec import webob.exc from xml.dom import minidom from xml.parsers import expat from trove.common import base_exception from trove.common.i18n import _ from trove.common.utils import req_to_text from trove.common import xmlutils socket_opts = [ cfg.IntOpt('backlog', default=4096, help="Number of backlog requests to configure the socket with"), cfg.IntOpt('tcp_keepidle', default=600, help="Sets the value of TCP_KEEPIDLE in seconds for each " "server socket. Not supported on OS X."), ] CONF = cfg.CONF CONF.register_opts(socket_opts) LOG = logging.getLogger(__name__) def run_server(application, port, **kwargs): """Run a WSGI server with the given application.""" sock = eventlet.listen(('0.0.0.0', port)) eventlet.wsgi.server(sock, application, **kwargs) class Service(service.Service): """ Provides a Service API for wsgi servers. This gives us the ability to launch wsgi servers with the Launcher classes in oslo_service.service.py. """ def __init__(self, application, port, host='0.0.0.0', backlog=4096, threads=1000): self.application = application self._port = port self._host = host self._backlog = backlog if backlog else CONF.backlog self._socket = self._get_socket(host, port, self._backlog) super(Service, self).__init__(threads) def _get_socket(self, host, port, backlog): # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). 
We need to get around this in the # future or monitor upstream for a fix info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0] family = info[0] bind_addr = info[-1] sock = None retry_until = time.time() + 30 while not sock and time.time() < retry_until: try: sock = eventlet.listen(bind_addr, backlog=backlog, family=family) if sslutils.is_enabled(CONF): sock = sslutils.wrap(CONF, sock) except socket.error as err: if err.args[0] != errno.EADDRINUSE: raise eventlet.sleep(0.1) if not sock: raise RuntimeError(_("Could not bind to %(host)s:%(port)s " "after trying for 30 seconds") % {'host': host, 'port': port}) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle) return sock def start(self): """Start serving this service using the provided server instance. :returns: None """ super(Service, self).start() self.tg.add_thread(self._run, self.application, self._socket) @property def backlog(self): return self._backlog @property def host(self): return self._socket.getsockname()[0] if self._socket else self._host @property def port(self): return self._socket.getsockname()[1] if self._socket else self._port def stop(self): """Stop serving this API. :returns: None """ super(Service, self).stop() def _run(self, application, socket): """Start a WSGI server in a new green thread.""" logger = logging.getLogger('eventlet.wsgi') eventlet.wsgi.server(socket, application, custom_pool=self.tg.pool, log=logger) class Middleware(object): """ Base WSGI middleware wrapper. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ def __init__(self, application): self.application = application def process_request(self, req): """ Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify def __call__(self, req): response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) class Debug(Middleware): """ Helper class that can be inserted into any WSGI application chain to get information about the request and response. """ @webob.dec.wsgify def __call__(self, req): print(("*" * 40) + " REQUEST ENVIRON") for key, value in req.environ.items(): print(key, "=", value) print() resp = req.get_response(self.application) print(("*" * 40) + " RESPONSE HEADERS") for (key, value) in resp.headers.items(): print(key, "=", value) print() resp.app_iter = self.print_generator(resp.app_iter) return resp @staticmethod def print_generator(app_iter): """ Iterator that prints the contents of a wrapper string iterator when iterated. """ print(("*" * 40) + " BODY") for part in app_iter: sys.stdout.write(part) sys.stdout.flush() yield part print() class Router(object): """ WSGI middleware that maps incoming requests to WSGI apps. """ def __init__(self, mapper): """ Create a router for the given routes.Mapper. 
Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be a wsgi.Controller, who will route the request to the action method. Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, "/svrlist", controller=sc, action="list") # Actions are all implicitly defined mapper.resource("server", "servers", controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @webob.dec.wsgify def __call__(self, req): """ Route the incoming request to a controller based on self.map. If no match, return a 404. """ return self._router @staticmethod @webob.dec.wsgify def _dispatch(req): """ Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: return webob.exc.HTTPNotFound() app = match['controller'] return app class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" default_request_content_types = ('application/json', 'application/xml') default_accept_types = ('application/json', 'application/xml') default_accept_type = 'application/json' def best_match_content_type(self, supported_content_types=None): """Determine the requested response content-type. Based on the query extension then the Accept header. Defaults to default_accept_type if we don't find a preference """ supported_content_types = (supported_content_types or self.default_accept_types) parts = self.path.rsplit('.', 1) if len(parts) > 1: ctype = 'application/{0}'.format(parts[1]) if ctype in supported_content_types: return ctype bm = self.accept.best_match(supported_content_types) return bm or self.default_accept_type def get_content_type(self, allowed_content_types=None): """Determine content type of the request body. Does not do any body introspection, only checks header """ if "Content-Type" not in self.headers: return None content_type = self.content_type allowed_content_types = (allowed_content_types or self.default_request_content_types) if content_type not in allowed_content_types: raise base_exception.InvalidContentType(content_type=content_type) return content_type __str__ = req_to_text class Resource(object): """ WSGI app that handles (de)serialization and controller dispatch. Reads routing information supplied by RoutesMiddleware and calls the requested action method upon its deserializer, controller, and serializer. Those three objects may implement any of the basic controller action methods (create, update, show, index, delete) along with any that may be specified in the api router. A 'default' method may also be implemented to be used in place of any non-implemented actions. Deserializer methods must accept a request argument and return a dictionary. Controller methods must accept a request argument. Additionally, they must also accept keyword arguments that represent the keys returned by the Deserializer. They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. 
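For example (an illustrative request flow): a POST to /instances whose deserializer returns {'body': {...}} leads the dispatcher below to call controller.create(request, body={...}, plus any routing arguments), and the dict the controller returns is serialized into the negotiated content type.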
""" def __init__(self, controller, deserializer=None, serializer=None): """ :param controller: object that implement methods created by routes lib :param deserializer: object that supports webob request deserialization through controller-like actions :param serializer: object that supports webob response serialization through controller-like actions """ self.controller = controller self.serializer = serializer or ResponseSerializer() self.deserializer = deserializer or RequestDeserializer() @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" try: action, action_args, accept = self.deserialize_request(request) except base_exception.InvalidContentType: msg = _("Unsupported Content-Type") return webob.exc.HTTPUnsupportedMediaType(explanation=msg) except base_exception.MalformedRequestBody: msg = _("Malformed request body") return webob.exc.HTTPBadRequest(explanation=msg) action_result = self.execute_action(action, request, **action_args) try: return self.serialize_response(action, action_result, accept) # return unserializable result (typically a webob exc) except Exception: return action_result def deserialize_request(self, request): return self.deserializer.deserialize(request) def serialize_response(self, action, action_result, accept): return self.serializer.serialize(action_result, accept, action) def execute_action(self, action, request, **action_args): return self.dispatch(self.controller, action, request, **action_args) def dispatch(self, obj, action, *args, **kwargs): """Find action-specific method on self and call it.""" try: method = getattr(obj, action) except AttributeError: method = getattr(obj, 'default') return method(*args, **kwargs) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() except Exception: return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): def sanitizer(obj): if isinstance(obj, datetime.datetime): _dtime = obj - datetime.timedelta(microseconds=obj.microsecond) return _dtime.isoformat() return obj return jsonutils.dump_as_bytes(data, default=sanitizer) class XMLDictSerializer(DictSerializer): def __init__(self, metadata=None, xmlns=None): """ :param metadata: information needed to deserialize xml into a dictionary. :param xmlns: XML namespace to include with serialized xml """ super(XMLDictSerializer, self).__init__() self.metadata = metadata or {} self.xmlns = xmlns def default(self, data): # We expect data to contain a single key which is the XML root. 
root_key = list(data.keys())[0] doc = minidom.Document() node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) return self.to_xml_string(node) def to_xml_string(self, node, has_atom=False): self._add_xmlns(node, has_atom) return node.toprettyxml(indent=' ', encoding='UTF-8') # NOTE (ameade): the has_atom should be removed after all of the # xml serializers and view builders have been updated to the current # spec that required all responses include the xmlns:atom, the has_atom # flag is to prevent current tests from breaking def _add_xmlns(self, node, has_atom=False): if self.xmlns is not None: node.setAttribute('xmlns', self.xmlns) if has_atom: node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") def _to_xml_node(self, doc, metadata, nodename, data): """Recursive method to convert data members to XML nodes.""" result = doc.createElement(nodename) # Set the xml namespace if one is specified # TODO(justinsb): We could also use prefixes on the keys xmlns = metadata.get('xmlns', None) if xmlns: result.setAttribute('xmlns', xmlns) # TODO(bcwaldon): accomplish this without a type-check if type(data) is list: collections = metadata.get('list_collections', {}) if nodename in collections: metadata = collections[nodename] for item in data: node = doc.createElement(metadata['item_name']) node.setAttribute(metadata['item_key'], str(item)) result.appendChild(node) return result singular = metadata.get('plurals', {}).get(nodename, None) if singular is None: if nodename.endswith('s'): singular = nodename[:-1] else: singular = 'item' for item in data: node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) # TODO(bcwaldon): accomplish this without a type-check elif type(data) is dict: collections = metadata.get('dict_collections', {}) if nodename in collections: metadata = collections[nodename] for k, v in data.items(): node = doc.createElement(metadata['item_name']) node.setAttribute(metadata['item_key'], str(k)) text = doc.createTextNode(str(v)) node.appendChild(text) result.appendChild(node) return result attrs = metadata.get('attributes', {}).get(nodename, {}) for k, v in data.items(): if k in attrs: result.setAttribute(k, str(v)) else: node = self._to_xml_node(doc, metadata, k, v) result.appendChild(node) else: # Type is atom node = doc.createTextNode(str(data)) result.appendChild(node) return result def _create_link_nodes(self, xml_doc, links): link_nodes = [] for link in links: link_node = xml_doc.createElement('atom:link') link_node.setAttribute('rel', link['rel']) link_node.setAttribute('href', link['href']) if 'type' in link: link_node.setAttribute('type', link['type']) link_nodes.append(link_node) return link_nodes class ResponseHeadersSerializer(ActionDispatcher): """Default response headers serialization.""" def serialize(self, response, data, action): self.dispatch(response, data, action=action) def default(self, response, data): response.status_int = 200 class ResponseSerializer(object): """Encode the necessary pieces into a response object.""" def __init__(self, body_serializers=None, headers_serializer=None): self.body_serializers = { 'application/xml': XMLDictSerializer(), 'application/json': JSONDictSerializer(), } self.body_serializers.update(body_serializers or {}) self.headers_serializer = (headers_serializer or ResponseHeadersSerializer()) def serialize(self, response_data, content_type, action='default'): """Serialize a dict into a string and wrap in a wsgi.Request object. 
:param response_data: dict produced by the Controller :param content_type: expected mimetype of serialized response body """ response = webob.Response() self.serialize_headers(response, response_data, action) self.serialize_body(response, response_data, content_type, action) return response def serialize_headers(self, response, data, action): self.headers_serializer.serialize(response, data, action) def serialize_body(self, response, data, content_type, action): response.headers['Content-Type'] = content_type if data is not None: serializer = self.get_body_serializer(content_type) response.body = serializer.serialize(data, action) def get_body_serializer(self, content_type): try: return self.body_serializers[content_type] except (KeyError, TypeError): raise base_exception.InvalidContentType(content_type=content_type) class RequestHeadersDeserializer(ActionDispatcher): """Default request headers deserializer""" def deserialize(self, request, action): return self.dispatch(request, action=action) def default(self, request): return {} class RequestDeserializer(object): """Break up a Request object into more useful pieces.""" def __init__(self, body_deserializers=None, headers_deserializer=None, supported_content_types=None): self.supported_content_types = supported_content_types self.body_deserializers = { 'application/xml': XMLDeserializer(), 'application/json': JSONDeserializer(), } self.body_deserializers.update(body_deserializers or {}) self.headers_deserializer = (headers_deserializer or RequestHeadersDeserializer()) def deserialize(self, request): """Extract necessary pieces of the request. :param request: Request object :returns: tuple of (expected controller action name, dictionary of keyword arguments to pass to the controller, the expected content type of the response) """ action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) action_args.update(self.deserialize_headers(request, action)) action_args.update(self.deserialize_body(request, action)) accept = self.get_expected_content_type(request) return (action, action_args, accept) def deserialize_headers(self, request, action): return self.headers_deserializer.deserialize(request, action) def deserialize_body(self, request, action): if not len(request.body) > 0: LOG.debug("Empty body provided in request") return {} try: content_type = request.get_content_type() except base_exception.InvalidContentType: LOG.debug("Unrecognized Content-Type provided in request") raise if content_type is None: LOG.debug("No Content-Type provided in request") return {} try: deserializer = self.get_body_deserializer(content_type) except base_exception.InvalidContentType: LOG.debug("Unable to deserialize body as provided Content-Type") raise return deserializer.deserialize(request.body, action) def get_body_deserializer(self, content_type): try: return self.body_deserializers[content_type] except (KeyError, TypeError): raise base_exception.InvalidContentType(content_type=content_type) def get_expected_content_type(self, request): return request.best_match_content_type(self.supported_content_types) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() except Exception: return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, 
action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise base_exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class XMLDeserializer(TextDeserializer): def __init__(self, metadata=None): """ :param metadata: information needed to deserialize xml into a dictionary. """ super(XMLDeserializer, self).__init__() self.metadata = metadata or {} def _from_xml(self, datastring): plurals = set(self.metadata.get('plurals', {})) try: node = xmlutils.safe_minidom_parse_string(datastring).childNodes[0] return {node.nodeName: self._from_xml_node(node, plurals)} except expat.ExpatError: msg = _("cannot understand XML") raise base_exception.MalformedRequestBody(reason=msg) def _from_xml_node(self, node, listnames): """Convert a minidom node to a simple Python type. :param listnames: list of XML node names whose subnodes should be considered list items. """ if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: return node.childNodes[0].nodeValue elif node.nodeName in listnames: return [self._from_xml_node(n, listnames) for n in node.childNodes] else: result = dict() for attr in node.attributes.keys(): result[attr] = node.attributes[attr].nodeValue for child in node.childNodes: if child.nodeType != node.TEXT_NODE: result[child.nodeName] = self._from_xml_node(child, listnames) return result def find_first_child_named(self, parent, name): """Search a nodes children for the first child with a given name.""" for node in parent.childNodes: if node.nodeName == name: return node return None def find_children_named(self, parent, name): """Return all of a nodes children who have the given name.""" for node in parent.childNodes: if node.nodeName == name: yield node def extract_text(self, node): """Get the text field contained by the given node.""" if len(node.childNodes) == 1: child = node.childNodes[0] if child.nodeType == child.TEXT_NODE: return child.nodeValue return "" def default(self, datastring): return {'body': self._from_xml(datastring)} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/common/cfg.py0000644000175000017500000026233000000000000017715 0ustar00coreycorey00000000000000# copyright 2011 OpenStack Foundation # Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
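# A minimal sketch of the declare/register/read pattern this module is
# built around, kept entirely inside a comment so the illustrative names
# cannot clash with the real CONF object defined below (assumes nothing
# beyond ``oslo.config``):
#
#     from oslo_config import cfg
#
#     opts = [cfg.PortOpt('bind_port', default=8779,
#                         help='Port the API server will listen on.')]
#     conf = cfg.ConfigOpts()
#     conf.register_opts(opts)
#     conf(args=[])  # parse an (empty) command line; defaults apply
#     assert conf.bind_port == 8779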
"""Routines for configuring Trove.""" import os.path from oslo_config import cfg from oslo_config.cfg import NoSuchOptError from oslo_config import types from oslo_log import log as logging from oslo_middleware import cors from osprofiler import opts as profiler from trove.common.i18n import _ from trove.version import version_info as version ListOfPortsType = types.Range(1, 65535) LOG = logging.getLogger(__name__) UNKNOWN_SERVICE_ID = 'unknown-service-id-error' HEAT_REMOVAL_DEPRECATION_WARNING = _('Support for heat templates in Trove is ' 'scheduled for removal. You will no ' 'longer be able to provide a heat ' 'template to Trove for the provisioning ' 'of resources.') path_opts = [ cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../')), help='Directory where the Trove python module is installed.'), ] versions_opts = [ cfg.StrOpt('public_endpoint', default=None, help='Public URL to use for versions endpoint. The default ' 'is None, which will use the request\'s host_url ' 'attribute to populate the URL base. If Trove is ' 'operating behind a proxy, you will want to change ' 'this to represent the proxy\'s URL.') ] common_opts = [ cfg.IPOpt('bind_host', default='0.0.0.0', help='IP address the API server will listen on.'), cfg.PortOpt('bind_port', default=8779, help='Port the API server will listen on.'), cfg.StrOpt('api_paste_config', default="api-paste.ini", help='File name for the paste.deploy config for trove-api.'), cfg.BoolOpt('trove_volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.ListOpt('admin_roles', default=['admin'], help='Roles to add to an admin user.'), cfg.BoolOpt('update_status_on_fail', default=True, help='Set the service and instance task statuses to ERROR ' 'when an instance fails to become active within the ' 'configured usage_timeout.'), cfg.URIOpt('nova_compute_url', help='URL without the tenant segment.'), cfg.StrOpt('nova_compute_service_type', default='compute', help='Service type to use when searching catalog.'), cfg.StrOpt('nova_compute_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.StrOpt('nova_client_version', default='2.12', help="The version of the compute service client."), cfg.StrOpt('glance_client_version', default='2', help="The version of the image service client."), cfg.BoolOpt('nova_api_insecure', default=False, help="Allow to perform insecure SSL requests to nova."), cfg.StrOpt('nova_keypair', default=None, help="Name of a Nova keypair to inject into a database " "instance to enable SSH access. 
The keypair should be " "prior created by the cloud operator."), cfg.URIOpt('neutron_url', help='URL without the tenant segment.'), cfg.StrOpt('neutron_service_type', default='network', help='Service type to use when searching catalog.'), cfg.StrOpt('neutron_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.BoolOpt('neutron_api_insecure', default=False, help="Allow to perform insecure SSL requests to neutron."), cfg.URIOpt('cinder_url', help='URL without the tenant segment.'), cfg.StrOpt('cinder_service_type', default='volumev2', help='Service type to use when searching catalog.'), cfg.StrOpt('cinder_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.BoolOpt('cinder_api_insecure', default=False, help="Allow to perform insecure SSL requests to cinder."), cfg.URIOpt('swift_url', help='URL ending in ``AUTH_``.'), cfg.StrOpt('swift_service_type', default='object-store', help='Service type to use when searching catalog.'), cfg.StrOpt('swift_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.BoolOpt('swift_api_insecure', default=False, help="Allow to perform insecure SSL requests to swift."), cfg.URIOpt('glance_url', help='URL ending in ``AUTH_``.'), cfg.StrOpt('glance_service_type', default='image', help='Service type to use when searching catalog.'), cfg.StrOpt('glance_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.StrOpt('trove_url', help='URL without the tenant segment.'), cfg.StrOpt('trove_service_type', default='database', help='Service type to use when searching catalog.'), cfg.StrOpt('trove_endpoint_type', default='publicURL', help='Service endpoint type to use when searching catalog.'), cfg.IPOpt('host', default='0.0.0.0', help='Host to listen for RPC messages.'), cfg.IntOpt('report_interval', default=30, help='The interval (in seconds) which periodic tasks are run.'), cfg.BoolOpt('trove_dns_support', default=False, help='Whether Trove should add DNS entries on create ' '(using Designate DNSaaS).'), cfg.StrOpt('db_api_implementation', default='trove.db.sqlalchemy.api', help='API Implementation for Trove database access.'), cfg.StrOpt('dns_driver', default='trove.dns.driver.DnsDriver', help='Driver for DNSaaS.'), cfg.StrOpt('dns_instance_entry_factory', default='trove.dns.driver.DnsInstanceEntryFactory', help='Factory for adding DNS entries.'), cfg.HostnameOpt('dns_hostname', default="localhost", help='Hostname used for adding DNS entries.'), cfg.StrOpt('dns_account_id', default="", help='Tenant ID for DNSaaS.'), cfg.URIOpt('dns_endpoint_url', default="http://0.0.0.0", help='Endpoint URL for DNSaaS.'), cfg.StrOpt('dns_service_type', default="", help='Service Type for DNSaaS.'), cfg.StrOpt('dns_region', default="", help='Region name for DNSaaS.'), cfg.URIOpt('dns_auth_url', default="http://0.0.0.0", help='Authentication URL for DNSaaS.'), cfg.StrOpt('dns_user_domain_id', default="default", help='Keystone user domain ID used for auth'), cfg.StrOpt('dns_project_domain_id', default="default", help='Keystone project domain ID used for auth'), cfg.StrOpt('dns_domain_name', default="", help='Domain name used for adding DNS entries.'), cfg.StrOpt('dns_username', default="", secret=True, help='Username for DNSaaS.'), cfg.StrOpt('dns_passkey', default="", secret=True, help='Passkey for DNSaaS.'), cfg.URIOpt('dns_management_base_url', default="http://0.0.0.0", help='Management URL for 
DNSaaS.'), cfg.IntOpt('dns_ttl', default=300, help='Time (in seconds) before a refresh of DNS information ' 'occurs.'), cfg.StrOpt('dns_domain_id', default="", help='Domain ID used for adding DNS entries.'), cfg.IntOpt('users_page_size', default=20, help='Page size for listing users.'), cfg.IntOpt('databases_page_size', default=20, help='Page size for listing databases.'), cfg.IntOpt('instances_page_size', default=20, help='Page size for listing instances.'), cfg.IntOpt('clusters_page_size', default=20, help='Page size for listing clusters.'), cfg.IntOpt('backups_page_size', default=20, help='Page size for listing backups.'), cfg.IntOpt('configurations_page_size', default=20, help='Page size for listing configurations.'), cfg.IntOpt('modules_page_size', default=20, help='Page size for listing modules.'), cfg.IntOpt('agent_call_low_timeout', default=15, help="Maximum time (in seconds) to wait for Guest Agent " "'quick' requests (such as retrieving a list of " "users or databases)."), cfg.IntOpt('agent_call_high_timeout', default=60 * 5, help="Maximum time (in seconds) to wait for Guest Agent 'slow' " "requests (such as restarting the database)."), cfg.IntOpt('agent_replication_snapshot_timeout', default=36000, help='Maximum time (in seconds) to wait for taking a Guest ' 'Agent replication snapshot.'), cfg.IntOpt('command_process_timeout', default=30, help='Maximum time (in seconds) to wait for out of process ' 'commands to complete.'), # The guest_id opt definition must match the one in cmd/guest.py cfg.StrOpt('guest_id', default=None, help="ID of the Guest Instance."), cfg.IntOpt('state_change_wait_time', default=60 * 10, help='Maximum time (in seconds) to wait for a state change.'), cfg.IntOpt('state_change_poll_time', default=3, help='Interval between state change poll requests (seconds).'), cfg.IntOpt('agent_heartbeat_time', default=10, help='Maximum time (in seconds) for the Guest Agent to reply ' 'to a heartbeat request.'), cfg.IntOpt('agent_heartbeat_expiry', default=60, help='Time (in seconds) after which a guest is considered ' 'unreachable'), cfg.IntOpt('num_tries', default=3, help='Number of times to check if a volume exists.'), cfg.StrOpt('volume_fstype', default='ext3', choices=['ext3', 'ext4', 'xfs'], help='File system type used to format a volume.'), cfg.StrOpt('cinder_volume_type', default=None, help='Volume type to use when provisioning a Cinder volume.'), cfg.StrOpt('format_options', default='-m 5', help='Options to use when formatting a volume.'), cfg.IntOpt('volume_format_timeout', default=120, help='Maximum time (in seconds) to wait for a volume format.'), cfg.StrOpt('mount_options', default='defaults,noatime', help='Options to use when mounting a volume.'), cfg.IntOpt('max_instances_per_tenant', default=10, help='Default maximum number of instances per tenant.', deprecated_name='max_instances_per_user'), cfg.IntOpt('max_accepted_volume_size', default=10, help='Default maximum volume size (in GB) for an instance.'), cfg.IntOpt('max_volumes_per_tenant', default=40, help='Default maximum volume capacity (in GB) spanning across ' 'all Trove volumes per tenant.', deprecated_name='max_volumes_per_user'), cfg.IntOpt('max_backups_per_tenant', default=50, help='Default maximum number of backups created by a tenant.', deprecated_name='max_backups_per_user'), cfg.StrOpt('quota_driver', default='trove.quota.quota.DbQuotaDriver', help='Default driver to use for quota checks.'), cfg.StrOpt('taskmanager_queue', default='taskmanager', help='Message queue name the Taskmanager will 
listen to.'), cfg.StrOpt('conductor_queue', default='trove-conductor', help='Message queue name the Conductor will listen on.'), cfg.IntOpt('trove_conductor_workers', help='Number of workers for the Conductor service. The default ' 'will be the number of CPUs available.'), cfg.BoolOpt('use_nova_server_config_drive', default=True, help='Use config drive for file injection when booting ' 'instance.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('default_datastore', default=None, help='The default datastore id or name to use if one is not ' 'provided by the user. If the default value is None, the field ' 'becomes required in the instance create request.'), cfg.StrOpt('datastore_manager', default=None, help='Manager class in the Guest Agent, set up by the ' 'Taskmanager on instance provision.'), cfg.StrOpt('block_device_mapping', default='vdb', help='Block device to map onto the created instance.'), cfg.IntOpt('server_delete_time_out', default=60, help='Maximum time (in seconds) to wait for a server delete.'), cfg.IntOpt('volume_time_out', default=60, help='Maximum time (in seconds) to wait for a volume attach.'), cfg.IntOpt('reboot_time_out', default=60 * 2, help='Maximum time (in seconds) to wait for a server reboot.'), cfg.IntOpt('dns_time_out', default=60 * 2, help='Maximum time (in seconds) to wait for a DNS entry add.'), cfg.IntOpt('resize_time_out', default=60 * 15, help='Maximum time (in seconds) to wait for a server resize.'), cfg.IntOpt('revert_time_out', default=60 * 10, help='Maximum time (in seconds) to wait for a server resize ' 'revert.'), cfg.IntOpt('cluster_delete_time_out', default=60 * 3, help='Maximum time (in seconds) to wait for a cluster delete.'), cfg.ListOpt('root_grant', default=['ALL'], help="Permissions to grant to the 'root' user."), cfg.BoolOpt('root_grant_option', default=True, help="Assign the 'root' user GRANT permissions."), cfg.IntOpt('http_get_rate', default=200, help="Maximum number of HTTP 'GET' requests (per minute)."), cfg.IntOpt('http_post_rate', default=200, help="Maximum number of HTTP 'POST' requests (per minute)."), cfg.IntOpt('http_delete_rate', default=200, help="Maximum number of HTTP 'DELETE' requests (per minute)."), cfg.IntOpt('http_put_rate', default=200, help="Maximum number of HTTP 'PUT' requests (per minute)."), cfg.IntOpt('http_mgmt_post_rate', default=200, help="Maximum number of management HTTP 'POST' requests " "(per minute)."), cfg.BoolOpt('hostname_require_valid_ip', default=True, help='Require user hostnames to be valid IP addresses.', deprecated_name='hostname_require_ipv4'), cfg.BoolOpt('trove_security_groups_support', default=True, help='Whether Trove should add Security Groups on create.'), cfg.StrOpt('trove_security_group_name_prefix', default='trove_sg', help='Prefix to use when creating Security Groups.'), cfg.StrOpt('trove_security_group_rule_cidr', default='0.0.0.0/0', help='CIDR to use when creating Security Group Rules.'), cfg.IntOpt('trove_api_workers', help='Number of workers for the API service. 
The default will ' 'be the number of CPUs available.'), cfg.IntOpt('usage_sleep_time', default=5, help='Time to sleep during the check for an active Guest.'), cfg.StrOpt('region', default='LOCAL_DEV', help='The region this service is located.'), cfg.StrOpt('backup_runner', default='trove.guestagent.backup.backup_types.InnoBackupEx', help='Runner to use for backups.'), cfg.DictOpt('backup_runner_options', default={}, help='Additional options to be passed to the backup runner.'), cfg.BoolOpt('verify_swift_checksum_on_restore', default=True, help='Enable verification of Swift checksum before starting ' 'restore. Makes sure the checksum of original backup matches ' 'the checksum of the Swift backup file.'), cfg.BoolOpt('verify_replica_volume_size', default=True, help='Require the replica volume size to be greater than ' 'or equal to the size of the master volume ' 'during replica creation.'), cfg.StrOpt('storage_strategy', default='SwiftStorage', help="Default strategy to store backups."), cfg.StrOpt('storage_namespace', default='trove.common.strategies.storage.swift', help='Namespace to load the default storage strategy from.'), cfg.StrOpt('backup_swift_container', default='database_backups', help='Swift container to put backups in.'), cfg.BoolOpt('backup_use_gzip_compression', default=True, help='Compress backups using gzip.'), cfg.BoolOpt('backup_use_openssl_encryption', default=True, help='Encrypt backups using OpenSSL.'), cfg.StrOpt('backup_aes_cbc_key', default='default_aes_cbc_key', help='Default OpenSSL aes_cbc key.'), cfg.BoolOpt('backup_use_snet', default=False, help='Send backup files over snet.'), cfg.IntOpt('backup_chunk_size', default=2 ** 16, help='Chunk size (in bytes) to stream to the Swift container. ' 'This should be in multiples of 128 bytes, since this is the ' 'size of an md5 digest block allowing the process to update ' 'the file checksum during streaming. 
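# NOTE(editor): minimal sketch (not Trove's implementation) of the streaming
# pattern described by the backup_chunk_size help above: read the backup in
# fixed-size chunks and update an MD5 digest incrementally, so the checksum
# is known by the time the last chunk reaches Swift. A chunk size that is a
# multiple of 128 bytes keeps every update aligned to whole md5 blocks.
import hashlib

def stream_with_checksum(fileobj, write_chunk, chunk_size=2 ** 16):
    """Feed 'fileobj' to 'write_chunk' chunk by chunk; return the md5 hex."""
    checksum = hashlib.md5()
    while True:
        chunk = fileobj.read(chunk_size)
        if not chunk:
            break
        checksum.update(chunk)
        write_chunk(chunk)  # e.g. append a segment to the Swift container
    return checksum.hexdigest()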
' 'See: http://stackoverflow.com/questions/1131220/'), cfg.IntOpt('backup_segment_max_size', default=2 * (1024 ** 3), help='Maximum size (in bytes) of each segment of the backup ' 'file.'), cfg.StrOpt('remote_dns_client', default='trove.common.clients.dns_client', help='Client to send DNS calls to.'), cfg.StrOpt('remote_guest_client', default='trove.common.clients.guest_client', help='Client to send Guest Agent calls to.'), cfg.StrOpt('remote_nova_client', default='trove.common.clients_admin.nova_client_trove_admin', help='Client to send Nova calls to.'), cfg.StrOpt('remote_neutron_client', default='trove.common.clients_admin.neutron_client_trove_admin', help='Client to send Neutron calls to.'), cfg.StrOpt('remote_cinder_client', default='trove.common.clients_admin.cinder_client_trove_admin', help='Client to send Cinder calls to.'), cfg.StrOpt('remote_swift_client', default='trove.common.clients.swift_client', help='Client to send Swift calls to.'), cfg.StrOpt('remote_trove_client', default='trove.common.trove_remote.trove_client', help='Client to send Trove calls to.'), cfg.StrOpt('remote_glance_client', default='trove.common.clients_admin.glance_client_trove_admin', help='Client to send Glance calls to.'), cfg.StrOpt('exists_notification_transformer', help='Transformer for exists notifications.'), cfg.IntOpt('exists_notification_interval', default=3600, help='Seconds to wait between pushing events.'), cfg.IntOpt('quota_notification_interval', help='Seconds to wait between pushing events.'), cfg.DictOpt('notification_service_id', default={'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b', 'percona': 'fd1723f5-68d2-409c-994f-a4a197892a17', 'pxc': '75a628c3-f81b-4ffb-b10a-4087c26bc854', 'redis': 'b216ffc5-1947-456c-a4cf-70f94c05f7d0', 'cassandra': '459a230d-4e97-4344-9067-2a54a310b0ed', 'couchbase': 'fa62fe68-74d9-4779-a24e-36f19602c415', 'mongodb': 'c8c907af-7375-456f-b929-b637ff9209ee', 'postgresql': 'ac277e0d-4f21-40aa-b347-1ea31e571720', 'couchdb': 'f0a9ab7b-66f7-4352-93d7-071521d44c7c', 'vertica': 'a8d805ae-a3b2-c4fd-gb23-b62cee5201ae', 'db2': 'e040cd37-263d-4869-aaa6-c62aa97523b5', 'mariadb': '7a4f82cc-10d2-4bc6-aadc-d9aacc2a3cb5'}, help='Unique ID to tag notification events.'), cfg.StrOpt('network_label_regex', default='^private$', help='Regular expression to match Trove network labels.'), cfg.StrOpt('ip_regex', default=None, help='List IP addresses that match this regular expression.'), cfg.StrOpt('black_list_regex', default=None, help='Exclude IP addresses that match this regular ' 'expression.'), cfg.StrOpt('cloudinit_location', default='/etc/trove/cloudinit', help='Path to folder with cloudinit scripts.'), cfg.StrOpt('injected_config_location', default='/etc/trove/conf.d', help='Path to folder on the Guest where config files will be ' 'injected during instance creation.'), cfg.StrOpt('guest_config', default='/etc/trove/trove-guestagent.conf', help='Path to the Guest Agent config file to be injected ' 'during instance creation.'), cfg.StrOpt('guest_info', default='guest_info.conf', help='The guest info filename found in the injected config ' 'location. If a full path is specified then it will ' 'be used as the path to the guest info file'), cfg.DictOpt('datastore_registry_ext', default=dict(), help='Extension for default datastore managers. 
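# NOTE(editor): the remote_*_client options above hold dotted import paths.
# Trove resolves such paths at runtime with oslo.utils, roughly as sketched
# below; the wrapper name 'load_remote_client' is hypothetical.
from oslo_utils.importutils import import_class

def load_remote_client(conf, option_name):
    """Import and return the client factory named by a dotted-path option."""
    dotted_path = getattr(conf, option_name)  # e.g. conf.remote_nova_client
    return import_class(dotted_path)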
' 'Allows the use of custom managers for each of ' 'the datastores supported by Trove.'), cfg.StrOpt('template_path', default='/etc/trove/templates/', help='Path which leads to datastore templates.'), cfg.BoolOpt('sql_query_logging', default=False, help='Allow insecure logging while ' 'executing queries through SQLAlchemy.'), cfg.ListOpt('expected_filetype_suffixes', default=['json'], help='Filetype endings not to be reattached to an ID ' 'by the utils method correct_id_with_req.'), cfg.ListOpt('management_networks', default=[], deprecated_name='default_neutron_networks', help='List of IDs for management networks which should be ' 'attached to the instance regardless of what NICs ' 'are specified in the create API call. Currently only ' 'one management network is allowed.'), cfg.ListOpt('management_security_groups', default=[], help='List of the security group IDs that are applied on the ' 'management port of the database instance.'), cfg.IntOpt('max_header_line', default=16384, help='Maximum line size of message headers to be accepted. ' 'max_header_line may need to be increased when using ' 'large tokens (typically those generated by the ' 'Keystone v3 API with big service catalogs).'), cfg.StrOpt('conductor_manager', default='trove.conductor.manager.Manager', help='Qualified class name to use for conductor manager.'), cfg.StrOpt('network_driver', default='trove.network.nova.NovaNetwork', help="Describes the actual network manager used for " "the management of network attributes " "(security groups, floating IPs, etc.)."), cfg.IntOpt('usage_timeout', default=60 * 30, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.IntOpt('restore_usage_timeout', default=36000, help='Maximum time (in seconds) to wait for a Guest instance ' 'restored from a backup to become active.'), cfg.IntOpt('cluster_usage_timeout', default=36000, help='Maximum time (in seconds) to wait for a cluster to ' 'become active.'), cfg.IntOpt('timeout_wait_for_service', default=120, help='Maximum time (in seconds) to wait for a service to ' 'become alive.'), cfg.StrOpt('module_aes_cbc_key', default='module_aes_cbc_key', help='OpenSSL aes_cbc key for module encryption.'), cfg.ListOpt('module_types', default=['ping', 'new_relic_license'], help='A list of module types supported. 
A module type ' 'corresponds to the name of a ModuleDriver.'), cfg.IntOpt('module_reapply_max_batch_size', default=50, help='The maximum number of instances to reapply a module to ' 'at the same time.'), cfg.IntOpt('module_reapply_min_batch_delay', default=2, help='The minimum delay (in seconds) between subsequent ' 'module batch reapply executions.'), cfg.StrOpt('guest_log_container_name', default='database_logs', help='Name of container that stores guest log components.'), cfg.IntOpt('guest_log_limit', default=1000000, help='Maximum size of a chunk saved in guest log container.'), cfg.IntOpt('guest_log_expiry', default=2592000, help='Expiry (in seconds) of objects in guest log container.'), cfg.BoolOpt('enable_secure_rpc_messaging', default=True, help='Should RPC messaging traffic be secured by encryption.'), cfg.StrOpt('taskmanager_rpc_encr_key', default='bzH6y0SGmjuoY0FNSTptrhgieGXNDX6PIhvz', help='Key (OpenSSL aes_cbc) for taskmanager RPC encryption.'), cfg.StrOpt('inst_rpc_key_encr_key', default='emYjgHFqfXNB1NGehAFIUeoyw4V4XwWHEaKP', help='Key (OpenSSL aes_cbc) to encrypt instance keys in DB.'), cfg.StrOpt('instance_rpc_encr_key', help='Key (OpenSSL aes_cbc) for instance RPC encryption.'), ] database_opts = [ cfg.StrOpt('connection', default='sqlite:///trove_test.sqlite', help='SQL Connection.', secret=True, deprecated_name='sql_connection', deprecated_group='DEFAULT'), cfg.IntOpt('idle_timeout', default=3600, deprecated_name='sql_idle_timeout', deprecated_group='DEFAULT'), cfg.BoolOpt('query_log', default=False, deprecated_name='sql_query_log', deprecated_group='DEFAULT', deprecated_for_removal=True), cfg.BoolOpt('sqlite_synchronous', default=True, help='If True, SQLite uses synchronous mode.'), cfg.StrOpt('slave_connection', secret=True, help='The SQLAlchemy connection string to use to connect to the' ' slave database.'), cfg.StrOpt('mysql_sql_mode', default='TRADITIONAL', help='The SQL mode to be used for MySQL sessions. ' 'This option, including the default, overrides any ' 'server-set SQL mode. To use whatever SQL mode ' 'is set by the server configuration, ' 'set this to no value. Example: mysql_sql_mode='), cfg.IntOpt('max_pool_size', help='Maximum number of SQL connections to keep open in a ' 'pool.'), cfg.IntOpt('max_retries', default=10, help='Maximum number of database connection retries ' 'during startup. 
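# NOTE(editor): hedged example of the [database] options above as they might
# appear in trove.conf; every value is a placeholder, not a recommendation.
# The group is registered as 'database' near the end of this module, so code
# reads these values as CONF.database.connection and so on.
#
#   [database]
#   connection = mysql+pymysql://trove:TROVE_DBPASS@controller/trove
#   max_retries = 10
#   retry_interval = 10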
Set to -1 to specify an infinite ' 'retry count.'), cfg.IntOpt('retry_interval', default=10, help='Interval between retries of opening a SQL connection.'), cfg.IntOpt('max_overflow', help='If set, use this value for max_overflow with ' 'SQLAlchemy.'), cfg.IntOpt('connection_debug', default=0, help='Verbosity of SQL debugging information: 0=None, ' '100=Everything.'), cfg.BoolOpt('connection_trace', default=False, help='Add Python stack traces to SQL as comment strings.'), cfg.IntOpt('pool_timeout', help='If set, use this value for pool_timeout with ' 'SQLAlchemy.'), ] # Datastore specific option groups # Mysql mysql_group = cfg.OptGroup( 'mysql', title='MySQL options', help="Oslo option group designed for MySQL datastore") mysql_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["3306"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='InnoBackupEx', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='MysqlGTIDReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.mysql_gtid', help='Namespace to load replication strategies from.'), cfg.StrOpt('mount_point', default='/var/lib/mysql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.IntOpt('usage_timeout', default=400, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.mysql_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.mysql_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.DictOpt('backup_incremental_strategy', default={'InnoBackupEx': 'InnoBackupExIncremental'}, help='Incremental Backup Runner based on the default ' 'strategy. 
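# NOTE(editor): hedged trove.conf fragment exercising the [mysql] options
# above; the values shown simply restate the defaults.
#
#   [mysql]
#   tcp_ports = 3306
#   backup_strategy = InnoBackupEx
#   replication_strategy = MysqlGTIDReplication
#   mount_point = /var/lib/mysql
#   volume_support = True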
For strategies that do not implement an ' 'incremental backup, the runner will use the default full ' 'backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for mysql.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root'], help='Users to exclude when listing users.', deprecated_name='ignore_users', deprecated_group='DEFAULT'), cfg.ListOpt('ignore_dbs', default=['mysql', 'information_schema', 'performance_schema', 'sys'], help='Databases to exclude when listing databases.', deprecated_name='ignore_dbs', deprecated_group='DEFAULT'), cfg.StrOpt('guest_log_exposed_logs', default='general,slow_query', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=1000, help='The time in milliseconds that a statement must take in ' 'in order to be logged in the slow_query log.', deprecated_for_removal=True, deprecated_reason='Will be replaced by a configuration group ' 'option: long_query_time'), cfg.IntOpt('default_password_length', default=36, help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # Percona percona_group = cfg.OptGroup( 'percona', title='Percona options', help="Oslo option group designed for Percona datastore") percona_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["3306"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='InnoBackupEx', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='MysqlGTIDReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.mysql_gtid', help='Namespace to load replication strategies from.'), cfg.StrOpt('replication_user', default='slave_user', help='Userid for replication slave.'), cfg.StrOpt('replication_password', default='NETOU7897NNLOU', help='Password for replication slave user.'), cfg.StrOpt('mount_point', default='/var/lib/mysql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. 
The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.IntOpt('usage_timeout', default=450, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.mysql_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.mysql_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.DictOpt('backup_incremental_strategy', default={'InnoBackupEx': 'InnoBackupExIncremental'}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental backup, the runner will use the default full ' 'backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for percona.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root'], help='Users to exclude when listing users.', deprecated_name='ignore_users', deprecated_group='DEFAULT'), cfg.ListOpt('ignore_dbs', default=['mysql', 'information_schema', 'performance_schema'], help='Databases to exclude when listing databases.', deprecated_name='ignore_dbs', deprecated_group='DEFAULT'), cfg.StrOpt('guest_log_exposed_logs', default='general,slow_query', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=1000, help='The time in milliseconds that a statement must take in ' 'in order to be logged in the slow_query log.', deprecated_for_removal=True, deprecated_reason='Will be replaced by a configuration group ' 'option: long_query_time'), cfg.IntOpt('default_password_length', default='${mysql.default_password_length}', help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # Percona XtraDB Cluster pxc_group = cfg.OptGroup( 'pxc', title='Percona XtraDB Cluster options', help="Oslo option group designed for Percona XtraDB Cluster datastore") pxc_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["3306", "4444", "4567", "4568"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='InnoBackupEx', help='Default strategy to perform backups.'), cfg.StrOpt('replication_strategy', default='MysqlGTIDReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.mysql_gtid', help='Namespace to load replication strategies from.'), cfg.StrOpt('replication_user', default='slave_user', help='Userid for replication slave.'), cfg.StrOpt('mount_point', 
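# NOTE(editor): the '${mysql.default_password_length}' default above relies
# on oslo.config value interpolation, so this group tracks whatever the
# [mysql] group resolves to. A self-contained illustration of the mechanism
# (all names below are made up for the demo):
from oslo_config import cfg as oslo_cfg

def _interpolation_demo():
    demo = oslo_cfg.ConfigOpts()
    demo.register_opts([
        oslo_cfg.IntOpt('base_length', default=36),
        oslo_cfg.IntOpt('derived_length', default='${base_length}'),
    ])
    demo([])  # parse an empty argv so the options become readable
    return demo.derived_length  # expected to resolve to 36 at access time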
default='/var/lib/mysql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.IntOpt('usage_timeout', default=450, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.mysql_impl', help='Namespace to load backup strategies from.'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.mysql_impl', help='Namespace to load restore strategies from.'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.DictOpt('backup_incremental_strategy', default={'InnoBackupEx': 'InnoBackupExIncremental'}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental backup, the runner will use the default full ' 'backup.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root', 'clusterrepuser'], help='Users to exclude when listing users.'), cfg.ListOpt('ignore_dbs', default=['mysql', 'information_schema', 'performance_schema'], help='Databases to exclude when listing databases.'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.IntOpt('min_cluster_member_count', default=3, help='Minimum number of members in PXC cluster.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.api.GaleraCommonAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.taskmanager.GaleraCommonTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.' 
'galera_common.guestagent.GaleraCommonGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.StrOpt('root_controller', default='trove.extensions.pxc.service.PxcRootController', help='Root controller implementation for pxc.'), cfg.StrOpt('guest_log_exposed_logs', default='general,slow_query', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=1000, help='The time in milliseconds that a statement must take in ' 'in order to be logged in the slow_query log.', deprecated_for_removal=True, deprecated_reason='Will be replaced by a configuration group ' 'option: long_query_time'), cfg.IntOpt('default_password_length', default='${mysql.default_password_length}', help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # Redis redis_group = cfg.OptGroup( 'redis', title='Redis options', help="Oslo option group designed for Redis datastore") redis_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["6379", "16379"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='RedisBackup', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='RedisSyncReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.experimental.' 'redis_sync', help='Namespace to load replication strategies from.'), cfg.StrOpt('mount_point', default='/var/lib/redis', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_namespace', default="trove.guestagent.strategies.backup.experimental." "redis_impl", help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default="trove.guestagent.strategies.restore.experimental." "redis_impl", help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'redis.api.RedisAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.redis.' 
'taskmanager.RedisTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.' 'redis.guestagent.RedisGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.StrOpt('root_controller', default='trove.extensions.redis.service.RedisRootController', help='Root controller implementation for redis.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('default_password_length', default=36, help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # Cassandra cassandra_group = cfg.OptGroup( 'cassandra', title='Cassandra options', help="Oslo option group designed for Cassandra datastore") cassandra_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["7000", "7001", "7199", "9042", "9160"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental strategy based on the default backup ' 'strategy. For strategies that do not implement incremental ' 'backups, the runner performs full backup instead.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('backup_strategy', default="NodetoolSnapshot", help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/cassandra', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_namespace', default="trove.guestagent.strategies.backup.experimental." "cassandra_impl", help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default="trove.guestagent.strategies.restore.experimental." 
"cassandra_impl", help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for Cassandra.'), cfg.ListOpt('ignore_users', default=['os_admin'], help='Users to exclude when listing users.'), cfg.ListOpt('ignore_dbs', default=['system', 'system_auth', 'system_traces'], help='Databases to exclude when listing databases.'), cfg.StrOpt('guest_log_exposed_logs', default='system', help='List of Guest Logs to expose for publishing.'), cfg.StrOpt('system_log_level', choices=['ALL', 'TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR'], default='INFO', help='Cassandra log verbosity.'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'cassandra.api.CassandraAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental' '.cassandra.taskmanager.CassandraTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental' '.cassandra.guestagent.CassandraGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.IntOpt('default_password_length', default=36, help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), cfg.BoolOpt('enable_cluster_instance_backup', default=False, help='Allows backup of single instance in the cluster.'), cfg.BoolOpt('enable_saslauthd', default=False, help='Enable the saslauth daemon.'), cfg.StrOpt('user_controller', default='trove.extensions.cassandra.service.' 'CassandraUserController', help='User controller implementation.'), cfg.StrOpt('database_controller', default='trove.extensions.cassandra.service.' 'CassandraDatabaseController', help='Database controller implementation.'), cfg.StrOpt('user_access_controller', default='trove.extensions.cassandra.service.' 'CassandraUserAccessController', help='User access controller implementation.'), cfg.IntOpt('node_sync_time', default=60, help='Time (in seconds) given to a node after a state change ' 'to finish rejoining the cluster.'), ] # Couchbase couchbase_group = cfg.OptGroup( 'couchbase', title='Couchbase options', help="Oslo option group designed for Couchbase datastore") couchbase_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', item_type=ListOfPortsType, default=["8091", "8092", "4369", "11209-11211", "21100-21199"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='CbBackup', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. 
For strategies that do not implement an ' 'incremental, the runner will use the default full backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/couchbase', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.experimental.' 'couchbase_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.experimental.' 'couchbase_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for couchbase.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('default_password_length', default=24, min=6, max=24, help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # MongoDB mongodb_group = cfg.OptGroup( 'mongodb', title='MongoDB options', help="Oslo option group designed for MongoDB datastore") mongodb_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["2500", "27017", "27019"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default='MongoDump', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. 
For strategies that do not implement an ' 'incremental, the runner will use the default full backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/mongodb', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.IntOpt('num_config_servers_per_cluster', default=3, help='The number of config servers to create per cluster.'), cfg.IntOpt('num_query_routers_per_cluster', default=1, help='The number of query routers (mongos) to create ' 'per cluster.'), cfg.IntOpt('query_routers_volume_size', default=10, help='Default volume_size (in GB) for query routers (mongos).'), cfg.IntOpt('config_servers_volume_size', default=10, help='Default volume_size (in GB) for config_servers.'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.BoolOpt('cluster_secure', default=True, help='Create secure clusters. If False then the ' 'Role-Based Access Control will be disabled.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'mongodb.api.MongoDbAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.mongodb.' 'taskmanager.MongoDbTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.' 'mongodb.guestagent.MongoDbGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.experimental.' 'mongo_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.experimental.' 'mongo_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.PortOpt('mongodb_port', default=27017, help='Port for mongod and mongos instances.'), cfg.PortOpt('configsvr_port', default=27019, help='Port for instances running as config servers.'), cfg.ListOpt('ignore_dbs', default=['admin', 'local', 'config'], help='Databases to exclude when listing databases.'), cfg.ListOpt('ignore_users', default=['admin.os_admin', 'admin.root'], help='Users to exclude when listing users.'), cfg.IntOpt('add_members_timeout', default=300, help='Maximum time to wait (in seconds) for a replica set ' 'initialization process to complete.'), cfg.StrOpt('root_controller', default='trove.extensions.mongodb.service.' 
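# NOTE(editor): illustrative arithmetic (not Trove's implementation) showing
# how the MongoDB sizing options above combine when estimating the Cinder
# capacity a cluster consumes beyond its data-bearing members; the defaults
# mirror num_config_servers_per_cluster, config_servers_volume_size,
# num_query_routers_per_cluster and query_routers_volume_size.
def mongodb_overhead_gb(num_config_servers=3, config_volume_gb=10,
                        num_query_routers=1, router_volume_gb=10):
    """Volume capacity (GB) used by config servers and query routers."""
    return (num_config_servers * config_volume_gb
            + num_query_routers * router_volume_gb)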
'MongoDBRootController', help='Root controller implementation for mongodb.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('default_password_length', default=36, help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # PostgreSQL postgresql_group = cfg.OptGroup( 'postgresql', title='PostgreSQL options', help="Oslo option group for the PostgreSQL datastore.") postgresql_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["5432"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.PortOpt('postgresql_port', default=5432, help='The TCP port the server listens on.'), cfg.StrOpt('backup_strategy', default='PgBaseBackup', help='Default strategy to perform backups.'), cfg.DictOpt('backup_incremental_strategy', default={'PgBaseBackup': 'PgBaseBackupIncremental'}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.'), cfg.StrOpt('replication_strategy', default='PostgresqlReplicationStreaming', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.experimental.' 'postgresql_impl', help='Namespace to load replication strategies from.'), cfg.StrOpt('mount_point', default='/var/lib/postgresql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.StrOpt('wal_archive_location', default='/mnt/wal_archive', help="Filesystem path storing WAL archive files when " "WAL-shipping based backups or replication " "is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.experimental.' 'postgresql_impl', help='Namespace to load backup strategies from.'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.experimental.' 'postgresql_impl', help='Namespace to load restore strategies from.'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb'), cfg.ListOpt('ignore_users', default=['os_admin', 'postgres', 'root']), cfg.ListOpt('ignore_dbs', default=['os_admin', 'postgres']), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for postgresql.'), cfg.StrOpt('guest_log_exposed_logs', default='general', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=0, help="The time in milliseconds that a statement must take in " "in order to be logged in the 'general' log. 
A value of " "'0' logs all statements, while '-1' turns off " "statement logging.", deprecated_for_removal=True, deprecated_reason='Will be replaced by configuration group ' 'option: log_min_duration_statement'), cfg.IntOpt('default_password_length', default=36, help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # Apache CouchDB couchdb_group = cfg.OptGroup( 'couchdb', title='CouchDB options', help="Oslo option group designed for CouchDB datastore") couchdb_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["5984"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('mount_point', default='/var/lib/couchdb', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_strategy', default='CouchDBBackup', help='Default strategy to perform backups.'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies' '.backup.experimental.couchdb_impl', help='Namespace to load backup strategies from.'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies' '.restore.experimental.couchdb_impl', help='Namespace to load restore strategies from.'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.'), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. 
The generated password for ' 'the root user is immediately returned in the response of ' 'instance-create as the "password" field.'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for couchdb.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root'], help='Users to exclude when listing users.', deprecated_name='ignore_users', deprecated_group='DEFAULT'), cfg.ListOpt('ignore_dbs', default=['_users', '_replicator'], help='Databases to exclude when listing databases.', deprecated_name='ignore_dbs', deprecated_group='DEFAULT'), cfg.IntOpt('default_password_length', default=36, help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # Vertica vertica_group = cfg.OptGroup( 'vertica', title='Vertica options', help="Oslo option group designed for Vertica datastore") vertica_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', item_type=ListOfPortsType, default=["5433", "5434", "5444", "5450", "4803"], help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', item_type=ListOfPortsType, default=["5433", "4803", "4804", "6453"], help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default=None, help='Default strategy to perform backups.'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. For strategies that do not implement an ' 'incremental, the runner will use the default full backup.'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/vertica', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_namespace', default=None, help='Namespace to load backup strategies from.'), cfg.StrOpt('restore_namespace', default=None, help='Namespace to load restore strategies from.'), cfg.IntOpt('readahead_size', default=2048, help='Size(MB) to be set as readahead_size for data volume'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.IntOpt('cluster_member_count', default=3, help='Number of members in Vertica cluster.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.vertica.' 'api.VerticaAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.vertica.' 'taskmanager.VerticaTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.vertica.' 'guestagent.VerticaGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.StrOpt('root_controller', default='trove.extensions.vertica.service.' 
'VerticaRootController', help='Root controller implementation for Vertica.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('min_ksafety', default=0, help='Minimum k-safety setting permitted for vertica clusters'), cfg.IntOpt('default_password_length', default=36, help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # DB2 db2_group = cfg.OptGroup( 'db2', title='DB2 options', help="Oslo option group designed for DB2 datastore") db2_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["50000"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('mount_point', default="/home/db2inst1/db2inst1", help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_strategy', default='DB2OfflineBackup', help='Default strategy to perform backups.'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.experimental.' 'db2_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.experimental.' 'db2_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.DictOpt('backup_incremental_strategy', default={}, help='Incremental Backup Runner based on the default ' 'strategy. 
For strategies that do not implement an ' 'incremental, the runner will use the default full backup.'), cfg.ListOpt('ignore_users', default=['PUBLIC', 'DB2INST1']), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for db2.'), cfg.StrOpt('guest_log_exposed_logs', default='', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('default_password_length', default=36, help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # MariaDB mariadb_group = cfg.OptGroup( 'mariadb', title='MariaDB options', help="Oslo option group designed for MariaDB datastore") mariadb_opts = [ cfg.BoolOpt('icmp', default=False, help='Whether to permit ICMP.', deprecated_for_removal=True), cfg.ListOpt('tcp_ports', default=["3306", "4444", "4567", "4568"], item_type=ListOfPortsType, help='List of TCP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.ListOpt('udp_ports', default=[], item_type=ListOfPortsType, help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies.backup.experimental' '.mariadb_impl', help='Namespace to load backup strategies from.', deprecated_name='backup_namespace', deprecated_group='DEFAULT'), cfg.StrOpt('backup_strategy', default='MariaBackup', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='MariaDBGTIDReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', default='trove.guestagent.strategies.replication.experimental' '.mariadb_gtid', help='Namespace to load replication strategies from.'), cfg.StrOpt('mount_point', default='/var/lib/mysql', help="Filesystem path for mounting " "volumes if volume support is enabled."), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), cfg.IntOpt('usage_timeout', default=400, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies.restore.experimental' '.mariadb_impl', help='Namespace to load restore strategies from.', deprecated_name='restore_namespace', deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.DictOpt('backup_incremental_strategy', default={'MariaBackup': 'MariaBackupIncremental'}, help='Incremental Backup Runner based on the default ' 'strategy. 
For strategies that do not implement an ' 'incremental backup, the runner will use the default full ' 'backup.', deprecated_name='backup_incremental_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for mysql.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root'], help='Users to exclude when listing users.', deprecated_name='ignore_users', deprecated_group='DEFAULT'), cfg.ListOpt('ignore_dbs', default=['mysql', 'information_schema', 'performance_schema'], help='Databases to exclude when listing databases.', deprecated_name='ignore_dbs', deprecated_group='DEFAULT'), cfg.StrOpt('guest_log_exposed_logs', default='general,slow_query', help='List of Guest Logs to expose for publishing.'), cfg.IntOpt('guest_log_long_query_time', default=1000, help='The time in milliseconds that a statement must take in ' 'in order to be logged in the slow_query log.', deprecated_for_removal=True, deprecated_reason='Will be replaced by a configuration group ' 'option: long_query_time'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.IntOpt('min_cluster_member_count', default=3, help='Minimum number of members in MariaDB cluster.'), cfg.StrOpt('api_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.api.GaleraCommonAPIStrategy', help='Class that implements datastore-specific API logic.'), cfg.StrOpt('taskmanager_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.taskmanager.GaleraCommonTaskManagerStrategy', help='Class that implements datastore-specific task manager ' 'logic.'), cfg.StrOpt('guestagent_strategy', default='trove.common.strategies.cluster.experimental.' 'galera_common.guestagent.GaleraCommonGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), cfg.IntOpt('default_password_length', default='${mysql.default_password_length}', help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), ] # RPC version groups upgrade_levels = cfg.OptGroup( 'upgrade_levels', title='RPC upgrade levels group for handling versions', help='Contains the support version caps (Openstack Release) for ' 'each RPC API') rpcapi_cap_opts = [ cfg.StrOpt( 'taskmanager', default='latest', help='Set a version cap for messages sent to taskmanager services'), cfg.StrOpt( 'guestagent', default='latest', help='Set a version cap for messages sent to guestagent services'), cfg.StrOpt( 'conductor', default='latest', help='Set Openstack Release compatibility for conductor services'), ] network_group = cfg.OptGroup( 'network', title='Networking options', help="Options related to the trove instance networking." ) network_opts = [ cfg.StrOpt( 'public_network_id', default=None, help='ID of the Neutron public network to create floating IP for the ' 'public trove instance. If not given, Trove will try to query ' 'all the public networks and use the first one in the list.' ) ] service_credentials_group = cfg.OptGroup( 'service_credentials', help="Options related to Trove service credentials." 
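# NOTE(editor): hedged trove.conf fragment for the RPC version caps and the
# networking option defined in this region; the UUID is a placeholder.
#
#   [upgrade_levels]
#   taskmanager = latest
#   guestagent = latest
#   conductor = latest
#
#   [network]
#   public_network_id = 11111111-2222-3333-4444-555555555555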
) service_credentials_opts = [ cfg.URIOpt('auth_url', default='https://0.0.0.0/identity/v3', deprecated_name='trove_auth_url', deprecated_group='DEFAULT', help='Keystone authentication URL.'), cfg.StrOpt('username', default='', help="Trove service user name.", deprecated_name='nova_proxy_admin_user', deprecated_group='DEFAULT'), cfg.StrOpt('password', default='', secret=True, help="Trove service user password.", deprecated_name='nova_proxy_admin_pass', deprecated_group='DEFAULT'), cfg.StrOpt('project_id', default='', deprecated_name='nova_proxy_admin_tenant_id', deprecated_group='DEFAULT', help="Trove service project ID."), cfg.StrOpt('project_name', default='', deprecated_name='nova_proxy_admin_tenant_name', deprecated_group='DEFAULT', help="Trove service project name."), cfg.StrOpt('user_domain_name', default='Default', deprecated_name='nova_proxy_admin_user_domain_name', deprecated_group='DEFAULT', help="Keystone domain name of the Trove service user."), cfg.StrOpt('project_domain_name', default='Default', deprecated_name='nova_proxy_admin_project_domain_name', deprecated_group='DEFAULT', help="Keystone domain name of the Trove service project."), cfg.StrOpt('region_name', default='RegionOne', deprecated_name='os_region_name', deprecated_group='DEFAULT', help="Keystone region name of the Trove service project."), ] CONF = cfg.CONF CONF.register_opts(path_opts) CONF.register_opts(versions_opts) CONF.register_opts(common_opts) CONF.register_opts(database_opts, 'database') CONF.register_group(mysql_group) CONF.register_group(percona_group) CONF.register_group(pxc_group) CONF.register_group(redis_group) CONF.register_group(cassandra_group) CONF.register_group(couchbase_group) CONF.register_group(mongodb_group) CONF.register_group(postgresql_group) CONF.register_group(couchdb_group) CONF.register_group(vertica_group) CONF.register_group(db2_group) CONF.register_group(mariadb_group) CONF.register_group(network_group) CONF.register_group(service_credentials_group) CONF.register_opts(mysql_opts, mysql_group) CONF.register_opts(percona_opts, percona_group) CONF.register_opts(pxc_opts, pxc_group) CONF.register_opts(redis_opts, redis_group) CONF.register_opts(cassandra_opts, cassandra_group) CONF.register_opts(couchbase_opts, couchbase_group) CONF.register_opts(mongodb_opts, mongodb_group) CONF.register_opts(postgresql_opts, postgresql_group) CONF.register_opts(couchdb_opts, couchdb_group) CONF.register_opts(vertica_opts, vertica_group) CONF.register_opts(db2_opts, db2_group) CONF.register_opts(mariadb_opts, mariadb_group) CONF.register_opts(network_opts, network_group) CONF.register_opts(service_credentials_opts, service_credentials_group) CONF.register_opts(rpcapi_cap_opts, upgrade_levels) profiler.set_defaults(CONF) logging.register_options(CONF) def custom_parser(parsername, parser): CONF.register_cli_opt(cfg.SubCommandOpt(parsername, handler=parser)) def parse_args(argv, default_config_files=None): cfg.CONF(args=argv[1:], project='trove', version=version.cached_version_string(), default_config_files=default_config_files) def get_ignored_dbs(): try: return get_configuration_property('ignore_dbs') except NoSuchOptError: return [] def get_ignored_users(): try: return get_configuration_property('ignore_users') except NoSuchOptError: return [] def get_configuration_property(property_name): """ Get a configuration property. Try to get it from the datastore-specific section first. If it is not available, retrieve it from the DEFAULT section. 
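# NOTE(editor): minimal sketch of how a Trove entry point wires this module
# together -- call parse_args() above, then read the registered options.
# The config path and the credential values are examples only.
#
#   [service_credentials]
#   auth_url = http://controller/identity/v3
#   username = trove
#   password = TROVE_PASS
#   project_name = service
#   user_domain_name = Default
#   project_domain_name = Default
#   region_name = RegionOne
#
import sys

def example_bootstrap():
    """Parse CLI/config files, then read an option registered above."""
    parse_args(sys.argv, default_config_files=['/etc/trove/trove.conf'])
    return CONF.service_credentials.username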
""" # Fake-integration tests do not define 'CONF.datastore_manager'. # *MySQL* options will # be loaded. This should never occur in a production environment. datastore_manager = CONF.datastore_manager if not datastore_manager: datastore_manager = 'mysql' LOG.warning("Manager name ('datastore_manager') not defined, " "using '%s' options instead.", datastore_manager) try: return CONF.get(datastore_manager).get(property_name) except NoSuchOptError: return CONF.get(property_name) def set_api_config_defaults(): """This method updates all configuration default values.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/clients.py0000644000175000017500000002112500000000000020612 0ustar00coreycorey00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils.importutils import import_class from trove.common import cfg from trove.common import exception from trove.common.strategies.cluster import strategy from cinderclient.v2 import client as CinderClient import glanceclient from keystoneauth1.identity import v3 from keystoneauth1 import session as ka_session from keystoneclient.service_catalog import ServiceCatalog from neutronclient.v2_0 import client as NeutronClient from novaclient.client import Client from swiftclient.client import Connection CONF = cfg.CONF def normalize_url(url): """Adds trailing slash if necessary.""" if not url.endswith('/'): return '%(url)s/' % {'url': url} else: return url def get_endpoint(service_catalog, service_type=None, endpoint_region=None, endpoint_type='publicURL'): """ Select an endpoint from the service catalog We search the full service catalog for services matching both type and region. The client is expected to supply the region matching the service_type. There must be one -- and only one -- successful match in the catalog, otherwise we will raise an exception. Some parts copied from glance/common/auth.py. 
""" endpoint_region = endpoint_region or CONF.service_credentials.region_name if not service_catalog: raise exception.EmptyCatalog() # per IRC chat, X-Service-Catalog will be a v2 catalog regardless of token # format; see https://bugs.launchpad.net/python-keystoneclient/+bug/1302970 # 'token' key necessary to get past factory validation sc = ServiceCatalog.factory({'token': None, 'serviceCatalog': service_catalog}) urls = sc.get_urls(service_type=service_type, region_name=endpoint_region, endpoint_type=endpoint_type) if not urls: raise exception.NoServiceEndpoint(service_type=service_type, endpoint_region=endpoint_region, endpoint_type=endpoint_type) return urls[0] def dns_client(context): from trove.dns.manager import DnsManager return DnsManager() def guest_client(context, id, manager=None): from trove.guestagent.api import API if manager: clazz = strategy.load_guestagent_strategy(manager).guest_client_class else: clazz = API return clazz(context, id) def nova_client(context, region_name=None, password=None): if CONF.nova_compute_url: url = '%(nova_url)s%(tenant)s' % { 'nova_url': normalize_url(CONF.nova_compute_url), 'tenant': context.project_id} else: region = region_name or CONF.service_credentials.region_name url = get_endpoint( context.service_catalog, service_type=CONF.nova_compute_service_type, endpoint_region=region, endpoint_type=CONF.nova_compute_endpoint_type ) client = Client(CONF.nova_client_version, username=context.user, password=password, endpoint_override=url, project_id=context.project_id, project_domain_name=context.project_domain_name, user_domain_name=context.user_domain_name, auth_url=CONF.service_credentials.auth_url, auth_token=context.auth_token, insecure=CONF.nova_api_insecure) client.client.auth_token = context.auth_token client.client.endpoint_override = url return client def create_admin_nova_client(context): """ Creates client that uses trove admin credentials :return: a client for nova for the trove admin """ client = create_nova_client( context, password=CONF.service_credentials.password ) return client def cinder_client(context, region_name=None): if CONF.cinder_url: url = '%(cinder_url)s%(tenant)s' % { 'cinder_url': normalize_url(CONF.cinder_url), 'tenant': context.project_id} else: region = region_name or CONF.service_credentials.region_name url = get_endpoint( context.service_catalog, service_type=CONF.cinder_service_type, endpoint_region=region, endpoint_type=CONF.cinder_endpoint_type ) client = CinderClient.Client(context.user, context.auth_token, project_id=context.project_id, auth_url=CONF.service_credentials.auth_url, insecure=CONF.cinder_api_insecure) client.client.auth_token = context.auth_token client.client.management_url = url return client def swift_client(context, region_name=None): if CONF.swift_url: # swift_url has a different format so doesn't need to be normalized url = '%(swift_url)s%(tenant)s' % {'swift_url': CONF.swift_url, 'tenant': context.project_id} else: region = region_name or CONF.service_credentials.region_name url = get_endpoint(context.service_catalog, service_type=CONF.swift_service_type, endpoint_region=region, endpoint_type=CONF.swift_endpoint_type) client = Connection(preauthurl=url, preauthtoken=context.auth_token, tenant_name=context.project_id, snet=CONF.backup_use_snet, insecure=CONF.swift_api_insecure) return client def neutron_client(context, region_name=None): if CONF.neutron_url: # neutron endpoint url / publicURL does not include tenant segment url = CONF.neutron_url else: region = region_name or 
CONF.service_credentials.region_name url = get_endpoint(context.service_catalog, service_type=CONF.neutron_service_type, endpoint_region=region, endpoint_type=CONF.neutron_endpoint_type) client = NeutronClient.Client(token=context.auth_token, endpoint_url=url, insecure=CONF.neutron_api_insecure) return client def glance_client(context, region_name=None): # We should allow glance to get the endpoint from the service # catalog, but to do so we would need to be able to specify # the endpoint_filter on the API calls, but glance # doesn't currently allow that. As a result, we must # specify the endpoint explicitly. if CONF.glance_url: endpoint_url = '%(url)s%(tenant)s' % { 'url': normalize_url(CONF.glance_url), 'tenant': context.project_id} else: region = region_name or CONF.service_credentials.region_name endpoint_url = get_endpoint( context.service_catalog, service_type=CONF.glance_service_type, endpoint_region=region, endpoint_type=CONF.glance_endpoint_type ) auth = v3.Token(CONF.service_credentials.auth_url, context.auth_token) session = ka_session.Session(auth=auth) return glanceclient.Client( CONF.glance_client_version, endpoint=endpoint_url, session=session ) def create_dns_client(*arg, **kwargs): return import_class(CONF.remote_dns_client)(*arg, **kwargs) def create_guest_client(*arg, **kwargs): return import_class(CONF.remote_guest_client)(*arg, **kwargs) def create_nova_client(*arg, **kwargs): return import_class(CONF.remote_nova_client)(*arg, **kwargs) def create_swift_client(*arg, **kwargs): return import_class(CONF.remote_swift_client)(*arg, **kwargs) def create_cinder_client(*arg, **kwargs): return import_class(CONF.remote_cinder_client)(*arg, **kwargs) def create_neutron_client(*arg, **kwargs): return import_class(CONF.remote_neutron_client)(*arg, **kwargs) def create_glance_client(*arg, **kwargs): return import_class(CONF.remote_glance_client)(*arg, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/clients_admin.py0000644000175000017500000001167500000000000021773 0ustar00coreycorey00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
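# A usage sketch for the session-based admin helpers defined below; the bare
# TroveContext() is illustrative only, since these helpers authenticate with
# the cached keystoneauth1 session built from [service_credentials] rather
# than with the request context.
def _example_list_servers_as_trove_admin():
    from trove.common import clients_admin
    from trove.common.context import TroveContext

    context = TroveContext()
    nova = clients_admin.nova_client_trove_admin(context)
    # Standard novaclient API: list servers visible to the service project.
    return nova.servers.list()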
from cinderclient.v2 import client as CinderClient import glanceclient from keystoneauth1 import loading from keystoneauth1 import session from neutronclient.v2_0 import client as NeutronClient from novaclient.client import Client as NovaClient import swiftclient from trove.common import cfg from trove.common.clients import normalize_url CONF = cfg.CONF _SESSION = None def get_keystone_session(): """Get trove service credential auth session.""" global _SESSION if not _SESSION: loader = loading.get_plugin_loader('password') auth = loader.load_from_options( username=CONF.service_credentials.username, password=CONF.service_credentials.password, project_name=CONF.service_credentials.project_name, user_domain_name=CONF.service_credentials.user_domain_name, project_domain_name=CONF.service_credentials.project_domain_name, auth_url=CONF.service_credentials.auth_url) _SESSION = session.Session(auth=auth) return _SESSION def nova_client_trove_admin(context, region_name=None, password=None): """ Returns a nova client object with the trove admin credentials :param context: original context from user request :type context: trove.common.context.TroveContext :return novaclient: novaclient with trove admin credentials :rtype: novaclient.client.Client """ ks_session = get_keystone_session() client = NovaClient( CONF.nova_client_version, session=ks_session, service_type=CONF.nova_compute_service_type, region_name=region_name or CONF.service_credentials.region_name, insecure=CONF.nova_api_insecure, endpoint_type=CONF.nova_compute_endpoint_type) if CONF.nova_compute_url and CONF.service_credentials.project_id: client.client.endpoint_override = "%s/%s/" % ( normalize_url(CONF.nova_compute_url), CONF.service_credentials.project_id) return client def cinder_client_trove_admin(context, region_name=None): """ Returns a cinder client object with the trove admin credentials :param context: original context from user request :type context: trove.common.context.TroveContext :return cinderclient: cinderclient with trove admin credentials """ ks_session = get_keystone_session() client = CinderClient.Client( session=ks_session, service_type=CONF.cinder_service_type, region_name=region_name or CONF.service_credentials.region_name, insecure=CONF.cinder_api_insecure, endpoint_type=CONF.cinder_endpoint_type) if CONF.cinder_url and CONF.service_credentials.project_id: client.client.management_url = "%s/%s/" % ( normalize_url(CONF.cinder_url), CONF.service_credentials.project_id) return client def neutron_client_trove_admin(context, region_name=None): """ Returns a neutron client object with the trove admin credentials :param context: original context from user request :type context: trove.common.context.TroveContext :return neutronclient: neutronclient with trove admin credentials """ ks_session = get_keystone_session() client = NeutronClient.Client( session=ks_session, service_type=CONF.neutron_service_type, region_name=region_name or CONF.service_credentials.region_name, insecure=CONF.neutron_api_insecure, endpoint_type=CONF.neutron_endpoint_type) if CONF.neutron_url: client.management_url = CONF.neutron_url return client def swift_client_trove_admin(context, region_name=None): ks_session = get_keystone_session() client = swiftclient.Connection( session=ks_session, insecure=CONF.swift_api_insecure, os_options={ 'region_name': region_name or CONF.service_credentials.region_name, 'service_type': CONF.swift_service_type, 'endpoint_type': CONF.swift_endpoint_type } ) return client def glance_client_trove_admin(context, 
region_name=None): ks_session = get_keystone_session() client = glanceclient.Client( version=CONF.glance_client_version, session=ks_session, region_name=region_name or CONF.service_credentials.region_name, service_type=CONF.glance_service_type, interface=CONF.glance_endpoint_type ) return client ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/configurations.py0000644000175000017500000000472400000000000022211 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import stream_codecs class RedisConfParser(object): CODEC = stream_codecs.PropertiesCodec() def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() class MySQLConfParser(object): SERVER_CONF_SECTION = 'mysqld' CODEC = stream_codecs.IniCodec( default_value='1', comment_markers=('#', ';', '!')) def __init__(self, config): self.config = config def parse(self): config_dict = self.CODEC.deserialize(self.config) mysqld_section_dict = config_dict[self.SERVER_CONF_SECTION] return mysqld_section_dict.items() class MongoDBConfParser(object): CODEC = stream_codecs.SafeYamlCodec(default_flow_style=False) def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() class PostgresqlConfParser(object): CODEC = stream_codecs.PropertiesCodec(delimiter='=') def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() class CassandraConfParser(object): CODEC = stream_codecs.SafeYamlCodec(default_flow_style=False) def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() class VerticaConfParser(object): CODEC = stream_codecs.PropertiesCodec(delimiter='=') def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() class DB2ConfParser(object): CODEC = stream_codecs.PropertiesCodec(delimiter='=') def __init__(self, config): self.config = config def parse(self): return self.CODEC.deserialize(self.config).items() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/context.py0000644000175000017500000000547200000000000020644 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Simple class that stores security context information in the web request. Projects should subclass this class if they wish to enhance the request context or provide additional information in their specific WSGI pipeline. """ from oslo_context import context from oslo_log import log as logging from trove.common import local from trove.common.serializable_notification import SerializableNotification LOG = logging.getLogger(__name__) class TroveContext(context.RequestContext): """ Stores information about the security context under which the user accesses the system, as well as additional request information. """ def __init__(self, limit=None, marker=None, service_catalog=None, user_identity=None, instance_id=None, timeout=None, **kwargs): self.limit = limit self.marker = marker self.service_catalog = service_catalog self.user_identity = user_identity self.instance_id = instance_id self.timeout = timeout super(TroveContext, self).__init__(**kwargs) if not hasattr(local.store, 'context'): self.update_store() def to_dict(self): parent_dict = super(TroveContext, self).to_dict() parent_dict.update({'limit': self.limit, 'marker': self.marker, 'service_catalog': self.service_catalog }) if hasattr(self, 'notification'): serialized = SerializableNotification.serialize(self, self.notification) parent_dict['trove_notification'] = serialized return parent_dict def update_store(self): local.store.context = self @classmethod def from_dict(cls, values): n_values = values.pop('trove_notification', None) ctx = super(TroveContext, cls).from_dict( values, limit=values.get('limit'), marker=values.get('marker'), service_catalog=values.get('service_catalog')) if n_values: ctx.notification = SerializableNotification.deserialize( ctx, n_values) return ctx ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/crypto_utils.py0000644000175000017500000000620600000000000021714 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
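# A round-trip sketch for the helpers defined below. encrypt_data() prepends
# the random IV to the ciphertext, so decrypt_data() needs only the key;
# encode_data()/decode_data() add the Base64 wrapping used when the result
# must travel as text.
def _example_crypto_round_trip():
    from trove.common import crypto_utils

    key = crypto_utils.generate_random_key()
    token = crypto_utils.encode_data(
        crypto_utils.encrypt_data(b'payload', key))
    plaintext = crypto_utils.decrypt_data(
        crypto_utils.decode_data(token), key)
    assert plaintext == b'payload'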
# # Encryption/decryption handling import hashlib import os from oslo_utils import encodeutils import random import six import string from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.ciphers import algorithms from cryptography.hazmat.primitives.ciphers import Cipher from cryptography.hazmat.primitives.ciphers import modes from trove.common import stream_codecs IV_BYTE_COUNT = 16 _CRYPT_BACKEND = None def _get_cipher(key, iv): global _CRYPT_BACKEND if not _CRYPT_BACKEND: _CRYPT_BACKEND = default_backend() return Cipher(algorithms.AES(key), modes.CBC(iv), backend=_CRYPT_BACKEND) def _encrypt(key, iv, data): encryptor = _get_cipher(key, iv).encryptor() return encryptor.update(data) + encryptor.finalize() def _decrypt(key, iv, data): decryptor = _get_cipher(key, iv).decryptor() return decryptor.update(data) + decryptor.finalize() def encode_data(data): # NOTE(zhaochao) No need to encoding string object any more, # as Base64Codec is now using oslo_serialization.base64 which # could take care of this. return stream_codecs.Base64Codec().serialize(data) def decode_data(data): return stream_codecs.Base64Codec().deserialize(data) # Pad the data string to an multiple of pad_size def pad_for_encryption(data, pad_size=IV_BYTE_COUNT): pad_count = pad_size - (len(data) % pad_size) return data + six.int2byte(pad_count) * pad_count # Unpad the data string by stripping off excess characters def unpad_after_decryption(data): return data[:len(data) - six.indexbytes(data, -1)] def encrypt_data(data, key, iv_byte_count=IV_BYTE_COUNT): data = encodeutils.to_utf8(data) key = encodeutils.to_utf8(key) md5_key = encodeutils.safe_encode(hashlib.md5(key).hexdigest()) iv = os.urandom(iv_byte_count) iv = iv[:iv_byte_count] data = pad_for_encryption(data, iv_byte_count) encrypted = _encrypt(md5_key, bytes(iv), data) return iv + encrypted def decrypt_data(data, key, iv_byte_count=IV_BYTE_COUNT): key = encodeutils.to_utf8(key) md5_key = encodeutils.safe_encode(hashlib.md5(key).hexdigest()) iv = data[:iv_byte_count] decrypted = _decrypt(md5_key, bytes(iv), bytes(data[iv_byte_count:])) return unpad_after_decryption(decrypted) def generate_random_key(length=32, chars=None): chars = chars if chars else (string.ascii_uppercase + string.ascii_lowercase + string.digits) return ''.join(random.choice(chars) for _ in range(length)) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.73611 trove-12.1.0.dev92/trove/common/db/0000755000175000017500000000000000000000000017163 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/__init__.py0000644000175000017500000000000000000000000021262 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.73611 trove-12.1.0.dev92/trove/common/db/cassandra/0000755000175000017500000000000000000000000021122 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/cassandra/__init__.py0000644000175000017500000000000000000000000023221 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/cassandra/models.py0000644000175000017500000000254100000000000022761 
0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common.db import models class CassandraSchema(models.DatastoreSchema): """Represents a Cassandra schema and its associated properties. Keyspace names are 32 or fewer alpha-numeric characters and underscores, the first of which is an alpha character. """ @property def _max_schema_name_length(self): return 32 def _is_valid_schema_name(self, value): return not any(c in value for c in r'/\. "$') class CassandraUser(models.DatastoreUser): """Represents a Cassandra user and its associated properties.""" root_username = 'cassandra' @property def _max_user_name_length(self): return 65535 @property def schema_model(self): return CassandraSchema ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.73611 trove-12.1.0.dev92/trove/common/db/couchdb/0000755000175000017500000000000000000000000020572 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/couchdb/__init__.py0000644000175000017500000000000000000000000022671 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/couchdb/models.py0000644000175000017500000000201100000000000022421 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common.db import models class CouchDBSchema(models.DatastoreSchema): """Represents a CouchDB schema and its associated properties.""" @property def _max_schema_name_length(self): return 32 class CouchDBUser(models.DatastoreUser): """Represents a CouchDB user and its associated properties.""" @property def schema_model(self): return CouchDBSchema ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/models.py0000644000175000017500000003410000000000000021016 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six from trove.common import cfg from trove.common.i18n import _ from trove.common import utils CONF = cfg.CONF """ The classes below are generic and can be used for any datastore, but will not provide validation. To add a new datastore create a sub-package (see mysql for example) and create new child classes inheriting from these generic classes. As a guideline, for new datastores the following class methods/variables should be overridden if validation is desired (see their docstrings for additional info): DatastoreModelsBase: __init__ DatastoreSchema: _max_schema_name_length _is_valid_schema_name verify_dict _create_checks _delete_checks DatastoreUser: _is_valid_user_name _is_valid_host_name _is_valid_password _is_valid_database verify_dict _create_checks _delete_checks """ class DatastoreModelsBase(object): """Base model for the datastore schema and user models.""" def serialize(self): return self.__dict__ def _deserialize(self, obj): self.__dict__ = obj def __repr__(self): return str(self.serialize()) @classmethod def deserialize(cls, value, verify=True): item = cls(deserializing=True) item._deserialize(value) if verify: item.verify_dict() return item @abc.abstractmethod def verify_dict(self): """Validate the object's data dictionary. :returns: True if dictionary is valid. """ @staticmethod def check_string(value, desc): """Check if the value is a string/unicode. :param value: Value to check. :param desc: Description for exception message. :raises: ValueError if not a string/unicode. """ if not isinstance(value, six.string_types): raise ValueError(_("%(desc)s is not a string. Type = %(t)s.") % {'desc': desc, 't': type(value)}) class DatastoreSchema(DatastoreModelsBase): """Represents a database schema.""" def __init__(self, name=None, deserializing=False): self._name = None self._collate = None self._character_set = None # If both or neither are passed in this is a bug. if bool(deserializing) == bool(name): raise RuntimeError(_("Bug in DatastoreSchema()")) if not deserializing: self.name = name def __str__(self): return str(self.name) @property def name(self): return self._name @name.setter def name(self, value): self._validate_schema_name(value) self._name = value def _validate_schema_name(self, value): """Perform checks on a given schema name. :param value: Validated schema name. :type value: string :raises: ValueError On validation errors. """ if not value: raise ValueError(_("Schema name empty.")) self.check_string(value, 'Schema name') if self._max_schema_name_length and (len(value) > self._max_schema_name_length): raise ValueError(_("Schema name '%(name)s' is too long. " "Max length = %(max_length)d.") % {'name': value, 'max_length': self._max_schema_name_length}) elif not self._is_valid_schema_name(value): raise ValueError(_("'%s' is not a valid schema name.") % value) @property def _max_schema_name_length(self): """Return the maximum valid schema name length if any. :returns: Maximum schema name length or None if unlimited. """ return None def _is_valid_schema_name(self, value): """Validate a given schema name. :param value: Validated schema name. 
:type value: string :returns: TRUE if valid, FALSE otherwise. """ return True def verify_dict(self): """Check that the object's dictionary values are valid by reloading them via the property setters. The checkers should raise the ValueError exception if invalid. All mandatory fields should be checked. """ self.name = self._name @property def ignored_dbs(self): return cfg.get_ignored_dbs() def is_ignored(self): return self.name in self.ignored_dbs def check_reserved(self): """Check if the name is on the ignore_dbs list, meaning it is reserved. :raises: ValueError if name is on the reserved list. """ if self.is_ignored(): raise ValueError(_('Database name "%(name)s" is on the reserved ' 'list: %(reserved)s.') % {'name': self.name, 'reserved': self.ignored_dbs}) def _create_checks(self): """Checks to be performed before database can be created.""" self.check_reserved() def check_create(self): """Check if the database can be created. :raises: ValueError if the schema is not valid for create. """ try: self._create_checks() except ValueError as e: raise ValueError(_('Cannot create database: %(error)s') % {'error': str(e)}) def _delete_checks(self): """Checks to be performed before database can be deleted.""" self.check_reserved() def check_delete(self): """Check if the database can be deleted. :raises: ValueError if the schema is not valid for delete. """ try: self._delete_checks() except ValueError as e: raise ValueError(_('Cannot delete database: %(error)s') % {'error': str(e)}) class DatastoreUser(DatastoreModelsBase): """Represents a datastore user.""" _HOSTNAME_WILDCARD = '%' root_username = 'root' def __init__(self, name=None, password=None, host=None, databases=None, deserializing=False): self._name = None self._password = None self._host = self._HOSTNAME_WILDCARD self._databases = [] self._is_root = False if not deserializing: self.name = name if password: self.password = password if host: self.host = host if databases: self.databases = databases @classmethod def root(cls, name=None, password=None, *args, **kwargs): if not name: name = cls.root_username if not password: password = utils.generate_random_password() user = cls(name, password, *args, **kwargs) user.make_root() return user @property def name(self): return self._name @name.setter def name(self, value): self._validate_user_name(value) self._name = value @property def password(self): return self._password @password.setter def password(self, value): self.check_string(value, "User password") if self._is_valid_password(value): self._password = value else: raise ValueError(_("'%s' is not a valid password.") % value) def _add_database(self, value): serial_db = self._build_database_schema(value).serialize() if self._is_valid_database(serial_db): self._databases.append(serial_db) @property def databases(self): return self._databases @databases.setter def databases(self, value): if isinstance(value, list): for dbname in value: self._add_database(dbname) else: self._add_database(value) @property def host(self): if self._host is None: return self._HOSTNAME_WILDCARD return self._host @host.setter def host(self, value): self.check_string(value, "User host name") if self._is_valid_host_name(value): self._host = value else: raise ValueError(_("'%s' is not a valid hostname.") % value) def _build_database_schema(self, name): """Build a schema for this user. :type name: string """ return self.schema_model(name) def deserialize_schema(self, value): """Deserialize a user's databases value. 
:type value: dict """ return self.schema_model.deserialize(value) def _validate_user_name(self, value): """Perform validations on a given user name. :param value: Validated user name. :type value: string :raises: ValueError On validation errors. """ if not value: raise ValueError(_("User name empty.")) self.check_string(value, "User name") if self._max_user_name_length and (len(value) > self._max_user_name_length): raise ValueError(_("User name '%(name)s' is too long. " "Max length = %(max_length)d.") % {'name': value, 'max_length': self._max_user_name_length}) elif not self._is_valid_user_name(value): raise ValueError(_("'%s' is not a valid user name.") % value) @property def _max_user_name_length(self): """Return the maximum valid user name length if any. :returns: Maximum user name length or None if unlimited. """ return None def _is_valid_user_name(self, value): """Validate a given user name. :param value: User name to be validated. :type value: string :returns: TRUE if valid, FALSE otherwise. """ return True def _is_valid_host_name(self, value): """Validate a given host name. :param value: Host name to be validated. :type value: string :returns: TRUE if valid, FALSE otherwise. """ return True def _is_valid_password(self, value): """Validate a given password. :param value: Password to be validated. :type value: string :returns: TRUE if valid, FALSE otherwise. """ return True def _is_valid_database(self, value): """Validate a given database (serialized schema object). :param value: The database to be validated. :type value: dict :returns: TRUE if valid, FALSE otherwise. :raises: ValueError if operation not allowed. """ return value not in self.databases def verify_dict(self): """Check that the object's dictionary values are valid by reloading them via the property setters. The checkers should raise the ValueError exception if invalid. All mandatory fields should be checked. """ self.name = self._name if self.__dict__.get('_password'): self.password = self._password else: self._password = None if self.__dict__.get('_host'): self.host = self._host else: self._host = self._HOSTNAME_WILDCARD if self.__dict__.get('_databases'): for database in self._databases: # Create the schema for validation only self.deserialize_schema(database) else: self._databases = [] if not self.__dict__.get('_is_root'): self._is_root = False @property def schema_model(self): return DatastoreSchema @property def ignored_users(self): if self._is_root: return [] return cfg.get_ignored_users() @property def is_ignored(self): return self.name in self.ignored_users def make_root(self): self._is_root = True def check_reserved(self): """Check if the name is on the ignore_users list, meaning it is reserved. :raises: ValueError if name is on the reserved list. """ if self.is_ignored: raise ValueError(_('User name "%(name)s" is on the reserved ' 'list: %(reserved)s.') % {'name': self.name, 'reserved': self.ignored_users}) def _create_checks(self): """Checks to be performed before user can be created.""" self.check_reserved() def check_create(self): """Check if the user can be created. :raises: ValueError if the user is not valid for create. """ try: self._create_checks() except ValueError as e: raise ValueError(_('Cannot create user: %(error)s') % {'error': str(e)}) def _delete_checks(self): """Checks to be performed before user can be created.""" self.check_reserved() def check_delete(self): """Check if the user can be deleted. :raises: ValueError if the user is not valid for delete. 
""" try: self._delete_checks() except ValueError as e: raise ValueError(_('Cannot delete user: %(error)s') % {'error': str(e)}) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.73611 trove-12.1.0.dev92/trove/common/db/mongodb/0000755000175000017500000000000000000000000020610 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/mongodb/__init__.py0000644000175000017500000000000000000000000022707 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/mongodb/models.py0000644000175000017500000001212600000000000022447 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common.db import models from trove.common.i18n import _ class MongoDBSchema(models.DatastoreSchema): """Represents a MongoDB database and its associated properties.""" @property def _max_schema_name_length(self): return 64 def _is_valid_schema_name(self, value): # check against the invalid character set from # http://docs.mongodb.org/manual/reference/limits return not any(c in value for c in r'/\. "$') class MongoDBUser(models.DatastoreUser): """Represents a MongoDB user and its associated properties. MongoDB users are identified using their name and database. Trove stores this as . 
""" root_username = 'admin.root' def __init__(self, name=None, password=None, host=None, databases=None, deserializing=False): super(MongoDBUser, self).__init__(name=name, password=password, host=host, databases=databases, deserializing=deserializing) if not deserializing: self._init_roles() @property def username(self): return self._username @username.setter def username(self, value): self._update_name(username=value) @property def database(self): return MongoDBSchema.deserialize(self._database) @database.setter def database(self, value): self._update_name(database=value) def _validate_user_name(self, value): self._update_name(name=value) def _update_name(self, name=None, username=None, database=None): """Keep the name, username, and database values in sync.""" if name: (database, username) = self._parse_name(name) if not (database and username): missing = 'username' if self.database else 'database' raise ValueError(_("MongoDB user's name missing %s.") % missing) else: if username: if not self.database: raise ValueError(_('MongoDB user missing database.')) database = self.database.name else: # database if not self.username: raise ValueError(_('MongoDB user missing username.')) username = self.username name = '%s.%s' % (database, username) self._name = name self._username = username self._database = self._build_database_schema(database).serialize() @property def roles(self): return self._roles @roles.setter def roles(self, value): if isinstance(value, list): for role in value: self._add_role(role) else: self._add_role(value) def revoke_role(self, role): if role in self.roles: self._roles.remove(role) def _init_roles(self): if '_roles' not in self.__dict__: self._roles = [] for db in self._databases: self._roles.append({'db': db['_name'], 'role': 'readWrite'}) def _build_database_schema(self, name): return MongoDBSchema(name) def deserialize_schema(self, value): return MongoDBSchema.deserialize(value) @staticmethod def _parse_name(value): """The name will be ., so split it.""" parts = value.split('.', 1) if len(parts) != 2: raise ValueError(_( 'MongoDB user name "%s" not in . format.' 
) % value) return parts[0], parts[1] @property def _max_user_name_length(self): return 128 def _add_role(self, value): if not self._is_valid_role(value): raise ValueError(_('Role %s is invalid.') % value) self._roles.append(value) if value['role'] == 'readWrite': self.databases = value['db'] def _is_valid_role(self, value): if not isinstance(value, dict): return False if not {'db', 'role'} == set(value): return False return True def verify_dict(self): super(MongoDBUser, self).verify_dict() self._init_roles() @property def schema_model(self): return MongoDBSchema def _create_checks(self): super(MongoDBUser, self)._create_checks() if not self.password: raise ValueError(_("MongoDB user to create is missing a " "password.")) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.73611 trove-12.1.0.dev92/trove/common/db/mysql/0000755000175000017500000000000000000000000020330 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/mysql/__init__.py0000644000175000017500000000000000000000000022427 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/mysql/data.py0000644000175000017500000004410400000000000021616 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
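# The two dicts below map charset -> valid collations and collation -> owning
# charset; MySQLSchema (models.py further below) uses them to validate
# user-supplied values. A lookup sketch, with an illustrative helper name:
def _collation_matches_charset(collation_name, charset_name):
    """Return True if the collation belongs to the given character set."""
    # `collation` is the module-level dict defined below, e.g.
    # _collation_matches_charset('utf8_general_ci', 'utf8') is True.
    return collation.get(collation_name) == charset_name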
charset = {"big5": ["big5_chinese_ci", "big5_bin"], "dec8": ["dec8_swedish_ci", "dec8_bin"], "cp850": ["cp850_general_ci", "cp850_bin"], "hp8": ["hp8_english_ci", "hp8_bin"], "koi8r": ["koi8r_general_ci", "koi8r_bin"], "latin1": ["latin1_swedish_ci", "latin1_german1_ci", "latin1_danish_ci", "latin1_german2_ci", "latin1_bin", "latin1_general_ci", "latin1_general_cs", "latin1_spanish_ci"], "latin2": ["latin2_general_ci", "latin2_czech_cs", "latin2_hungarian_ci", "latin2_croatian_ci", "latin2_bin"], "swe7": ["swe7_swedish_ci", "swe7_bin"], "ascii": ["ascii_general_ci", "ascii_bin"], "ujis": ["ujis_japanese_ci", "ujis_bin"], "sjis": ["sjis_japanese_ci", "sjis_bin"], "hebrew": ["hebrew_general_ci", "hebrew_bin"], "tis620": ["tis620_thai_ci", "tis620_bin"], "euckr": ["euckr_korean_ci", "euckr_bin"], "koi8u": ["koi8u_general_ci", "koi8u_bin"], "gb2312": ["gb2312_chinese_ci", "gb2312_bin"], "greek": ["greek_general_ci", "greek_bin"], "cp1250": ["cp1250_general_ci", "cp1250_czech_cs", "cp1250_croatian_ci", "cp1250_bin", "cp1250_polish_ci"], "gbk": ["gbk_chinese_ci", "gbk_bin"], "latin5": ["latin5_turkish_ci", "latin5_bin"], "armscii8": ["armscii8_general_ci", "armscii8_bin"], "utf8": ["utf8_general_ci", "utf8_bin", "utf8_unicode_ci", "utf8_icelandic_ci", "utf8_latvian_ci", "utf8_romanian_ci", "utf8_slovenian_ci", "utf8_polish_ci", "utf8_estonian_ci", "utf8_spanish_ci", "utf8_swedish_ci", "utf8_turkish_ci", "utf8_czech_ci", "utf8_danish_ci", "utf8_lithuanian_ci", "utf8_slovak_ci", "utf8_spanish2_ci", "utf8_roman_ci", "utf8_persian_ci", "utf8_esperanto_ci", "utf8_hungarian_ci", "utf8_sinhala_ci", "utf8_german2_ci", "utf8_croatian_ci", "utf8_unicode_520_ci", "utf8_vietnamese_ci", "utf8_general_mysql500_ci" ], "utf8mb4": ["utf8mb4_0900_ai_ci"], "ucs2": ["ucs2_general_ci", "ucs2_bin", "ucs2_unicode_ci", "ucs2_icelandic_ci", "ucs2_latvian_ci", "ucs2_romanian_ci", "ucs2_slovenian_ci", "ucs2_polish_ci", "ucs2_estonian_ci", "ucs2_spanish_ci", "ucs2_swedish_ci", "ucs2_turkish_ci", "ucs2_czech_ci", "ucs2_danish_ci", "ucs2_lithuanian_ci", "ucs2_slovak_ci", "ucs2_spanish2_ci", "ucs2_roman_ci", "ucs2_persian_ci", "ucs2_esperanto_ci", "ucs2_hungarian_ci", "ucs2_sinhala_ci", "ucs2_german2_ci", "ucs2_croatian_ci", "ucs2_unicode_520_ci", "ucs2_vietnamese_ci", "ucs2_general_mysql500_ci" ], "cp866": ["cp866_general_ci", "cp866_bin"], "keybcs2": ["keybcs2_general_ci", "keybcs2_bin"], "macce": ["macce_general_ci", "macce_bin"], "macroman": ["macroman_general_ci", "macroman_bin"], "cp852": ["cp852_general_ci", "cp852_bin"], "latin7": ["latin7_general_ci", "latin7_estonian_cs", "latin7_general_cs", "latin7_bin"], "utf8mb4": ["utf8mb4_general_ci", "utf8mb4_bin", "utf8mb4_unicode_ci", "utf8mb4_icelandic_ci", "utf8mb4_latvian_ci", "utf8mb4_romanian_ci", "utf8mb4_slovenian_ci", "utf8mb4_polish_ci", "utf8mb4_estonian_ci", "utf8mb4_spanish_ci", "utf8mb4_swedish_ci", "utf8mb4_turkish_ci", "utf8mb4_czech_ci", "utf8mb4_danish_ci", "utf8mb4_lithuanian_ci", "utf8mb4_slovak_ci", "utf8mb4_spanish2_ci", "utf8mb4_roman_ci", "utf8mb4_persian_ci", "utf8mb4_esperanto_ci", "utf8mb4_hungarian_ci", "utf8mb4_sinhala_ci", "utf8mb4_german2_ci", "utf8mb4_croatian_ci", "utf8mb4_unicode_520_ci", "utf8mb4_vietnamese_ci"], "cp1251": ["cp1251_general_ci", "cp1251_bulgarian_ci", "cp1251_ukrainian_ci", "cp1251_bin", "cp1251_general_cs"], "utf16": ["utf16_general_ci", "utf16_bin", "utf16_unicode_ci", "utf16_icelandic_ci", "utf16_latvian_ci", "utf16_romanian_ci", "utf16_slovenian_ci", "utf16_polish_ci", "utf16_estonian_ci", "utf16_spanish_ci", 
"utf16_swedish_ci", "utf16_turkish_ci", "utf16_czech_ci", "utf16_danish_ci", "utf16_lithuanian_ci", "utf16_slovak_ci", "utf16_spanish2_ci", "utf16_roman_ci", "utf16_persian_ci", "utf16_esperanto_ci", "utf16_hungarian_ci", "utf16_sinhala_ci", "utf16_german2_ci", "utf16_croatian_ci", "utf16_unicode_520_ci", "utf16_vietnamese_ci"], "utf16le": ["utf16le_general_ci", "utf16le_bin"], "cp1256": ["cp1256_general_ci", "cp1256_bin"], "cp1257": ["cp1257_general_ci", "cp1257_lithuanian_ci", "cp1257_bin"], "utf32": ["utf32_general_ci", "utf32_bin", "utf32_unicode_ci", "utf32_icelandic_ci", "utf32_latvian_ci", "utf32_romanian_ci", "utf32_slovenian_ci", "utf32_polish_ci", "utf32_estonian_ci", "utf32_spanish_ci", "utf32_swedish_ci", "utf32_turkish_ci", "utf32_czech_ci", "utf32_danish_ci", "utf32_lithuanian_ci", "utf32_slovak_ci", "utf32_spanish2_ci", "utf32_roman_ci", "utf32_persian_ci", "utf32_esperanto_ci", "utf32_hungarian_ci", "utf32_sinhala_ci", "utf32_german2_ci", "utf32_croatian_ci", "utf32_unicode_520_ci", "utf32_vietnamese_ci"], "binary": ["binary"], "geostd8": ["geostd8_general_ci", "geostd8_bin"], "cp932": ["cp932_japanese_ci", "cp932_bin"], "eucjpms": ["eucjpms_japanese_ci", "eucjpms_bin"], "gb18030": ["gb18030_chinese_ci", "gb18030_bin", "gb18030_unicode_520_ci"]} collation = {"big5_chinese_ci": "big5", "big5_bin": "big5", "dec8_swedish_ci": "dec8", "dec8_bin": "dec8", "cp850_general_ci": "cp850", "cp850_bin": "cp850", "hp8_english_ci": "hp8", "hp8_bin": "hp8", "koi8r_general_ci": "koi8r", "koi8r_bin": "koi8r", "latin1_german1_ci": "latin1", "latin1_swedish_ci": "latin1", "latin1_danish_ci": "latin1", "latin1_german2_ci": "latin1", "latin1_bin": "latin1", "latin1_general_ci": "latin1", "latin1_general_cs": "latin1", "latin1_spanish_ci": "latin1", "latin2_czech_cs": "latin2", "latin2_general_ci": "latin2", "latin2_hungarian_ci": "latin2", "latin2_croatian_ci": "latin2", "latin2_bin": "latin2", "swe7_swedish_ci": "swe7", "swe7_bin": "swe7", "ascii_general_ci": "ascii", "ascii_bin": "ascii", "ujis_japanese_ci": "ujis", "ujis_bin": "ujis", "sjis_japanese_ci": "sjis", "sjis_bin": "sjis", "hebrew_general_ci": "hebrew", "hebrew_bin": "hebrew", "tis620_thai_ci": "tis620", "tis620_bin": "tis620", "euckr_korean_ci": "euckr", "euckr_bin": "euckr", "koi8u_general_ci": "koi8u", "koi8u_bin": "koi8u", "gb2312_chinese_ci": "gb2312", "gb2312_bin": "gb2312", "greek_general_ci": "greek", "greek_bin": "greek", "cp1250_general_ci": "cp1250", "cp1250_czech_cs": "cp1250", "cp1250_croatian_ci": "cp1250", "cp1250_bin": "cp1250", "cp1250_polish_ci": "cp1250", "gbk_chinese_ci": "gbk", "gbk_bin": "gbk", "latin5_turkish_ci": "latin5", "latin5_bin": "latin5", "armscii8_general_ci": "armscii8", "armscii8_bin": "armscii8", "utf8_general_ci": "utf8", "utf8_bin": "utf8", "utf8_unicode_ci": "utf8", "utf8_icelandic_ci": "utf8", "utf8_latvian_ci": "utf8", "utf8_romanian_ci": "utf8", "utf8_slovenian_ci": "utf8", "utf8_polish_ci": "utf8", "utf8_estonian_ci": "utf8", "utf8_spanish_ci": "utf8", "utf8_swedish_ci": "utf8", "utf8_turkish_ci": "utf8", "utf8_czech_ci": "utf8", "utf8_danish_ci": "utf8", "utf8_lithuanian_ci": "utf8", "utf8_slovak_ci": "utf8", "utf8_spanish2_ci": "utf8", "utf8_roman_ci": "utf8", "utf8_persian_ci": "utf8", "utf8_esperanto_ci": "utf8", "utf8_hungarian_ci": "utf8", "utf8_sinhala_ci": "utf8", "utf8_german2_ci": "utf8", "utf8_croatian_ci": "utf8", "utf8_unicode_520_ci": "utf8", "utf8_vietnamese_ci": "utf8", "utf8_general_mysql500_ci": "utf8", "utf8mb4_0900_ai_ci": "utf8mb4", "ucs2_general_ci": "ucs2", 
"ucs2_bin": "ucs2", "ucs2_unicode_ci": "ucs2", "ucs2_icelandic_ci": "ucs2", "ucs2_latvian_ci": "ucs2", "ucs2_romanian_ci": "ucs2", "ucs2_slovenian_ci": "ucs2", "ucs2_polish_ci": "ucs2", "ucs2_estonian_ci": "ucs2", "ucs2_spanish_ci": "ucs2", "ucs2_swedish_ci": "ucs2", "ucs2_turkish_ci": "ucs2", "ucs2_czech_ci": "ucs2", "ucs2_danish_ci": "ucs2", "ucs2_lithuanian_ci": "ucs2", "ucs2_slovak_ci": "ucs2", "ucs2_spanish2_ci": "ucs2", "ucs2_roman_ci": "ucs2", "ucs2_persian_ci": "ucs2", "ucs2_esperanto_ci": "ucs2", "ucs2_hungarian_ci": "ucs2", "ucs2_sinhala_ci": "ucs2", "ucs2_german2_ci": "ucs2", "ucs2_croatian_ci": "ucs2", "ucs2_unicode_520_ci": "ucs2", "ucs2_vietnamese_ci": "ucs2", "ucs2_general_mysql500_ci": "ucs2", "cp866_general_ci": "cp866", "cp866_bin": "cp866", "keybcs2_general_ci": "keybcs2", "keybcs2_bin": "keybcs2", "macce_general_ci": "macce", "macce_bin": "macce", "macroman_general_ci": "macroman", "macroman_bin": "macroman", "cp852_general_ci": "cp852", "cp852_bin": "cp852", "latin7_estonian_cs": "latin7", "latin7_general_ci": "latin7", "latin7_general_cs": "latin7", "latin7_bin": "latin7", "utf8mb4_general_ci": "utf8mb4", "utf8mb4_bin": "utf8mb4", "utf8mb4_unicode_ci": "utf8mb4", "utf8mb4_icelandic_ci": "utf8mb4", "utf8mb4_latvian_ci": "utf8mb4", "utf8mb4_romanian_ci": "utf8mb4", "utf8mb4_slovenian_ci": "utf8mb4", "utf8mb4_polish_ci": "utf8mb4", "utf8mb4_estonian_ci": "utf8mb4", "utf8mb4_spanish_ci": "utf8mb4", "utf8mb4_swedish_ci": "utf8mb4", "utf8mb4_turkish_ci": "utf8mb4", "utf8mb4_czech_ci": "utf8mb4", "utf8mb4_danish_ci": "utf8mb4", "utf8mb4_lithuanian_ci": "utf8mb4", "utf8mb4_slovak_ci": "utf8mb4", "utf8mb4_spanish2_ci": "utf8mb4", "utf8mb4_roman_ci": "utf8mb4", "utf8mb4_persian_ci": "utf8mb4", "utf8mb4_esperanto_ci": "utf8mb4", "utf8mb4_hungarian_ci": "utf8mb4", "utf8mb4_sinhala_ci": "utf8mb4", "utf8mb4_german2_ci": "utf8mb4", "utf8mb4_croatian_ci": "utf8mb4", "utf8mb4_unicode_520_ci": "utf8mb4", "utf8mb4_vietnamese_ci": "utf8mb4", "cp1251_bulgarian_ci": "cp1251", "cp1251_ukrainian_ci": "cp1251", "cp1251_bin": "cp1251", "cp1251_general_ci": "cp1251", "cp1251_general_cs": "cp1251", "utf16_general_ci": "utf16", "utf16_bin": "utf16", "utf16_unicode_ci": "utf16", "utf16_icelandic_ci": "utf16", "utf16_latvian_ci": "utf16", "utf16_romanian_ci": "utf16", "utf16_slovenian_ci": "utf16", "utf16_polish_ci": "utf16", "utf16_estonian_ci": "utf16", "utf16_spanish_ci": "utf16", "utf16_swedish_ci": "utf16", "utf16_turkish_ci": "utf16", "utf16_czech_ci": "utf16", "utf16_danish_ci": "utf16", "utf16_lithuanian_ci": "utf16", "utf16_slovak_ci": "utf16", "utf16_spanish2_ci": "utf16", "utf16_roman_ci": "utf16", "utf16_persian_ci": "utf16", "utf16_esperanto_ci": "utf16", "utf16_hungarian_ci": "utf16", "utf16_sinhala_ci": "utf16", "utf16_german2_ci": "utf16", "utf16_croatian_ci": "utf16", "utf16_unicode_520_ci": "utf16", "utf16_vietnamese_ci": "utf16", "utf16le_general_ci": "utf16le", "utf16le_bin": "utf16le", "cp1256_general_ci": "cp1256", "cp1256_bin": "cp1256", "cp1257_lithuanian_ci": "cp1257", "cp1257_bin": "cp1257", "cp1257_general_ci": "cp1257", "utf32_general_ci": "utf32", "utf32_bin": "utf32", "utf32_unicode_ci": "utf32", "utf32_icelandic_ci": "utf32", "utf32_latvian_ci": "utf32", "utf32_romanian_ci": "utf32", "utf32_slovenian_ci": "utf32", "utf32_polish_ci": "utf32", "utf32_estonian_ci": "utf32", "utf32_spanish_ci": "utf32", "utf32_swedish_ci": "utf32", "utf32_turkish_ci": "utf32", "utf32_czech_ci": "utf32", "utf32_danish_ci": "utf32", "utf32_lithuanian_ci": "utf32", "utf32_slovak_ci": 
"utf32", "utf32_spanish2_ci": "utf32", "utf32_roman_ci": "utf32", "utf32_persian_ci": "utf32", "utf32_esperanto_ci": "utf32", "utf32_hungarian_ci": "utf32", "utf32_sinhala_ci": "utf32", "utf32_german2_ci": "utf32", "utf32_croatian_ci": "utf32", "utf32_unicode_520_ci": "utf32", "utf32_vietnamese_ci": "utf32", "binary": "binary", "geostd8_general_ci": "geostd8", "geostd8_bin": "geostd8", "cp932_japanese_ci": "cp932", "cp932_bin": "cp932", "eucjpms_japanese_ci": "eucjpms", "eucjpms_bin": "eucjpms", "gb18030_chinese_ci": "gb18030", "gb18030_bin": "gb18030", "gb18030_unicode_520_ci": "gb18030"} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/mysql/models.py0000644000175000017500000001257100000000000022173 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import netaddr from trove.common import cfg from trove.common.db import models from trove.common.db.mysql import data as mysql_settings from trove.common.i18n import _ CONF = cfg.CONF class MySQLSchema(models.DatastoreSchema): """Represents a MySQL database and its properties.""" # Defaults __charset__ = "utf8" __collation__ = "utf8_general_ci" dbname = re.compile(r"^[A-Za-z0-9_-]+[\s\?\#\@]*[A-Za-z0-9_-]+$") # Complete list of acceptable values collation = mysql_settings.collation charset = mysql_settings.charset def __init__(self, name=None, collate=None, character_set=None, deserializing=False): super(MySQLSchema, self).__init__(name=name, deserializing=deserializing) if not deserializing: if collate: self.collate = collate if character_set: self.character_set = character_set @property def _max_schema_name_length(self): return 64 def _is_valid_schema_name(self, value): # must match the dbname regex, and # cannot contain a '\' character. 
return not any([ not self.dbname.match(value), ("%r" % value).find("\\") != -1 ]) @property def collate(self): """Get the appropriate collate value.""" if not self._collate and not self._character_set: return self.__collation__ elif not self._collate: return self.charset[self._character_set][0] else: return self._collate @collate.setter def collate(self, value): """Validate the collation and set it.""" if not value: pass elif self._character_set: if value not in self.charset[self._character_set]: msg = (_("%(val)s not a valid collation for charset %(char)s.") % {'val': value, 'char': self._character_set}) raise ValueError(msg) self._collate = value else: if value not in self.collation: raise ValueError(_("'%s' not a valid collation.") % value) self._collate = value self._character_set = self.collation[value] @property def character_set(self): """Get the appropriate character set value.""" if not self._character_set: return self.__charset__ else: return self._character_set @character_set.setter def character_set(self, value): """Validate the character set and set it.""" if not value: pass elif value not in self.charset: raise ValueError(_("'%s' not a valid character set.") % value) else: self._character_set = value def verify_dict(self): # Also check the collate and character_set values if set, initialize # them if not. super(MySQLSchema, self).verify_dict() if self.__dict__.get('_collate'): self.collate = self._collate else: self._collate = None if self.__dict__.get('_character_set'): self.character_set = self._character_set else: self._character_set = None class MySQLUser(models.DatastoreUser): """Represents a MySQL User and its associated properties.""" not_supported_chars = re.compile(r"""^\s|\s$|'|"|;|`|,|/|\\""") def _is_valid_string(self, value): if (not value or self.not_supported_chars.search(value) or ("%r" % value).find("\\") != -1): return False else: return True def _is_valid_user_name(self, value): return self._is_valid_string(value) def _is_valid_password(self, value): return self._is_valid_string(value) def _is_valid_host_name(self, value): if value in [None, "%"]: # % is MySQL shorthand for "everywhere". Always permitted. # Null host defaults to % anyway. return True if CONF.hostname_require_valid_ip: try: # '%' works as a MySQL wildcard, but it is not a valid # part of an IPNetwork netaddr.IPNetwork(value.replace('%', '1')) except (ValueError, netaddr.AddrFormatError): return False else: return True else: # If it wasn't required, anything else goes. return True def _build_database_schema(self, name): return MySQLSchema(name) def deserialize_schema(self, value): return MySQLSchema.deserialize(value) @property def _max_user_name_length(self): return 16 @property def schema_model(self): return MySQLSchema ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.73611 trove-12.1.0.dev92/trove/common/db/postgresql/0000755000175000017500000000000000000000000021366 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/postgresql/__init__.py0000644000175000017500000000000000000000000023465 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/postgresql/models.py0000644000175000017500000000371400000000000023230 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from six import u from trove.common.db import models class PostgreSQLSchema(models.DatastoreSchema): """Represents a PostgreSQL schema and its associated properties.""" name_regex = re.compile(u(r'^[\u0001-\u007F\u0080-\uFFFF]+[^\s]$')) def __init__(self, name=None, collate=None, character_set=None, deserializing=False): super(PostgreSQLSchema, self).__init__(name=name, deserializing=deserializing) self.collate = collate self.character_set = character_set @property def collate(self): return self._collate @collate.setter def collate(self, value): self._collate = value @property def character_set(self): return self._character_set @character_set.setter def character_set(self, value): self._character_set = value @property def _max_schema_name_length(self): return 63 def _is_valid_schema_name(self, value): return self.name_regex.match(value) is not None class PostgreSQLUser(models.DatastoreUser): """Represents a PostgreSQL user and its associated properties.""" root_username = 'postgres' @property def _max_user_name_length(self): return 63 @property def schema_model(self): return PostgreSQLSchema ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7401102 trove-12.1.0.dev92/trove/common/db/redis/0000755000175000017500000000000000000000000020271 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/redis/__init__.py0000644000175000017500000000000000000000000022370 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/db/redis/models.py0000644000175000017500000000160500000000000022130 0ustar00coreycorey00000000000000# Copyright 2017 Eayun, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.common.db import models class RedisRootUser(models.DatastoreModelsBase): def verify_dict(self): pass def __init__(self, password=None): self._name = '-' self._password = password super(RedisRootUser, self).__init__() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/debug_utils.py0000644000175000017500000001263500000000000021465 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
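# NOTE(editor): illustrative sketch only, not part of the original modules.
# It exercises the PostgreSQL and Redis models defined above, using only
# the constructors shown there.


def _demo_postgresql_redis_models():
    from trove.common.db.postgresql.models import PostgreSQLSchema
    from trove.common.db.redis.models import RedisRootUser

    schema = PostgreSQLSchema(name='reporting')
    # PostgreSQL identifiers are limited to 63 characters, hence the
    # _max_schema_name_length property above; names must also not end
    # in whitespace per name_regex.
    assert schema._is_valid_schema_name('reporting')
    assert not schema._is_valid_schema_name('ends with space ')

    # Redis has no named users; the root "user" is just a password holder
    # with a placeholder name, and verify_dict() is a no-op.
    root = RedisRootUser(password='s3cret')
    assert root._name == '-'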
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#

"""Helper utilities for debugging."""

import sys

from oslo_config import cfg
from oslo_log import log as logging

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

__debug_state = None

pydev_debug_opts = [
    cfg.StrOpt("pydev_debug",
               choices=("disabled", "enabled", "auto"),
               default="disabled",
               help="Enable or disable pydev remote debugging. "
                    "If value is 'auto' tries to connect to remote "
                    "debugger server, but in case of error "
                    "continues running with debugging disabled."),
    cfg.StrOpt("pydev_debug_host",
               help="Pydev debug server host (localhost by default)."),
    cfg.PortOpt("pydev_debug_port",
                default=5678,
                help="Pydev debug server port (5678 by default)."),
    cfg.StrOpt("pydev_path",
               help="Set path to pydevd library, used if pydevd is "
                    "not found in python sys.path.")
]

CONF.register_opts(pydev_debug_opts)


def setup():
    """Analyze the configuration for pydev remote debugging and establish a
    connection to the remote debugger service if needed.

    @return: True if remote debugging was enabled successfully,
             otherwise False
    """
    global __debug_state

    if CONF.pydev_debug == "enabled":
        __debug_state = __setup_remote_pydev_debug(
            pydev_debug_host=CONF.pydev_debug_host,
            pydev_debug_port=CONF.pydev_debug_port,
            pydev_path=CONF.pydev_path)
    elif CONF.pydev_debug == "auto":
        __debug_state = __setup_remote_pydev_debug_safe(
            pydev_debug_host=CONF.pydev_debug_host,
            pydev_debug_port=CONF.pydev_debug_port,
            pydev_path=CONF.pydev_path)
    else:
        __debug_state = False


def enabled():
    """
    @return: True if a connection to the remote debugger was established,
             otherwise False
    """
    assert __debug_state is not None, ("debug_utils are not initialized. "
                                       "Please call setup() method first")

    # if __debug_state is set and we have monkey patched
    # eventlet.thread, issue a warning.
    # You can't safely use eventlet.is_monkey_patched() on the
    # threading module so you have to do this little dance.
    # Discovered after much head scratching, see also
    #
    # http://stackoverflow.com/questions/32452110/
    # does-eventlet-do-monkey-patch-for-threading-module
    #
    # note multi-line URL
    if __debug_state:
        import threading
        if threading.current_thread.__module__ == 'eventlet.green.threading':
            LOG.warning("Enabling debugging with eventlet monkey"
                        " patched may produce unexpected behavior.")
    return __debug_state


def __setup_remote_pydev_debug_safe(pydev_debug_host=None,
                                    pydev_debug_port=5678,
                                    pydev_path=None):
    """Safe version of the __setup_remote_pydev_debug method. On error it
    returns False instead of raising an exception.

    @see: __setup_remote_pydev_debug
    """
    try:
        return __setup_remote_pydev_debug(
            pydev_debug_host=pydev_debug_host,
            pydev_debug_port=pydev_debug_port,
            pydev_path=pydev_path)
    except Exception as e:
        LOG.warning("Can't connect to remote debug server."
                    " Continuing to work in standard mode."
                    " Error: %s.", e)
        return False


def __setup_remote_pydev_debug(pydev_debug_host=None,
                               pydev_debug_port=None,
                               pydev_path=None):
    """Connect to the remote debug server and attach the current thread
    trace to the debugger. thread.start_new_thread and thread.start_new
    are also patched to enable debugging of new threads.

    @param pydev_debug_host: remote debug server host name,
        'localhost' if not specified or None
    @param pydev_debug_port: remote debug server port,
        5678 if not specified or None
    @param pydev_path: optional path to the pydevd library, used if pydevd
        is not found in python sys.path
    @return: True if debugging was initialized, otherwise an exception
        is raised
    """
    try:
        import pydevd
        LOG.debug("pydevd module was imported from system path")
    except ImportError:
        LOG.debug("Can't load pydevd module from system path. Try loading it "
                  "from pydev_path: %s", pydev_path)
        assert pydev_path, "pydev_path is not set"
        if pydev_path not in sys.path:
            sys.path.append(pydev_path)
        import pydevd
        LOG.debug("pydevd module was imported from pydev_path: %s",
                  pydev_path)

    pydevd.settrace(
        host=pydev_debug_host,
        port=pydev_debug_port,
        stdoutToServer=True,
        stderrToServer=True,
        trace_only_current_thread=False,
        suspend=False,
    )
    return True
trove-12.1.0.dev92/trove/common/exception.py
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
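# NOTE(editor): illustrative sketch only, not part of the original module.
# It shows how a service entry point would drive the pydev helpers from
# trove/common/debug_utils.py above. The host/port values are examples;
# in practice they come from trove.conf.


def _demo_debug_utils():
    from oslo_config import cfg
    from trove.common import debug_utils

    # Equivalent trove.conf settings:
    #   [DEFAULT]
    #   pydev_debug = auto
    #   pydev_debug_host = 10.0.0.5
    #   pydev_debug_port = 5678
    cfg.CONF.set_override('pydev_debug', 'auto')
    cfg.CONF.set_override('pydev_debug_host', '10.0.0.5')

    debug_utils.setup()       # must run before enabled() may be called
    if debug_utils.enabled():
        print('remote pydev debugger attached')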
"""I totally stole most of this from melange, thx guys!!!""" import re from oslo_concurrency import processutils from oslo_log import log as logging from trove.common import base_exception as openstack_exception from trove.common.i18n import _ ClientConnectionError = openstack_exception.ClientConnectionError ProcessExecutionError = processutils.ProcessExecutionError DatabaseMigrationError = openstack_exception.DatabaseMigrationError LOG = logging.getLogger(__name__) wrap_exception = openstack_exception.wrap_exception def safe_fmt_string(text): return re.sub(r'%([0-9]+)', r'\1', text) class TroveError(openstack_exception.OpenstackException): """Base exception that all custom trove app exceptions inherit from.""" internal_message = None def __init__(self, message=None, **kwargs): if message is not None: self.message = message if self.internal_message is not None: try: LOG.error(safe_fmt_string(self.internal_message), kwargs) except Exception: LOG.error(self.internal_message) self.message = safe_fmt_string(self.message) super(TroveError, self).__init__(**kwargs) class DBConstraintError(TroveError): message = _("Failed to save %(model_name)s because: %(error)s.") class InvalidRPCConnectionReuse(TroveError): message = _("Invalid RPC Connection Reuse.") class NotFound(TroveError): message = _("Resource %(uuid)s cannot be found.") class BadRequest(TroveError): message = _("The server could not comply with the request since it is " "either malformed or otherwise incorrect.") class CapabilityNotFound(NotFound): message = _("Capability '%(capability)s' cannot be found.") class CapabilityDisabled(TroveError): message = _("Capability '%(capability)s' is disabled.") class FlavorNotFound(TroveError): message = _("Resource %(uuid)s cannot be found.") class UserNotFound(NotFound): message = _("User %(uuid)s cannot be found on the instance.") class RootHistoryNotFound(NotFound): message = _("Root user has never been enabled on the instance.") class DatabaseNotFound(NotFound): message = _("Database %(uuid)s cannot be found on the instance.") class ComputeInstanceNotFound(NotFound): # internal_message is used for log, stop translating. internal_message = ("Cannot find compute instance %(server_id)s for " "instance %(instance_id)s.") message = _("Resource %(instance_id)s can not be retrieved.") class DnsRecordNotFound(NotFound): message = _("DnsRecord with name= %(name)s not found.") class DatastoreNotFound(NotFound): message = _("Datastore '%(datastore)s' cannot be found.") class DatastoreVersionNotFound(NotFound): message = _("Datastore version '%(version)s' cannot be found.") class DatastoresNotFound(NotFound): message = _("Datastores cannot be found.") class DatastoreFlavorAssociationNotFound(NotFound): message = _("Flavor %(id)s is not supported for datastore " "%(datastore)s version %(datastore_version)s") class DatastoreFlavorAssociationAlreadyExists(TroveError): message = _("Flavor %(id)s is already associated with " "datastore %(datastore)s version %(datastore_version)s") class DatastoreVolumeTypeAssociationNotFound(NotFound): message = _("The volume type %(id)s is not valid for datastore " "%(datastore)s and version %(version_id)s.") class DatastoreVolumeTypeAssociationAlreadyExists(TroveError): message = _("Datastore '%(datastore)s' version %(datastore_version)s " "and volume-type %(id)s mapping already exists.") class DataStoreVersionVolumeTypeRequired(TroveError): message = _("Only specific volume types are allowed for a " "datastore %(datastore)s version %(datastore_version)s. 
" "You must specify a valid volume type.") class DatastoreVersionNoVolumeTypes(TroveError): message = _("No valid volume types could be found for datastore " "%(datastore)s and version %(datastore_version)s.") class DatastoreNoVersion(TroveError): message = _("Datastore '%(datastore)s' has no version '%(version)s'.") class DatastoreVersionInactive(TroveError): message = _("Datastore version '%(version)s' is not active.") class DatastoreVersionAlreadyExists(BadRequest): message = _("A datastore version with the name '%(name)s' already exists.") class DatastoreVersionsExist(BadRequest): message = _("Datastore versions exist for datastore %(datastore)s.") class DatastoreDefaultDatastoreNotFound(TroveError): message = _("Please specify datastore. Default datastore " "'%(datastore)s' cannot be found.") class DatastoreDefaultDatastoreNotDefined(TroveError): message = _("Please specify datastore. No default datastore " "is defined.") class DatastoreDefaultVersionNotFound(TroveError): message = _("Default version for datastore '%(datastore)s' not found.") class InvalidDatastoreManager(TroveError): message = _("Datastore manager %(datastore_manager)s cannot be found.") class DatastoreOperationNotSupported(TroveError): message = _("The '%(operation)s' operation is not supported for " "the '%(datastore)s' datastore.") class NoUniqueMatch(TroveError): message = _("Multiple matches found for '%(name)s', " "use an UUID to be more specific.") class OverLimit(TroveError): # internal_message is used for log, stop translating. internal_message = ("The server rejected the request due to its size or " "rate.") class QuotaLimitTooSmall(TroveError): message = _("Quota limit '%(limit)s' for '%(resource)s' is too small" " - must be at least '-1'.") class QuotaExceeded(TroveError): message = _("Quota exceeded for resources: %(overs)s.") class VolumeQuotaExceeded(QuotaExceeded): message = _("Instance volume quota exceeded.") class GuestError(TroveError): message = _("An error occurred communicating with the guest: " "%(original_message)s.") class GuestTimeout(TroveError): message = _("Timeout trying to connect to the Guest Agent.") class MissingKey(BadRequest): message = _("Required element/key - %(key)s was not specified.") class DatabaseAlreadyExists(BadRequest): message = _('A database with the name "%(name)s" already exists.') class UserAlreadyExists(BadRequest): message = _('A user with the name "%(name)s" already exists.') class InstanceAssignedToConfiguration(BadRequest): message = _('A configuration group cannot be deleted if it is ' 'associated with one or more non-terminated instances. 
' 'Detach the configuration group from all non-terminated ' 'instances and please try again.') class UnprocessableEntity(TroveError): message = _("Unable to process the contained request.") class ConfigurationNotSupported(UnprocessableEntity): message = _("Configuration groups not supported by the datastore.") class CannotResizeToSameSize(TroveError): message = _("No change was requested in the size of the instance.") class VolumeAttachmentsNotFound(NotFound): message = _("Cannot find the volumes attached to compute " "instance %(server_id)s.") class VolumeCreationFailure(TroveError): message = _("Failed to create a volume in Nova.") class VolumeSizeNotSpecified(BadRequest): message = _("Volume size was not specified.") class LocalStorageNotSpecified(BadRequest): message = _("Local storage not specified in flavor ID: %(flavor)s.") class LocalStorageNotSupported(TroveError): message = _("Local storage support is not enabled.") class VolumeNotSupported(TroveError): message = _("Volume support is not enabled.") class ReplicationNotSupported(TroveError): message = _("Replication is not supported for " "the '%(datastore)s' datastore.") class ReplicationSlaveAttachError(TroveError): message = _("Exception encountered attaching slave to new replica source.") class TaskManagerError(TroveError): message = _("An error occurred communicating with the task manager: " "%(original_message)s.") class BadValue(TroveError): message = _("Value could not be converted: %(msg)s.") class PollTimeOut(TroveError): message = _("Polling request timed out.") class Forbidden(TroveError): message = _("User does not have admin privileges.") class PolicyNotAuthorized(Forbidden): message = _("Policy doesn't allow %(action)s to be performed.") class InvalidModelError(TroveError): message = _("The following values are invalid: %(errors)s.") class ModelNotFoundError(NotFound): message = _("Not Found.") class UpdateGuestError(TroveError): message = _("Failed to update instances.") class ConfigNotFound(NotFound): message = _("Config file not found.") class PasteAppNotFound(NotFound): message = _("Paste app not found.") class QuotaNotFound(NotFound): message = _("Quota could not be found.") class TenantQuotaNotFound(QuotaNotFound): message = _("Quota for tenant %(tenant_id)s could not be found.") class QuotaResourceUnknown(QuotaNotFound): message = _("Unknown quota resources %(unknown)s.") class BackupUploadError(TroveError): message = _("Unable to upload Backup to swift.") class BackupDownloadError(TroveError): message = _("Unable to download Backup from swift") class BackupCreationError(TroveError): message = _("Unable to create Backup.") class BackupUpdateError(TroveError): message = _("Unable to update Backup table in database.") class SecurityGroupDeletionError(TroveError): message = _("Failed to delete Security Group.") class SecurityGroupRuleDeletionError(TroveError): message = _("Failed to delete Security Group Rule.") class BackupNotCompleteError(TroveError): message = _("Unable to create instance because backup %(backup_id)s is " "not completed. 
Actual state: %(state)s.") class BackupFileNotFound(NotFound): message = _("Backup file in %(location)s was not found in the object " "storage.") class BackupDatastoreMismatchError(TroveError): message = _("The datastore from which the backup was taken, " "%(datastore1)s, does not match the destination" " datastore of %(datastore2)s.") class ReplicaCreateWithUsersDatabasesError(TroveError): message = _("Cannot create a replica with users or databases.") class SwiftAuthError(TroveError): message = _("Swift account not accessible for tenant %(tenant_id)s.") class SwiftNotFound(TroveError): message = _("Swift is disabled for tenant %(tenant_id)s.") class SwiftConnectionError(TroveError): message = _("Cannot connect to Swift.") class DatabaseForUserNotInDatabaseListError(TroveError): message = _("The request indicates that user %(user)s should have access " "to database %(database)s, but database %(database)s is not " "included in the initial databases list.") class DatabaseInitialDatabaseDuplicateError(TroveError): message = _("Two or more databases share the same name in the initial " "databases list. Please correct the names or remove the " "duplicate entries.") class DatabaseInitialUserDuplicateError(TroveError): message = _("Two or more users share the same name and host in the " "initial users list. Please correct the names or remove the " "duplicate entries.") class RestoreBackupIntegrityError(TroveError): message = _("Current Swift object checksum does not match original " "checksum for backup %(backup_id)s.") class ConfigKeyNotFound(NotFound): message = _("%(key)s is not a supported configuration parameter.") class NoConfigParserFound(NotFound): message = _("No configuration parser found for datastore " "%(datastore_manager)s.") class ConfigurationDatastoreNotMatchInstance(TroveError): message = _("Datastore Version on Configuration " "%(config_datastore_version)s does not " "match the Datastore Version on the instance " "%(instance_datastore_version)s.") class ConfigurationParameterDeleted(TroveError): message = _("%(parameter_name)s parameter can no longer be " "set as of %(parameter_deleted_at)s.") class ConfigurationParameterAlreadyExists(TroveError): message = _("%(parameter_name)s parameter already exists " "for datastore version %(datastore_version)s.") class ConfigurationAlreadyAttached(TroveError): message = _("Instance %(instance_id)s already has a " "Configuration Group attached: %(configuration_id)s.") class InvalidInstanceState(TroveError): message = _("The operation you have requested cannot be executed because " "the instance status is currently: %(status)s.") class NoServiceEndpoint(TroveError): """Could not find requested endpoint in Service Catalog.""" message = _("Endpoint not found for service_type=%(service_type)s, " "endpoint_type=%(endpoint_type)s, " "endpoint_region=%(endpoint_region)s.") class EmptyCatalog(NoServiceEndpoint): """The service catalog is empty.""" message = _("Empty catalog.") class IncompatibleReplicationStrategy(TroveError): message = _("Instance with replication strategy %(guest_strategy)s " "cannot replicate from instance with replication strategy " "%(replication_strategy)s.") class InsufficientSpaceForReplica(TroveError): message = _("The target instance has only %(slave_volume_size)sG free, " "but the replication snapshot contains %(dataset_size)sG " "of data.") class InsufficientSpaceForBackup(TroveError): message = _("The instance has only %(free)sG free while the estimated " "backup size is %(backup_size)sG.") class 
ReplicaSourceDeleteForbidden(Forbidden): message = _("The replica source cannot be deleted without detaching the " "replicas.") class ModuleTypeNotFound(NotFound): message = _("Module type '%(module_type)s' was not found.") class ModuleAppliedToInstance(BadRequest): message = _("A module cannot be deleted or its contents modified if it " "has been applied to a non-terminated instance, unless the " "module has been marked as 'live_update.' " "Please remove the module from all non-terminated " "instances and try again.") class ModuleAlreadyExists(BadRequest): message = _("A module with the name '%(name)s' already exists for " "datastore '%(datastore)s' and datastore version " "'%(ds_version)s'") class ModuleAccessForbidden(Forbidden): message = _("You must be admin to %(action)s a module with these " "options. %(options)s") class ModuleInvalid(Forbidden): message = _("The module is invalid: %(reason)s") class InstanceNotFound(NotFound): message = _("Instance '%(instance)s' cannot be found.") class ClusterNotFound(NotFound): message = _("Cluster '%(cluster)s' cannot be found.") class ClusterFlavorsNotEqual(TroveError): message = _("The flavor for each instance in a cluster must be the same.") class ClusterNetworksNotEqual(TroveError): message = _("The network for each instance in a cluster must be the same.") class NetworkNotFound(TroveError): message = _("Network Resource %(uuid)s cannot be found.") class PublicNetworkNotFound(TroveError): message = _("Public network cannot be found.") class NetworkConflict(BadRequest): message = _("User network conflicts with the management network.") class ClusterVolumeSizeRequired(TroveError): message = _("A volume size is required for each instance in the cluster.") class ClusterVolumeSizesNotEqual(TroveError): message = _("The volume size for each instance in a cluster must be " "the same.") class ClusterNumInstancesNotSupported(TroveError): message = _("The number of instances for your initial cluster must " "be %(num_instances)s.") class ClusterNumInstancesNotLargeEnough(TroveError): message = _("The number of instances for your initial cluster must " "be at least %(num_instances)s.") class ClusterNumInstancesBelowSafetyThreshold(TroveError): message = _("The number of instances in your cluster cannot " "safely be lowered below the current level based " "on your current fault-tolerance settings.") class ClusterShrinkMustNotLeaveClusterEmpty(TroveError): message = _("Must leave at least one instance in the cluster when " "shrinking.") class ClusterShrinkInstanceInUse(TroveError): message = _("Instance(s) %(id)s currently in use and cannot be deleted. " "Details: %(reason)s") class ClusterInstanceOperationNotSupported(TroveError): message = _("Operation not supported for instances that are part of a " "cluster.") class ClusterOperationNotSupported(TroveError): message = _("The '%(operation)s' operation is not supported for cluster.") class TroveOperationAuthError(TroveError): message = _("Operation not allowed for tenant %(tenant_id)s.") class ClusterDatastoreNotSupported(TroveError): message = _("Clusters not supported for " "%(datastore)s-%(datastore_version)s.") class BackupTooLarge(TroveError): message = _("Backup is too large for given flavor or volume. " "Backup size: %(backup_size)s GBs. 
" "Available size: %(disk_size)s GBs.") class ImageNotFound(NotFound): message = _("Image %(uuid)s cannot be found.") class LogAccessForbidden(Forbidden): message = _("You must be admin to %(action)s log '%(log)s'.") class LogsNotAvailable(Forbidden): message = _("Log actions are not supported.") class SlaveOperationNotSupported(TroveError): message = _("The '%(operation)s' operation is not supported for slaves in " "replication.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/extensions.py0000644000175000017500000005042500000000000021355 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from lxml import etree from oslo_log import log as logging from oslo_utils import encodeutils import routes import six import stevedore import webob.dec import webob.exc from trove.common import base_exception as exception from trove.common import base_wsgi from trove.common.i18n import _ from trove.common import wsgi LOG = logging.getLogger(__name__) DEFAULT_XMLNS = "http://docs.openstack.org/trove" XMLNS_ATOM = "http://www.w3.org/2005/Atom" @six.add_metaclass(abc.ABCMeta) class ExtensionDescriptor(object): """Base class that defines the contract for extensions. Note that you don't have to derive from this class to have a valid extension; it is purely a convenience. """ @abc.abstractmethod def get_name(self): """The name of the extension. e.g. 'Fox In Socks' """ pass @abc.abstractmethod def get_alias(self): """The alias for the extension. e.g. 'FOXNSOX' """ pass @abc.abstractmethod def get_description(self): """Friendly description for the extension. e.g. 'The Fox In Socks Extension' """ pass @abc.abstractmethod def get_namespace(self): """The XML namespace for the extension. e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0' """ pass @abc.abstractmethod def get_updated(self): """The timestamp when the extension was last updated. e.g. '2011-01-22T13:25:27-06:00' """ pass def get_resources(self): """List of extensions.ResourceExtension extension objects. Resources define new nouns, and are accessible through URLs. """ resources = [] return resources def get_actions(self): """List of extensions.ActionExtension extension objects. Actions are verbs callable from the API. """ actions = [] return actions def get_request_extensions(self): """List of extensions.RequestException extension objects. Request extensions are used to handle custom request data. 
""" request_exts = [] return request_exts class ActionExtensionController(object): def __init__(self, application): self.application = application self.action_handlers = {} def add_action(self, action_name, handler): self.action_handlers[action_name] = handler def action(self, req, id, body): for action_name, handler in self.action_handlers.items(): if action_name in body: return handler(body, req, id) # no action handler found (bump to downstream application) res = self.application return res class ActionExtensionResource(wsgi.Resource): def __init__(self, application): controller = ActionExtensionController(application) wsgi.Resource.__init__(self, controller) def add_action(self, action_name, handler): self.controller.add_action(action_name, handler) class RequestExtensionController(object): def __init__(self, application): self.application = application self.handlers = [] def add_handler(self, handler): self.handlers.append(handler) def process(self, req, *args, **kwargs): res = req.get_response(self.application) # currently request handlers are un-ordered for handler in self.handlers: res = handler(req, res) return res class RequestExtensionResource(wsgi.Resource): def __init__(self, application): controller = RequestExtensionController(application) wsgi.Resource.__init__(self, controller) def add_handler(self, handler): self.controller.add_handler(handler) class ExtensionsResource(wsgi.Resource): def __init__(self, extension_manager): self.extension_manager = extension_manager body_serializers = {'application/xml': ExtensionsXMLSerializer()} serializer = base_wsgi.ResponseSerializer( body_serializers=body_serializers) super(ExtensionsResource, self).__init__(self, None, serializer) def _translate(self, ext): ext_data = {} ext_data['name'] = ext.get_name() ext_data['alias'] = ext.get_alias() ext_data['description'] = ext.get_description() ext_data['namespace'] = ext.get_namespace() ext_data['updated'] = ext.get_updated() ext_data['links'] = [] return ext_data def index(self, req): extensions = [] for _alias, ext in self.extension_manager.extensions.items(): extensions.append(self._translate(ext)) return dict(extensions=extensions) def show(self, req, id): # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions.get(id, None) if not ext: raise webob.exc.HTTPNotFound( _("Extension with alias %s does not exist") % id) return dict(extension=self._translate(ext)) def delete(self, req, id): raise webob.exc.HTTPNotFound() def create(self, req): raise webob.exc.HTTPNotFound() class ExtensionMiddleware(wsgi.Middleware): """Extensions middleware for WSGI.""" @classmethod def factory(cls, global_config, **local_config): """Paste factory.""" def _factory(app): return cls(app, global_config, **local_config) return _factory def _action_ext_resources(self, application, ext_mgr, mapper): """Return a dict of ActionExtensionResource-s by collection.""" action_resources = {} for action in ext_mgr.get_actions(): if action.collection not in action_resources.keys(): resource = ActionExtensionResource(application) mapper.connect("/%s/:(id)/action.:(format)" % action.collection, action='action', controller=resource, conditions=dict(method=['POST'])) mapper.connect("/%s/:(id)/action" % action.collection, action='action', controller=resource, conditions=dict(method=['POST'])) action_resources[action.collection] = resource return action_resources def _request_ext_resources(self, application, ext_mgr, mapper): """Returns a dict of RequestExtensionResource-s by 
collection.""" request_ext_resources = {} for req_ext in ext_mgr.get_request_extensions(): if req_ext.key not in request_ext_resources.keys(): resource = RequestExtensionResource(application) mapper.connect(req_ext.url_route + '.:(format)', action='process', controller=resource, conditions=req_ext.conditions) mapper.connect(req_ext.url_route, action='process', controller=resource, conditions=req_ext.conditions) request_ext_resources[req_ext.key] = resource return request_ext_resources def __init__(self, application, config, ext_mgr=None): ext_mgr = (ext_mgr or ExtensionManager()) mapper = routes.Mapper() # extended resources for resource_ext in ext_mgr.get_resources(): LOG.debug('Extended resource: %s', resource_ext.collection) controller_resource = wsgi.Resource(resource_ext.controller, resource_ext.deserializer, resource_ext.serializer) self._map_custom_collection_actions(resource_ext, mapper, controller_resource) kargs = dict(controller=controller_resource, collection=resource_ext.collection_actions, member=resource_ext.member_actions) if resource_ext.parent: kargs['parent_resource'] = resource_ext.parent mapper.resource(resource_ext.collection, resource_ext.collection, **kargs) # extended actions action_resources = self._action_ext_resources(application, ext_mgr, mapper) for action in ext_mgr.get_actions(): LOG.debug('Extended action: %s', action.action_name) resource = action_resources[action.collection] resource.add_action(action.action_name, action.handler) # extended requests req_controllers = self._request_ext_resources(application, ext_mgr, mapper) for request_ext in ext_mgr.get_request_extensions(): LOG.debug('Extended request: %s', request_ext.key) controller = req_controllers[request_ext.key] controller.add_handler(request_ext.handler) self._router = routes.middleware.RoutesMiddleware(self._dispatch, mapper) super(ExtensionMiddleware, self).__init__(application) def _map_custom_collection_actions(self, resource_ext, mapper, controller_resource): for action, method in resource_ext.collection_actions.items(): parent = resource_ext.parent conditions = dict(method=[method]) path = "/%s/%s" % (resource_ext.collection, action) path_prefix = "" if parent: path_prefix = "/%s/{%s_id}" % (parent["collection_name"], parent["member_name"]) with mapper.submapper(controller=controller_resource, action=action, path_prefix=path_prefix, conditions=conditions) as submap: submap.connect(path_prefix + path, path) submap.connect(path_prefix + path + "_format", "%s.:(format)" % path) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Route the incoming request with router.""" req.environ['extended.app'] = self.application return self._router @staticmethod @webob.dec.wsgify(RequestClass=wsgi.Request) def _dispatch(req): """Dispatch the request. Returns the routed WSGI app's response or defers to the extended application. 
""" match = req.environ['wsgiorg.routing_args'][1] if not match: return req.environ['extended.app'] app = match['controller'] return app class ExtensionManager(object): EXT_NAMESPACE = 'trove.api.extensions' def __init__(self): LOG.debug('Initializing extension manager.') self.extensions = {} self._load_all_extensions() def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] extension_resource = ExtensionsResource(self) res_ext = ResourceExtension('extensions', extension_resource, serializer=extension_resource.serializer) resources.append(res_ext) for alias, ext in self.extensions.items(): try: resources.extend(ext.get_resources()) except AttributeError: pass return resources def get_actions(self): """Returns a list of ActionExtension objects.""" actions = [] for alias, ext in self.extensions.items(): try: actions.extend(ext.get_actions()) except AttributeError: pass return actions def get_request_extensions(self): """Returns a list of RequestExtension objects.""" request_exts = [] for alias, ext in self.extensions.items(): try: request_exts.extend(ext.get_request_extensions()) except AttributeError: pass return request_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name: %s', extension.get_name()) LOG.debug('Ext alias: %s', extension.get_alias()) LOG.debug('Ext description: %s', extension.get_description()) LOG.debug('Ext namespace: %s', extension.get_namespace()) LOG.debug('Ext updated: %s', extension.get_updated()) except AttributeError as ex: LOG.exception("Exception loading extension: %s", encodeutils.exception_to_unicode(ex)) return False return True def _check_load_extension(self, ext): LOG.debug('Ext: %s', ext.obj) return isinstance(ext.obj, ExtensionDescriptor) def _load_all_extensions(self): self.api_extension_manager = stevedore.enabled.EnabledExtensionManager( namespace=self.EXT_NAMESPACE, check_func=self._check_load_extension, invoke_on_load=True, invoke_kwds={}) self.api_extension_manager.map(self.add_extension) def add_extension(self, ext): ext = ext.obj # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.get_alias() LOG.debug('Loaded extension: %s', alias) if alias in self.extensions: raise exception.Error("Found duplicate extension: %s" % alias) self.extensions[alias] = ext class RequestExtension(object): def __init__(self, method, url_route, handler): self.url_route = url_route self.handler = handler self.conditions = dict(method=[method]) self.key = "%s-%s" % (method, url_route) class ActionExtension(object): def __init__(self, collection, action_name, handler): self.collection = collection self.action_name = action_name self.handler = handler class BaseResourceExtension(object): def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, deserializer=None, serializer=None): if not collection_actions: collection_actions = {} if not member_actions: member_actions = {} self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions self.deserializer = deserializer self.serializer = serializer class ExtensionsXMLSerializer(base_wsgi.XMLDictSerializer): def __init__(self): self.nsmap = {None: DEFAULT_XMLNS, 'atom': XMLNS_ATOM} def show(self, ext_dict): ext = etree.Element('extension', nsmap=self.nsmap) self._populate_ext(ext, ext_dict['extension']) return self._to_xml(ext) def 
index(self, exts_dict): exts = etree.Element('extensions', nsmap=self.nsmap) for ext_dict in exts_dict['extensions']: ext = etree.SubElement(exts, 'extension') self._populate_ext(ext, ext_dict) return self._to_xml(exts) def _populate_ext(self, ext_elem, ext_dict): """Populate an extension xml element from a dict.""" ext_elem.set('name', ext_dict['name']) ext_elem.set('namespace', ext_dict['namespace']) ext_elem.set('alias', ext_dict['alias']) ext_elem.set('updated', ext_dict['updated']) desc = etree.Element('description') desc.text = ext_dict['description'] ext_elem.append(desc) for link in ext_dict.get('links', []): elem = etree.SubElement(ext_elem, '{%s}link' % XMLNS_ATOM) elem.set('rel', link['rel']) elem.set('href', link['href']) elem.set('type', link['type']) return ext_elem def _to_xml(self, root): """Convert the xml object to an xml string.""" return etree.tostring(root, encoding='UTF-8') class ResourceExtension(BaseResourceExtension): def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, deserializer=None, serializer=None): super(ResourceExtension, self).__init__( collection, controller, parent=parent, collection_actions=collection_actions, member_actions=member_actions, deserializer=wsgi.RequestDeserializer(), serializer=wsgi.TroveResponseSerializer()) class TroveExtensionMiddleware(ExtensionMiddleware): def __init__(self, application, ext_mgr=None): ext_mgr = (ext_mgr or ExtensionManager()) mapper = routes.Mapper() # extended resources for resource_ext in ext_mgr.get_resources(): LOG.debug('Extended resource: %s', resource_ext.collection) # The only difference here is that we are using our common # wsgi.Resource instead of the openstack common wsgi.Resource exception_map = None if hasattr(resource_ext.controller, 'exception_map'): exception_map = resource_ext.controller.exception_map controller_resource = wsgi.Resource(resource_ext.controller, resource_ext.deserializer, resource_ext.serializer, exception_map) self._map_custom_collection_actions(resource_ext, mapper, controller_resource) kargs = dict(controller=controller_resource, collection=resource_ext.collection_actions, member=resource_ext.member_actions) if resource_ext.parent: kargs['parent_resource'] = resource_ext.parent mapper.resource(resource_ext.collection, resource_ext.collection, **kargs) mapper.connect(("/%s/{id}" % resource_ext.collection), controller=controller_resource, action='edit', conditions={'method': ['PATCH']}) # extended actions action_resources = self._action_ext_resources(application, ext_mgr, mapper) for action in ext_mgr.get_actions(): LOG.debug('Extended action: %s', action.action_name) resource = action_resources[action.collection] resource.add_action(action.action_name, action.handler) # extended requests req_controllers = self._request_ext_resources(application, ext_mgr, mapper) for request_ext in ext_mgr.get_request_extensions(): LOG.debug('Extended request: %s', request_ext.key) controller = req_controllers[request_ext.key] controller.add_handler(request_ext.handler) self._router = routes.middleware.RoutesMiddleware(self._dispatch, mapper) super(ExtensionMiddleware, self).__init__(application) def factory(global_config, **local_config): """Paste factory.""" def _factory(app): ext_mgr = ExtensionManager() return TroveExtensionMiddleware(app, ext_mgr) return _factory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 
trove-12.1.0.dev92/trove/common/i18n.py0000644000175000017500000000220500000000000017726 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/index.html """ import oslo_i18n # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the # application name when this module is synced into the separate # repository. It is OK to have more than one translation function # using the same domain, since there will still only be one message # catalog. _translators = oslo_i18n.TranslatorFactory(domain='trove') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/instance.py0000644000175000017500000000752000000000000020760 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class ServiceStatus(object): """Represents the status of the app and in some rare cases the agent. Code and description are what is stored in the database. "api_status" refers to the status which comes back from the REST API. """ _lookup = {} def __init__(self, code, description, api_status): self._code = code self._description = description self._api_status = api_status ServiceStatus._lookup[code] = self @property def action_is_allowed(self): allowed_statuses = [ ServiceStatuses.RUNNING._code, ServiceStatuses.SHUTDOWN._code, ServiceStatuses.CRASHED._code, ServiceStatuses.BLOCKED._code, ServiceStatuses.HEALTHY._code, ] return self._code in allowed_statuses @property def api_status(self): return self._api_status @property def code(self): return self._code @property def description(self): return self._description def __eq__(self, other): if not isinstance(other, ServiceStatus): return False return self.code == other.code @staticmethod def from_code(code): if code not in ServiceStatus._lookup: msg = 'Status code %s is not a valid ServiceStatus integer code.' raise ValueError(msg % code) return ServiceStatus._lookup[code] @staticmethod def from_description(desc): all_items = ServiceStatus._lookup.items() status_codes = [code for (code, status) in all_items if status.description == desc] if not status_codes: msg = 'Status description %s is not a valid ServiceStatus.' 
raise ValueError(msg % desc) return ServiceStatus._lookup[status_codes[0]] @staticmethod def is_valid_code(code): return code in ServiceStatus._lookup def __str__(self): return self._description def __repr__(self): return self._api_status class ServiceStatuses(object): RUNNING = ServiceStatus(0x01, 'running', 'ACTIVE') BLOCKED = ServiceStatus(0x02, 'blocked', 'BLOCKED') PAUSED = ServiceStatus(0x03, 'paused', 'SHUTDOWN') SHUTDOWN = ServiceStatus(0x04, 'shutdown', 'SHUTDOWN') CRASHED = ServiceStatus(0x06, 'crashed', 'SHUTDOWN') FAILED = ServiceStatus(0x08, 'failed to spawn', 'FAILED') BUILDING = ServiceStatus(0x09, 'building', 'BUILD') PROMOTING = ServiceStatus(0x10, 'promoting replica', 'PROMOTE') EJECTING = ServiceStatus(0x11, 'ejecting replica source', 'EJECT') LOGGING = ServiceStatus(0x12, 'transferring guest logs', 'LOGGING') UNKNOWN = ServiceStatus(0x16, 'unknown', 'ERROR') NEW = ServiceStatus(0x17, 'new', 'NEW') DELETED = ServiceStatus(0x05, 'deleted', 'DELETED') FAILED_TIMEOUT_GUESTAGENT = ServiceStatus(0x18, 'guestagent error', 'ERROR') INSTANCE_READY = ServiceStatus(0x19, 'instance ready', 'BUILD') RESTART_REQUIRED = ServiceStatus(0x20, 'restart required', 'RESTART_REQUIRED') HEALTHY = ServiceStatus(0x21, 'healthy', 'HEALTHY') # Dissuade further additions at run-time. ServiceStatus.__init__ = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/common/limits.py0000644000175000017500000003241100000000000020452 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. """ import collections import copy import math import re import time from oslo_serialization import jsonutils from oslo_utils import importutils from six.moves import http_client import webob.dec import webob.exc from trove.common import base_wsgi from trove.common import cfg from trove.common.i18n import _ from trove.common import wsgi CONF = cfg.CONF # Convenience constants for the limits dictionary passed to Limiter(). PER_SECOND = 1 PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 class Limit(object): """ Stores information about a limit for HTTP requests. """ UNITS = { 1: "SECOND", 60: "MINUTE", 60 * 60: "HOUR", 60 * 60 * 24: "DAY", } UNIT_MAP = {v: k for k, v in UNITS.items()} def __init__(self, verb, uri, regex, value, unit): """ Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) 
@param uri: Human-readable URI @param regex: Regular expression format for this limit @param value: Integer number of requests which can be made @param unit: Unit of measure for the value parameter """ self.verb = verb self.uri = uri self.regex = regex self.value = int(value) self.unit = unit self.unit_string = self.display_unit().lower() self.remaining = int(value) if value <= 0: raise ValueError(_("Limit value must be > 0")) self.last_request = None self.next_request = None self.water_level = 0 self.capacity = self.unit self.request_value = float(self.capacity) / float(self.value) msg = _("Only %(value)s %(verb)s request(s) can be " "made to %(uri)s every %(unit_string)s.") self.error_message = msg % self.__dict__ def __call__(self, verb, url): """ Represents a call to this limit from a relevant request. @param verb: string http verb (POST, GET, etc.) @param url: string URL """ if self.verb != verb or not re.match(self.regex, url): return now = self._get_time() if self.last_request is None: self.last_request = now leak_value = now - self.last_request self.water_level -= leak_value self.water_level = max(self.water_level, 0) self.water_level += self.request_value difference = self.water_level - self.capacity self.last_request = now if difference > 0: self.water_level -= self.request_value self.next_request = now + difference return difference cap = self.capacity water = self.water_level val = self.value self.remaining = math.floor(((cap - water) / cap) * val) self.next_request = now def _get_time(self): """Retrieve the current time. Broken out for testability.""" return time.time() def display_unit(self): """Display the string name of the unit.""" return self.UNITS.get(self.unit, "UNKNOWN") def display(self): """Return a useful representation of this class.""" return { "verb": self.verb, "URI": self.uri, "regex": self.regex, "value": self.value, "remaining": int(self.remaining), "unit": self.display_unit(), "resetTime": int(self.next_request or self._get_time()), } # "Limit" format is a dictionary with the HTTP verb, human-readable URI, # a regular-expression to match, value and unit of measure (PER_DAY, etc.) DEFAULT_LIMITS = [ Limit("POST", "*", ".*", CONF.http_post_rate, PER_MINUTE), Limit("PUT", "*", ".*", CONF.http_put_rate, PER_MINUTE), Limit("DELETE", "*", ".*", CONF.http_delete_rate, PER_MINUTE), Limit("GET", "*", ".*", CONF.http_get_rate, PER_MINUTE), Limit("POST", "*/mgmt", "^/mgmt", CONF.http_mgmt_post_rate, PER_MINUTE), ] class RateLimitingMiddleware(wsgi.TroveMiddleware): """ Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): """ Initialize new `RateLimitingMiddleware`, which wraps the given WSGI application and sets up the given limits. @param application: WSGI application to wrap @param limits: String describing limits @param limiter: String identifying class for representing limits Other parameters are passed to the constructor for the limiter. """ wsgi.Middleware.__init__(self, application) # Select the limiter class if limiter is None: limiter = Limiter else: limiter = importutils.import_class(limiter) # Parse the limits, if any are provided if limits is not None: limits = limiter.parse_limits(limits) self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): """ Represents a single call through this middleware. 
We should record the request if we have a limit relevant to it. If no limit is relevant to the request, ignore it. If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. """ verb = req.method url = req.url context = req.environ.get(wsgi.CONTEXT_KEY) tenant_id = None if context: tenant_id = context.project_id delay, error = self._limiter.check_for_delay(verb, url, tenant_id) if delay and self.enabled(): msg = _("This request was rate-limited.") retry = time.time() + delay return wsgi.OverLimitFault(msg, error, retry) req.environ["trove.limits"] = self._limiter.get_limits(tenant_id) return self.application def enabled(self): return True class Limiter(object): """ Rate-limit checking class which handles limits in memory. """ def __init__(self, limits, **kwargs): """ Initialize the new `Limiter`. @param limits: List of `Limit` objects """ self.limits = copy.deepcopy(limits) self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) # Pick up any per-user limit information for key, value in kwargs.items(): if key.startswith('user:'): username = key[5:] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): """ Return the limits for a given user. """ return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): """ Check the given verb/user/user triplet for limit. @return: Tuple of delay (in seconds) and error message (or None, None) """ delays = [] for limit in self.levels[username]: delay = limit(verb, url) if delay: delays.append((delay, limit.error_message)) if delays: delays.sort() return delays[0] return None, None # This was ported from nova. # Keeping it as a static method for the sake of consistency # # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. We # put this in the class so that subclasses can override the # default limit parsing. @staticmethod def parse_limits(limits): """ Convert a string into a list of Limit instances. This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of requests which can be made, and a unit of measure. Valid values for the latter are "SECOND", "MINUTE", "HOUR", and "DAY". @return: List of Limit instances. """ # Handle empty limit strings limits = limits.strip() if not limits: return [] # Split up the limits by semicolon result = [] for group in limits.split(';'): group = group.strip() if group[:1] != '(' or group[-1:] != ')': raise ValueError(_("Limit rules must be surrounded by " "parentheses")) group = group[1:-1] # Extract the Limit arguments args = [a.strip() for a in group.split(',')] if len(args) != 5: raise ValueError(_("Limit rules must contain the following " "arguments: verb, uri, regex, value, unit")) # Pull out the arguments verb, uri, regex, value, unit = args # Upper-case the verb verb = verb.upper() # Convert value--raises ValueError if it's not integer value = int(value) # Convert unit unit = unit.upper() if unit not in Limit.UNIT_MAP: raise ValueError(_("Invalid units specified")) unit = Limit.UNIT_MAP[unit] # Build a limit result.append(Limit(verb, uri, regex, value, unit)) return result class WsgiLimiter(object): """ Rate-limit checking from a WSGI application. 
    Uses an in-memory `Limiter`.

    To use, POST ``/`` with JSON data such as::

        {"verb": "GET", "path": "/servers"}

    and receive a 204 No Content, or a 403 Forbidden with an
    X-Wait-Seconds header containing the number of seconds to wait before
    the action would succeed.
    """

    def __init__(self, limits=None):
        """
        Initialize the new `WsgiLimiter`.

        @param limits: List of `Limit` objects
        """
        self._limiter = Limiter(limits or DEFAULT_LIMITS)

    @webob.dec.wsgify(RequestClass=base_wsgi.Request)
    def __call__(self, request):
        """
        Handles a call to this application. Returns 204 if the request is
        acceptable to the limiter, else a 403 is returned with a relevant
        header indicating when the request *will* succeed.
        """
        if request.method != "POST":
            raise webob.exc.HTTPMethodNotAllowed()

        try:
            info = dict(jsonutils.loads(request.body))
        except ValueError:
            raise webob.exc.HTTPBadRequest()

        username = request.path_info_pop()
        verb = info.get("verb")
        path = info.get("path")

        delay, error = self._limiter.check_for_delay(verb, path, username)

        if delay:
            headers = {"X-Wait-Seconds": "%.2f" % delay}
            return webob.exc.HTTPForbidden(headers=headers,
                                           explanation=error)
        else:
            return webob.exc.HTTPNoContent()


class WsgiLimiterProxy(object):
    """
    Rate-limit requests based on answers from a remote source.
    """

    def __init__(self, limiter_address):
        """
        Initialize the new `WsgiLimiterProxy`.

        @param limiter_address: IP/port combination of where to request limit
        """
        self.limiter_address = limiter_address

    def check_for_delay(self, verb, path, username=None):
        body = jsonutils.dump_as_bytes({"verb": verb, "path": path})
        headers = {"Content-Type": "application/json"}

        conn = http_client.HTTPConnection(self.limiter_address)

        if username:
            conn.request("POST", "/%s" % (username), body, headers)
        else:
            conn.request("POST", "/", body, headers)

        resp = conn.getresponse()

        # Any 2xx response means the request is acceptable to the limiter.
        if 200 <= resp.status < 300:
            return None, None

        return resp.getheader("X-Wait-Seconds"), resp.read() or None

    # This was ported from nova.
    # Keeping it as a static method for the sake of consistency
    #
    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor.
    # This implementation returns an empty list, since all limit
    # decisions are made by a remote server.
    @staticmethod
    def parse_limits(limits):
        """
        Ignore a limits string--simply doesn't apply for the limit proxy.

        @return: Empty list.
        """
        return []
trove-12.1.0.dev92/trove/common/local.py
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
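# NOTE(editor): illustrative sketch only, not part of the original module.
# It walks through the leaky-bucket arithmetic implemented by
# trove.common.limits.Limit above: each matching request pours
# unit/value seconds of "water" into a bucket of size `unit`, the bucket
# drains one second per second of wall time, and a request that would
# overflow the bucket is delayed by the overflow amount.


def _demo_rate_limit():
    from trove.common.limits import Limit, Limiter, PER_MINUTE

    # At most 2 POSTs per minute: capacity = 60, request_value = 30.
    limiter = Limiter([Limit("POST", "*", ".*", 2, PER_MINUTE)])

    assert limiter.check_for_delay("POST", "/instances") == (None, None)
    assert limiter.check_for_delay("POST", "/instances") == (None, None)

    # A third call in quick succession overflows the bucket and is
    # delayed by roughly request_value (~30s here).
    delay, error = limiter.check_for_delay("POST", "/instances")
    assert delay and delay > 0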
"""Local storage of variables using weak references""" import threading import weakref class WeakLocal(threading.local): def __getattribute__(self, attr): rval = super(WeakLocal, self).__getattribute__(attr) if rval: # NOTE(mikal): this bit is confusing. What is stored is a weak # reference, not the value itself. We therefore need to lookup # the weak reference and return the inner value here. rval = rval() return rval def __setattr__(self, attr, value): value = weakref.ref(value) return super(WeakLocal, self).__setattr__(attr, value) # NOTE(mikal): the name "store" should be deprecated in the future store = WeakLocal() # A "weak" store uses weak references and allows an object to fall out of scope # when it falls out of scope in the code that uses the thread local storage. A # "strong" store will hold a reference to the object so that it never falls out # of scope. weak_store = WeakLocal() strong_store = threading.local() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/models.py0000644000175000017500000001012600000000000020433 0ustar00coreycorey00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes that form the core of instances functionality.""" from oslo_utils.importutils import import_class from trove.common import cfg from trove.common import clients from trove.common.i18n import _ CONF = cfg.CONF class ModelBase(object): """ An object which can be stored in the database. """ _data_fields = [] _auto_generated_attrs = [] def _validate(self, errors): """Subclasses override this to offer additional validation. For each validation error a key with the field name and an error message is added to the dict. 
""" pass def data(self, **options): """Called to serialize object to a dictionary.""" data_fields = self._data_fields + self._auto_generated_attrs return {field: self[field] for field in data_fields} def is_valid(self): """Called when persisting data to ensure the format is correct.""" self.errors = {} self._validate(self.errors) # self._validate_columns_type() # self._before_validate() # self._validate() return self.errors == {} def __setitem__(self, key, value): """Overloaded to cause this object to look like a data entity.""" setattr(self, key, value) def __getitem__(self, key): """Overloaded to cause this object to look like a data entity.""" return getattr(self, key) def __eq__(self, other): """Overloaded to cause this object to look like a data entity.""" if not hasattr(other, 'id'): return False return type(other) == type(self) and other.id == self.id def __ne__(self, other): """Overloaded to cause this object to look like a data entity.""" return not self == other def __hash__(self): """Overloaded to cause this object to look like a data entity.""" return self.id.__hash__() class RemoteModelBase(ModelBase): # This should be set by the remote model during init time # The data() method will be using this _data_object = None def _data_item(self, data_object): data_fields = self._data_fields + self._auto_generated_attrs return {field: getattr(data_object, field) for field in data_fields} # data magic that will allow for a list of _data_object or a single item # if the object is a list, it will turn it into a list of hash's again def data(self, **options): if self._data_object is None: raise LookupError(_("data object is None")) if isinstance(self._data_object, list): return [self._data_item(item) for item in self._data_object] else: return self._data_item(self._data_object) class NetworkRemoteModelBase(RemoteModelBase): network_driver = None @classmethod def get_driver(cls, context, region_name): if not cls.network_driver: cls.network_driver = import_class(CONF.network_driver) return cls.network_driver(context, region_name) class NovaRemoteModelBase(RemoteModelBase): @classmethod def get_client(cls, context, region_name): return clients.create_nova_client(context, region_name) class SwiftRemoteModelBase(RemoteModelBase): @classmethod def get_client(cls, context, region_name): return clients.create_swift_client(context, region_name) class CinderRemoteModelBase(RemoteModelBase): @classmethod def get_client(cls, context): return clients.create_cinder_client(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/common/neutron.py0000644000175000017500000001147700000000000020654 0ustar00coreycorey00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0
trove-12.1.0.dev92/trove/common/neutron.py0000644000175000017500000001147700000000000020654 0ustar00coreycorey00000000000000# Copyright 2019 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import netaddr
from oslo_log import log as logging

from trove.common import cfg
from trove.common import clients
from trove.common import exception

CONF = cfg.CONF
LOG = logging.getLogger(__name__)

MGMT_NETWORKS = None
MGMT_CIDRS = None


def get_management_networks(context):
    """Cache the management network names.

    When CONF.management_networks is changed, the Trove services need to
    be restarted so that the global cache is refreshed.
    """
    global MGMT_NETWORKS

    if MGMT_NETWORKS is not None:
        return MGMT_NETWORKS

    MGMT_NETWORKS = []
    if len(CONF.management_networks) > 0:
        neutron_client = clients.create_neutron_client(context)

        for net_id in CONF.management_networks:
            MGMT_NETWORKS.append(
                neutron_client.show_network(net_id)['network']['name']
            )

    return MGMT_NETWORKS


def reset_management_networks():
    """This method is only for testing purposes."""
    global MGMT_NETWORKS

    MGMT_NETWORKS = None


def create_port(client, name, description, network_id, security_groups,
                is_public=False):
    port_body = {
        "port": {
            "name": name,
            "description": description,
            "network_id": network_id,
            "security_groups": security_groups
        }
    }
    port = client.create_port(body=port_body)
    port_id = port['port']['id']

    if is_public:
        public_network_id = get_public_network(client)
        if not public_network_id:
            raise exception.PublicNetworkNotFound()

        fip_body = {
            "floatingip": {
                'floating_network_id': public_network_id,
                'port_id': port_id,
            }
        }
        client.create_floatingip(fip_body)

    return port_id


def delete_port(client, id):
    ret = client.list_floatingips(port_id=id)
    if len(ret['floatingips']) > 0:
        for fip in ret['floatingips']:
            try:
                client.delete_floatingip(fip['id'])
            except Exception as e:
                LOG.error(
                    'Failed to delete floating IP for port %s, error: %s',
                    id, str(e)
                )

    client.delete_port(id)
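# Illustrative sketch (hypothetical identifiers): creating a port for a
# database instance. The client is assumed to come from
# clients.create_neutron_client(context); the UUIDs are placeholders.
def _example_create_port(client):
    return create_port(
        client,
        name='trove-mysql-1',
        description='example port for a trove instance',
        network_id='11111111-2222-3333-4444-555555555555',
        security_groups=['66666666-7777-8888-9999-aaaaaaaaaaaa'],
        is_public=False)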
""" if CONF.network.public_network_id: return CONF.network.public_network_id kwargs = {'router:external': True} ret = client.list_networks(**kwargs) if len(ret.get('networks', [])) == 0: return None return ret['networks'][0].get('id') def create_security_group(client, name, instance_id): body = { 'security_group': { 'name': name, 'description': 'Security group for trove instance %s' % instance_id } } ret = client.create_security_group(body=body) return ret['security_group']['id'] def create_security_group_rule(client, sg_id, protocol, ports, remote_ips): for remote_ip in remote_ips: ip = netaddr.IPNetwork(remote_ip) ethertype = 'IPv4' if ip.version == 4 else 'IPv6' for port_or_range in set(ports): from_, to_ = port_or_range[0], port_or_range[-1] body = { "security_group_rule": { "direction": "ingress", "ethertype": ethertype, "protocol": protocol, "security_group_id": sg_id, "port_range_min": int(from_), "port_range_max": int(to_), "remote_ip_prefix": remote_ip } } client.create_security_group_rule(body) def get_subnet_cidrs(client, network_id): cidrs = [] subnets = client.list_subnets(network_id=network_id)['subnets'] for subnet in subnets: cidrs.append(subnet.get('cidr')) return cidrs def get_mamangement_subnet_cidrs(client): """Cache the management subnet CIDRS.""" global MGMT_CIDRS if MGMT_CIDRS is not None: return MGMT_CIDRS MGMT_CIDRS = [] if len(CONF.management_networks) > 0: MGMT_CIDRS = get_subnet_cidrs(client, CONF.management_networks[0]) return MGMT_CIDRS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/notification.py0000644000175000017500000005670000000000000021646 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import copy import traceback from oslo_log import log as logging from trove.common import cfg from trove.common.exception import TroveError from trove.common.i18n import _ from trove.common import timeutils from trove.conductor import api as conductor_api from trove import rpc LOG = logging.getLogger(__name__) CONF = cfg.CONF class EndNotification(object): @property def _notifier(self): ''' Returns the notification for Trove API or TaskManager, otherwise returns an API to the conductor to whom to forward the notification ''' return (self.context.notification if self.context.notification.server_type in ['api', 'taskmanager'] else conductor_api.API(self.context)) def __init__(self, context, **kwargs): self.context = context self.context.notification.payload.update(kwargs) def __enter__(self): return self.context.notification def __exit__(self, etype, value, tb): if etype: message = str(value) exception = traceback.format_exception(etype, value, tb) self._notifier.notify_exc_info(message, exception) else: self._notifier.notify_end() class StartNotification(EndNotification): def __enter__(self): self.context.notification.notify_start() return super(StartNotification, self).__enter__() class NotificationCastWrapper(object): def __init__(self, context, api): self.context = context self.api = api self.has_notification = hasattr(context, 'notification') def __enter__(self): if self.has_notification: self.old_server_type = self.context.notification.server_type self.context.notification.server_type = self.api def __exit__(self, etype, value, traceback): if self.has_notification: self.context.notification.server_type = self.old_server_type self.context.notification.needs_end_notification = False class TroveBaseTraits(object): ''' The base traits of all trove.* notifications. 
    This class should correspond to trove_base_traits in
    ceilometer/event_definitions.yaml
    '''

    event_type_format = 'trove.instance.%s'

    def __init__(self, **kwargs):
        self.payload = {}

        instance = kwargs.pop('instance', None)
        if instance:
            self.instance = instance
            self.context = instance.context
            created_time = timeutils.isotime(instance.db_info.created)
            self.payload.update({
                'created_at': created_time,
                'name': instance.name,
                'instance_id': instance.id,
                'instance_name': instance.name,
                'instance_type_id': instance.flavor_id,
                'launched_at': created_time,
                'nova_instance_id': instance.server_id,
                'region': CONF.region,
                'state_description': instance.status.lower(),
                'state': instance.status.lower(),
                'tenant_id': instance.tenant_id,
                'user_id': instance.context.user,
            })

        self.payload.update(kwargs)

    def serialize(self, ctxt):
        if hasattr(self, 'instance'):
            if 'instance_type' not in self.payload:
                flavor_id = self.instance.flavor_id
                flavor = self.instance.nova_client.flavors.get(flavor_id)
                self.payload['instance_type'] = flavor.name
            self.payload['service_id'] = self.instance._get_service_id(
                self.instance.datastore_version.manager,
                CONF.notification_service_id)
        return self.payload

    def deserialize(self, ctxt, payload):
        self.payload = payload
        self.context = ctxt
        return self

    def notify(self, event_type, publisher_id=None):
        publisher_id = publisher_id or CONF.host
        event_type = self.event_type_format % event_type
        event_payload = self.serialize(self.context)
        LOG.debug('Sending event: %(event_type)s, %(payload)s',
                  {'event_type': event_type, 'payload': event_payload})
        notifier = rpc.get_notifier(
            service='taskmanager', publisher_id=publisher_id)
        notifier.info(self.context, event_type, event_payload)


class TroveCommonTraits(TroveBaseTraits):
    '''
    Additional traits for trove.* notifications that describe instance
    action events.

    This class should correspond to trove_common_traits in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        self.server = kwargs.pop('server', None)
        super(TroveCommonTraits, self).__init__(**kwargs)

    def serialize(self, ctxt):
        if hasattr(self, 'instance'):
            instance = self.instance
            if 'instance_type' not in self.payload:
                flavor = instance.nova_client.flavors.get(instance.flavor_id)
                self.payload['instance_size'] = flavor.ram
            if self.server is None and instance.server_id:
                try:
                    self.server = instance.nova_client.servers.get(
                        instance.server_id)
                except Exception:
                    pass
            if self.server:
                self.payload['availability_zone'] = getattr(
                    self.server, 'OS-EXT-AZ:availability_zone', None)
            if CONF.get(instance.datastore_version.manager).volume_support:
                self.payload.update({
                    'volume_size': instance.volume_size,
                    'nova_volume_id': instance.volume_id
                })

        return TroveBaseTraits.serialize(self, ctxt)


class TroveInstanceCreate(TroveCommonTraits):
    '''
    Additional traits for trove.instance.create notifications that
    describe instance action events.

    This class should correspond to trove_instance_create in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        super(TroveInstanceCreate, self).__init__(**kwargs)

    def notify(self):
        super(TroveInstanceCreate, self).notify('create')


class TroveInstanceModifyVolume(TroveCommonTraits):
    '''
    Additional traits for trove.instance.modify_volume notifications that
    describe instance action events.

    This class should correspond to trove_instance_modify_volume in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        super(TroveInstanceModifyVolume, self).__init__(**kwargs)

    def notify(self):
        super(TroveInstanceModifyVolume, self).notify('modify_volume')
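# Illustrative sketch (hypothetical usage): emitting one of the
# trove.instance.* events defined above. `instance` is assumed to be a
# Trove instance model object (carrying context, db_info, flavor_id and
# so on), which is what TroveBaseTraits.__init__ expects.
def _example_emit_modify_volume(instance):
    # event_type_format is 'trove.instance.%s', so this sends a
    # 'trove.instance.modify_volume' event with the serialized payload.
    TroveInstanceModifyVolume(instance=instance,
                              old_volume_size=10).notify()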
class TroveInstanceModifyFlavor(TroveCommonTraits):
    '''
    Additional traits for trove.instance.modify_flavor notifications that
    describe instance action events.

    This class should correspond to trove_instance_modify_flavor in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        super(TroveInstanceModifyFlavor, self).__init__(**kwargs)

    def notify(self):
        super(TroveInstanceModifyFlavor, self).notify('modify_flavor')


class TroveInstanceDelete(TroveCommonTraits):
    '''
    Additional traits for trove.instance.delete notifications that
    describe instance action events.

    This class should correspond to trove_instance_delete in
    ceilometer/event_definitions.yaml
    '''

    def __init__(self, **kwargs):
        super(TroveInstanceDelete, self).__init__(**kwargs)

    def notify(self):
        super(TroveInstanceDelete, self).notify('delete')


class DBaaSQuotas(object):
    '''
    The traits of dbaas.quotas notifications.

    This class should correspond to dbaas.quotas in
    ceilometer/event_definitions.yaml
    '''

    event_type = 'dbaas.quota'

    def __init__(self, context, quota, usage):
        self.context = context

        self.payload = {
            'resource': quota.resource,
            'in_use': usage.in_use,
            'reserved': usage.reserved,
            'limit': quota.hard_limit,
            'updated': usage.updated
        }

    def notify(self):
        LOG.debug('Sending event: %(event_type)s, %(payload)s',
                  {'event_type': DBaaSQuotas.event_type,
                   'payload': self.payload})

        notifier = rpc.get_notifier(
            service='taskmanager', publisher_id=CONF.host)
        notifier.info(self.context, DBaaSQuotas.event_type, self.payload)


class DBaaSAPINotification(object):
    '''
    The traits of dbaas.* notifications (except quotas).

    This class should correspond to dbaas_base_traits in
    ceilometer/event_definitions.yaml
    '''

    event_type_format = 'dbaas.%s.%s'
    notify_callback = None

    @classmethod
    def register_notify_callback(cls, callback):
        """A callback registered here will be fired whenever a
        notification is sent out. The callback should take a notification
        object and an event qualifier.
""" cls.notify_callback = callback @abc.abstractmethod def event_type(self): 'Returns the event type (like "create" for dbaas.create.start)' pass @abc.abstractmethod def required_start_traits(self): 'Returns list of required traits for start notification' pass def optional_start_traits(self): 'Returns list of optional traits for start notification' return [] def required_end_traits(self): 'Returns list of required traits for end notification' return [] def optional_end_traits(self): 'Returns list of optional traits for end notification' return [] def required_error_traits(self): 'Returns list of required traits for error notification' return ['message', 'exception'] def optional_error_traits(self): 'Returns list of optional traits for error notification' return ['instance_id'] def required_base_traits(self): return ['tenant_id', 'client_ip', 'server_ip', 'server_type', 'request_id'] @property def server_type(self): return self.payload['server_type'] @server_type.setter def server_type(self, server_type): self.payload['server_type'] = server_type @property def request_id(self): return self.payload['request_id'] def __init__(self, context, **kwargs): self.context = context self.needs_end_notification = True self.payload = {} if 'request' in kwargs: request = kwargs.pop('request') self.payload.update({ 'request_id': context.request_id, 'server_type': 'api', 'client_ip': request.remote_addr, 'server_ip': request.host, 'tenant_id': context.project_id, }) elif 'request_id' not in kwargs: raise TroveError(_("Notification %s must include 'request'" " property") % self.__class__.__name__) self.payload.update(kwargs) def serialize(self, context): return self.payload def validate(self, required_traits): required_keys = set(required_traits) provided_keys = set(self.payload.keys()) if not required_keys.issubset(provided_keys): raise TroveError(_("The following required keys not defined for" " notification %(name)s: %(keys)s") % {'name': self.__class__.__name__, 'keys': list(required_keys - provided_keys)}) if 'server_type' not in self.payload: raise TroveError(_("Notification %s must include a" " 'server_type' for correct routing") % self.__class__.__name__) def _notify(self, event_qualifier, required_traits, optional_traits, **kwargs): self.payload.update(kwargs) self.validate(self.required_base_traits() + required_traits) available_values = self.serialize(self.context) payload = {k: available_values[k] for k in self.required_base_traits() + required_traits} for k in optional_traits: if k in available_values: payload[k] = available_values[k] qualified_event_type = (DBaaSAPINotification.event_type_format % (self.event_type(), event_qualifier)) LOG.debug('Sending event: %(event_type)s, %(payload)s', {'event_type': qualified_event_type, 'payload': payload}) context = copy.copy(self.context) del context.notification notifier = rpc.get_notifier(service=self.payload['server_type']) notifier.info(context, qualified_event_type, self.payload) if self.notify_callback: self.notify_callback(event_qualifier) def notify_start(self, **kwargs): self._notify('start', self.required_start_traits(), self.optional_start_traits(), **kwargs) def notify_end(self, **kwargs): if self.needs_end_notification: self._notify('end', self.required_end_traits(), self.optional_end_traits(), **kwargs) def notify_exc_info(self, message, exception): self.payload.update({ 'message': message, 'exception': exception }) self._notify('error', self.required_error_traits(), self.optional_error_traits()) class 
DBaaSInstanceCreate(DBaaSAPINotification):

    def event_type(self):
        return 'instance_create'

    def required_start_traits(self):
        return ['name', 'flavor_id', 'datastore', 'datastore_version',
                'image_id', 'availability_zone', 'region_name']

    def optional_start_traits(self):
        return ['databases', 'users', 'volume_size', 'restore_point',
                'replica_of', 'replica_count', 'cluster_id', 'backup_id',
                'nics']

    def required_end_traits(self):
        return ['instance_id']


class DBaaSInstanceReboot(DBaaSAPINotification):

    def event_type(self):
        return 'instance_reboot'

    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceRestart(DBaaSAPINotification):

    def event_type(self):
        return 'instance_restart'

    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceResizeVolume(DBaaSAPINotification):

    def event_type(self):
        return 'instance_resize_volume'

    def required_start_traits(self):
        return ['instance_id', 'new_size']


class DBaaSInstanceResizeInstance(DBaaSAPINotification):

    def event_type(self):
        return 'instance_resize_instance'

    def required_start_traits(self):
        return ['instance_id', 'new_flavor_id']


class DBaaSInstancePromote(DBaaSAPINotification):

    def event_type(self):
        return 'instance_promote'

    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceEject(DBaaSAPINotification):

    def event_type(self):
        return 'instance_eject'

    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceDelete(DBaaSAPINotification):

    def event_type(self):
        return 'instance_delete'

    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceResetStatus(DBaaSAPINotification):

    def event_type(self):
        return 'instance_reset_status'

    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceDetach(DBaaSAPINotification):

    def event_type(self):
        return 'instance_detach'

    def required_start_traits(self):
        return ['instance_id']


class DBaaSInstanceAttachConfiguration(DBaaSAPINotification):

    def event_type(self):
        return 'instance_attach_configuration'

    def required_start_traits(self):
        return ['instance_id', 'configuration_id']


class DBaaSInstanceDetachConfiguration(DBaaSAPINotification):

    def event_type(self):
        return 'instance_detach_configuration'

    def required_start_traits(self):
        return ['instance_id']


class DBaaSClusterAttachConfiguration(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_attach_configuration'

    def required_start_traits(self):
        return ['cluster_id', 'configuration_id']


class DBaaSClusterDetachConfiguration(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_detach_configuration'

    def required_start_traits(self):
        return ['cluster_id']


class DBaaSClusterCreate(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_create'

    def required_start_traits(self):
        return ['name', 'datastore', 'datastore_version']

    def required_end_traits(self):
        return ['cluster_id']


class DBaaSClusterRestart(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_restart'

    def required_start_traits(self):
        return ['cluster_id']


class DBaaSClusterUpgrade(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_upgrade'

    def required_start_traits(self):
        return ['cluster_id', 'datastore_version']
class DBaaSClusterDelete(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_delete'

    def required_start_traits(self):
        return ['cluster_id']


class DBaaSClusterResetStatus(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_reset_status'

    def required_start_traits(self):
        return ['cluster_id']


class DBaaSClusterAddShard(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_add_shard'

    def required_start_traits(self):
        return ['cluster_id']


class DBaaSClusterGrow(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_grow'

    def required_start_traits(self):
        return ['cluster_id']

    def required_end_traits(self):
        return ['cluster_id']


class DBaaSClusterShrink(DBaaSAPINotification):

    def event_type(self):
        return 'cluster_shrink'

    def required_start_traits(self):
        return ['cluster_id']

    def required_end_traits(self):
        return ['cluster_id']


class DBaaSBackupCreate(DBaaSAPINotification):

    def event_type(self):
        return 'backup_create'

    def required_start_traits(self):
        return ['name', 'instance_id', 'description', 'parent_id']

    def required_end_traits(self):
        return ['backup_id']


class DBaaSBackupDelete(DBaaSAPINotification):

    def event_type(self):
        return 'backup_delete'

    def required_start_traits(self):
        return ['backup_id']


class DBaaSDatabaseCreate(DBaaSAPINotification):

    def event_type(self):
        return 'database_create'

    def required_start_traits(self):
        return ['instance_id', 'dbname']


class DBaaSDatabaseDelete(DBaaSAPINotification):

    def event_type(self):
        return 'database_delete'

    def required_start_traits(self):
        return ['instance_id', 'dbname']


class DBaaSUserCreate(DBaaSAPINotification):

    def event_type(self):
        return 'user_create'

    def required_start_traits(self):
        return ['instance_id', 'username']


class DBaaSUserDelete(DBaaSAPINotification):

    def event_type(self):
        return 'user_delete'

    def required_start_traits(self):
        return ['instance_id', 'username']


class DBaaSUserUpdateAttributes(DBaaSAPINotification):

    def event_type(self):
        return 'user_update_attributes'

    def required_start_traits(self):
        return ['instance_id', 'username']


class DBaaSUserGrant(DBaaSAPINotification):

    def event_type(self):
        return 'user_grant'

    def required_start_traits(self):
        return ['instance_id', 'username', 'database']


class DBaaSUserRevoke(DBaaSAPINotification):

    def event_type(self):
        return 'user_revoke'

    def required_start_traits(self):
        return ['instance_id', 'username', 'database']


class DBaaSUserChangePassword(DBaaSAPINotification):

    def event_type(self):
        return 'user_change_password'

    def required_start_traits(self):
        return ['instance_id', 'username']


class DBaaSConfigurationCreate(DBaaSAPINotification):

    def event_type(self):
        return 'configuration_create'

    def required_start_traits(self):
        return ['name', 'datastore', 'datastore_version']

    def required_end_traits(self):
        return ['configuration_id']
class DBaaSConfigurationDelete(DBaaSAPINotification):

    def event_type(self):
        return 'configuration_delete'

    def required_start_traits(self):
        return ['configuration_id']


class DBaaSConfigurationUpdate(DBaaSAPINotification):

    def event_type(self):
        return 'configuration_update'

    def required_start_traits(self):
        return ['configuration_id', 'name', 'description']


class DBaaSConfigurationEdit(DBaaSAPINotification):

    def event_type(self):
        return 'configuration_edit'

    def required_start_traits(self):
        return ['configuration_id']


class DBaaSInstanceUpgrade(DBaaSAPINotification):

    def event_type(self):
        return 'upgrade'

    def required_start_traits(self):
        return ['instance_id', 'datastore_version_id']


class DBaaSInstanceMigrate(DBaaSAPINotification):

    def event_type(self):
        return 'migrate'

    def required_start_traits(self):
        return ['host']
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/common/pagination.py0000644000175000017500000001235500000000000021307 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import bisect
import collections

import six.moves.urllib.parse as urllib_parse


def url_quote(s):
    if s is None:
        return s
    return urllib_parse.quote(str(s))


def paginate_list(li, limit=None, marker=None, include_marker=False,
                  key=lambda x: x):
    """Sort the given list and return a sublist containing a page of items.

    :param list li:             The list to be paginated.
    :param int limit:           Maximum number of items to be returned.
    :param marker:              Key of the first item to appear on the
                                sublist.
    :param bool include_marker: Include the marker value itself in the
                                sublist.
    :param lambda key:          Sorting expression.
    :return:                    A page of items and the marker of the next
                                page (None if there is no next page).
    """
    sli = sorted(li, key=key)
    index = [key(item) for item in sli]
    if marker is None:
        marker = ''
    if include_marker:
        pos = bisect.bisect_left(index, marker)
    else:
        pos = bisect.bisect(index, marker)
    if limit and pos + limit < len(sli):
        page = sli[pos:pos + limit]
        return page, key(page[-1])
    else:
        return sli[pos:], None


def paginate_object_list(li, attr_name, limit=None, marker=None,
                         include_marker=False):
    """Wrapper for paginate_list to handle lists of generic objects
    paginated based on an attribute.
    """
    return paginate_list(li, limit=limit, marker=marker,
                         include_marker=include_marker,
                         key=lambda x: getattr(x, attr_name))


def paginate_dict_list(li, key, limit=None, marker=None,
                       include_marker=False):
    """Wrapper for paginate_list to handle lists of dicts paginated
    based on a key.
    """
    return paginate_list(li, limit=limit, marker=marker,
                         include_marker=include_marker,
                         key=lambda x: x[key])
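# Illustrative sketch: paging through a five-element list with
# paginate_list(). The data is hypothetical; each call returns a page
# plus the marker to pass into the next call (None when no pages remain).
def _example_paginate_list():
    items = ['a', 'b', 'c', 'd', 'e']
    page1, marker1 = paginate_list(items, limit=2)        # ['a', 'b'], 'b'
    page2, marker2 = paginate_list(items, limit=2,
                                   marker=marker1)        # ['c', 'd'], 'd'
    page3, marker3 = paginate_list(items, limit=2,
                                   marker=marker2)        # ['e'], None
    return page1, page2, page3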
""" return paginate_list(li, limit=limit, marker=marker, include_marker=include_marker, key=lambda x: x[key]) class PaginatedDataView(object): def __init__(self, collection_type, collection, current_page_url, next_page_marker=None): self.collection_type = collection_type self.collection = collection self.current_page_url = current_page_url self.next_page_marker = url_quote(next_page_marker) def data(self): return {self.collection_type: self.collection, 'links': self._links, } def _links(self): if not self.next_page_marker: return [] app_url = AppUrl(self.current_page_url) next_url = app_url.change_query_params(marker=self.next_page_marker) next_link = { 'rel': 'next', 'href': str(next_url), } return [next_link] class SimplePaginatedDataView(object): # In some cases, we can't create a PaginatedDataView because # we don't have a collection query object to create a view on. # In that case, we have to supply the URL and collection manually. def __init__(self, url, name, view, marker): self.url = url self.name = name self.view = view self.marker = url_quote(marker) def data(self): if not self.marker: return self.view.data() app_url = AppUrl(self.url) next_url = str(app_url.change_query_params(marker=self.marker)) next_link = {'rel': 'next', 'href': next_url} view_data = {self.name: self.view.data()[self.name], 'links': [next_link]} return view_data class AppUrl(object): def __init__(self, url): self.url = url def __str__(self): return self.url def change_query_params(self, **kwargs): # Seeks out the query params in a URL and changes/appends to them # from the kwargs given. So change_query_params(foo='bar') # would remove from the URL any old instance of foo=something and # then add &foo=bar to the URL. parsed_url = urllib_parse.urlparse(self.url) # Build a dictionary out of the query parameters in the URL # with an OrderedDict to preserve the order of the URL. query_params = collections.OrderedDict( urllib_parse.parse_qsl(parsed_url.query)) # Use kwargs to change or update any values in the query dict. query_params.update(kwargs) # Build a new query based on the updated query dict. new_query_params = urllib_parse.urlencode(query_params) return self.__class__( # Force HTTPS. urllib_parse.ParseResult('https', parsed_url.netloc, parsed_url.path, parsed_url.params, new_query_params, parsed_url.fragment).geturl()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/pastedeploy.py0000644000175000017500000001152100000000000021501 0ustar00coreycorey00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from paste import deploy from trove.common import local class BasePasteFactory(object): """A base class for paste app and filter factories. Sub-classes must override the KEY class attribute and provide a __call__ method. """ KEY = None def __init__(self, data): self.data = data def _import_factory(self, local_conf): """Import an app/filter class. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/common/pastedeploy.py0000644000175000017500000001152100000000000021501 0ustar00coreycorey00000000000000# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from paste import deploy

from trove.common import local


class BasePasteFactory(object):
    """A base class for paste app and filter factories.

    Sub-classes must override the KEY class attribute and provide
    a __call__ method.
    """

    KEY = None

    def __init__(self, data):
        self.data = data

    def _import_factory(self, local_conf):
        """Import an app/filter class.

        Lookup the KEY from the PasteDeploy local conf and import the
        class named there. This class can then be used as an app or
        filter factory.

        Note we support the <module>:<class> format.

        Note also that if you do e.g.

          key =
              value

        then ConfigParser returns a value with a leading newline, so
        we strip() the value before using it.
        """
        mod_str, _sep, class_str = local_conf[self.KEY].strip().rpartition(':')
        del local_conf[self.KEY]

        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)


class AppFactory(BasePasteFactory):
    """A Generic paste.deploy app factory.

    This requires openstack.app_factory to be set to a callable which
    returns a WSGI app when invoked. The format of the name is
    <module>:<class> e.g.

      [app:myfooapp]
      paste.app_factory = trove.common.pastedeploy:app_factory
      openstack.app_factory = myapp:Foo

    The WSGI app constructor must accept a data object and a local config
    dict as its two arguments.
    """

    KEY = 'openstack.app_factory'

    def __call__(self, global_conf, **local_conf):
        """The actual paste.app_factory protocol method."""
        factory = self._import_factory(local_conf)
        return factory(self.data, **local_conf)


class FilterFactory(AppFactory):
    """A Generic paste.deploy filter factory.

    This requires openstack.filter_factory to be set to a callable which
    returns a WSGI filter when invoked. The format is <module>:<class>
    e.g.

      [filter:myfoofilter]
      paste.filter_factory = trove.common.pastedeploy:filter_factory
      openstack.filter_factory = myfilter:Foo

    The WSGI filter constructor must accept a WSGI app, a data object and
    a local config dict as its three arguments.
    """

    KEY = 'openstack.filter_factory'

    def __call__(self, global_conf, **local_conf):
        """The actual paste.filter_factory protocol method."""
        factory = self._import_factory(local_conf)

        def filter(app):
            return factory(app, self.data, **local_conf)

        return filter


def app_factory(global_conf, **local_conf):
    """A paste app factory used with paste_deploy_app()."""
    return local.store.app_factory(global_conf, **local_conf)


def filter_factory(global_conf, **local_conf):
    """A paste filter factory used with paste_deploy_app()."""
    return local.store.filter_factory(global_conf, **local_conf)


def paste_deploy_app(paste_config_file, app_name, data):
    """Load a WSGI app from a PasteDeploy configuration.

    Use deploy.loadapp() to load the app from the PasteDeploy
    configuration, ensuring that the supplied data object is passed to
    the app and filter factories defined in this module.

    To use these factories and the data object, the configuration should
    look like this:

      [app:myapp]
      paste.app_factory = trove.common.pastedeploy:app_factory
      openstack.app_factory = myapp:App
      ...
      [filter:myfilter]
      paste.filter_factory = trove.common.pastedeploy:filter_factory
      openstack.filter_factory = myapp:Filter

    and then:

      myapp.py:

        class App(object):
            def __init__(self, data):
                ...

        class Filter(object):
            def __init__(self, app, data):
                ...
:param paste_config_file: a PasteDeploy config file :param app_name: the name of the app/pipeline to load from the file :param data: a data object to supply to the app and its filters :returns: the WSGI app """ (af, ff) = (AppFactory(data), FilterFactory(data)) local.store.app_factory = af local.store.filter_factory = ff try: return deploy.loadapp("config:%s" % paste_config_file, name=app_name) finally: del local.store.app_factory del local.store.filter_factory ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7401102 trove-12.1.0.dev92/trove/common/policies/0000755000175000017500000000000000000000000020405 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policies/__init__.py0000644000175000017500000000325200000000000022520 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from trove.common.policies import backups from trove.common.policies import base from trove.common.policies import clusters from trove.common.policies import configuration_parameters from trove.common.policies import configurations from trove.common.policies import databases from trove.common.policies import datastores from trove.common.policies import flavors from trove.common.policies import instances from trove.common.policies import limits from trove.common.policies import modules from trove.common.policies import root from trove.common.policies import user_access from trove.common.policies import users def list_rules(): return itertools.chain( base.list_rules(), instances.list_rules(), root.list_rules(), users.list_rules(), user_access.list_rules(), databases.list_rules(), clusters.list_rules(), backups.list_rules(), configurations.list_rules(), configuration_parameters.list_rules(), datastores.list_rules(), flavors.list_rules(), limits.list_rules(), modules.list_rules() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policies/backups.py0000644000175000017500000000420300000000000022406 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy

from trove.common.policies.base import PATH_BACKUPS, PATH_BACKUP

rules = [
    policy.DocumentedRuleDefault(
        name='backup:create',
        check_str='rule:admin_or_owner',
        description='Create a backup of a database instance.',
        operations=[
            {
                'path': PATH_BACKUPS,
                'method': 'POST'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='backup:delete',
        check_str='rule:admin_or_owner',
        description='Delete a backup of a database instance.',
        operations=[
            {
                'path': PATH_BACKUP,
                'method': 'DELETE'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='backup:index',
        check_str='rule:admin_or_owner',
        description='List all backups.',
        operations=[
            {
                'path': PATH_BACKUPS,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='backup:index:all_projects',
        check_str='role:admin',
        description='List backups for all the projects.',
        operations=[
            {
                'path': PATH_BACKUPS,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='backup:show',
        check_str='rule:admin_or_owner',
        description='Get information about a backup.',
        operations=[
            {
                'path': PATH_BACKUP,
                'method': 'GET'
            }
        ])
]


def list_rules():
    return rules
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/common/policies/base.py0000644000175000017500000000435000000000000021673 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy PATH_BASE = '/v1.0/{account_id}' PATH_INSTANCES = PATH_BASE + '/instances' PATH_INSTANCES_DETAIL = PATH_INSTANCES + '/detail' PATH_INSTANCE = PATH_INSTANCES + '/{instance_id}' PATH_INSTANCE_ACTION = PATH_INSTANCE + '/action' PATH_USERS = PATH_INSTANCE + '/users' PATH_USER = PATH_USERS + '/{user}' PATH_ACCESSES = PATH_USER + '/databases' PATH_ACCESS = PATH_ACCESSES + '/{database}' PATH_DATABASES = PATH_INSTANCE + '/databases' PATH_DATABASE = PATH_DATABASES + '/{database}' PATH_CLUSTERS = PATH_BASE + '/clusters' PATH_CLUSTER = PATH_CLUSTERS + '/{cluster}' PATH_CLUSTER_INSTANCES = PATH_CLUSTER + '/instances' PATH_CLUSTER_INSTANCE = PATH_CLUSTER_INSTANCES + '/{instance}' PATH_BACKUPS = PATH_BASE + '/backups' PATH_BACKUP = PATH_BACKUPS + '/{backup}' PATH_CONFIGS = PATH_BASE + '/configurations' PATH_CONFIG = PATH_CONFIGS + '/{config}' PATH_DATASTORES = PATH_BASE + '/datastores' PATH_DATASTORE = PATH_DATASTORES + '/{datastore}' PATH_VERSIONS = PATH_DATASTORES + '/versions' PATH_FLAVORS = PATH_BASE + '/flavors' PATH_FLAVOR = PATH_FLAVORS + '/{flavor}' PATH_LIMITS = PATH_BASE + '/limits' PATH_MODULES = PATH_BASE + '/modules' PATH_MODULE = PATH_MODULES + '/{module}' rules = [ policy.RuleDefault( 'admin', 'role:admin or is_admin:True', description='Must be an administrator.'), policy.RuleDefault( 'admin_or_owner', 'rule:admin or tenant:%(tenant)s', description='Must be an administrator or owner of the object.'), policy.RuleDefault( 'default', 'rule:admin_or_owner', description='Must be an administrator or owner of the object.') ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policies/clusters.py0000644000175000017500000000607300000000000022631 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy

from trove.common.policies.base import (
    PATH_CLUSTERS, PATH_CLUSTER, PATH_CLUSTER_INSTANCE)

rules = [
    policy.DocumentedRuleDefault(
        name='cluster:create',
        check_str='rule:admin_or_owner',
        description='Create a cluster.',
        operations=[
            {
                'path': PATH_CLUSTERS,
                'method': 'POST'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='cluster:delete',
        check_str='rule:admin_or_owner',
        description='Delete a cluster.',
        operations=[
            {
                'path': PATH_CLUSTER,
                'method': 'DELETE'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='cluster:force_delete',
        check_str='rule:admin_or_owner',
        description='Forcibly delete a cluster.',
        operations=[
            {
                'path': PATH_CLUSTER + ' (reset-status)',
                'method': 'POST'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='cluster:index',
        check_str='rule:admin_or_owner',
        description='List all clusters.',
        operations=[
            {
                'path': PATH_CLUSTERS,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='cluster:show',
        check_str='rule:admin_or_owner',
        description='Get information about a cluster.',
        operations=[
            {
                'path': PATH_CLUSTER,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='cluster:show_instance',
        check_str='rule:admin_or_owner',
        description='Get information about an instance in a cluster.',
        operations=[
            {
                'path': PATH_CLUSTER_INSTANCE,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='cluster:action',
        check_str='rule:admin_or_owner',
        description='Commit an action against a cluster.',
        operations=[
            {
                'path': PATH_CLUSTER,
                'method': 'POST'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='cluster:reset-status',
        check_str='rule:admin',
        description='Reset the status of a cluster to NONE.',
        operations=[
            {
                'path': PATH_CLUSTER + ' (reset-status)',
                'method': 'POST'
            }
        ])
]


def list_rules():
    return rules
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/common/policies/configuration_parameters.py0000644000175000017500000000445600000000000026056 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy

from trove.common.policies.base import PATH_DATASTORE, PATH_VERSIONS

rules = [
    policy.DocumentedRuleDefault(
        name='configuration-parameter:index',
        check_str='rule:admin_or_owner',
        description='List all parameters bound to a datastore version.',
        operations=[
            {
                'path': PATH_DATASTORE + '/versions/{version}/parameters',
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='configuration-parameter:show',
        check_str='rule:admin_or_owner',
        description='Get a parameter of a datastore version.',
        operations=[
            {
                'path': (PATH_DATASTORE +
                         '/versions/{version}/parameters/{param}'),
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='configuration-parameter:index_by_version',
        check_str='rule:admin_or_owner',
        description='List all parameters bound to a datastore version by '
                    'the id of the version (datastore is not provided).',
        operations=[
            {
                'path': PATH_VERSIONS + '/{version}/parameters',
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='configuration-parameter:show_by_version',
        check_str='rule:admin_or_owner',
        description='Get a parameter of a datastore version by its name '
                    'and the id of the version (datastore is not '
                    'provided).',
        operations=[
            {
                'path': PATH_VERSIONS + '/{version}/parameters/{param}',
                'method': 'GET'
            }
        ])
]


def list_rules():
    return rules
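# Illustrative sketch (hypothetical wiring, not Trove's actual enforcement
# path): how DocumentedRuleDefault entries like the ones above can be
# registered with an oslo.policy Enforcer and checked against request
# credentials. The credential and target values are assumptions.
def _example_enforce():
    from oslo_config import cfg as oslo_cfg
    from trove.common.policies import base as base_policies

    enforcer = policy.Enforcer(oslo_cfg.CONF)
    # The check strings reference rule:admin_or_owner, so the base rules
    # must be registered as well.
    enforcer.register_defaults(base_policies.list_rules())
    enforcer.register_defaults(list_rules())

    credentials = {'roles': ['admin'], 'tenant': 'tenant-1'}
    target = {'tenant': 'tenant-1'}
    return enforcer.enforce('configuration-parameter:index',
                            target, credentials)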
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/common/policies/configurations.py0000644000175000017500000000561200000000000024015 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from trove.common.policies.base import PATH_CONFIGS, PATH_CONFIG

rules = [
    policy.DocumentedRuleDefault(
        name='configuration:create',
        check_str='rule:admin_or_owner',
        description='Create a configuration group.',
        operations=[
            {
                'path': PATH_CONFIGS,
                'method': 'POST'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='configuration:delete',
        check_str='rule:admin_or_owner',
        description='Delete a configuration group.',
        operations=[
            {
                'path': PATH_CONFIG,
                'method': 'DELETE'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='configuration:index',
        check_str='rule:admin_or_owner',
        description='List all configuration groups.',
        operations=[
            {
                'path': PATH_CONFIGS,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='configuration:show',
        check_str='rule:admin_or_owner',
        description='Get information about a configuration group.',
        operations=[
            {
                'path': PATH_CONFIG,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='configuration:instances',
        check_str='rule:admin_or_owner',
        description='List all instances which a configuration group '
                    'has been assigned to.',
        operations=[
            {
                'path': PATH_CONFIG + '/instances',
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='configuration:update',
        check_str='rule:admin_or_owner',
        description='Update a configuration group (the configuration '
                    'group will be replaced completely).',
        operations=[
            {
                'path': PATH_CONFIG,
                'method': 'PUT'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='configuration:edit',
        check_str='rule:admin_or_owner',
        description='Patch a configuration group.',
        operations=[
            {
                'path': PATH_CONFIG,
                'method': 'PATCH'
            }
        ])
]


def list_rules():
    return rules
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/common/policies/databases.py0000644000175000017500000000430300000000000022706 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy

from trove.common.policies.base import (
    PATH_INSTANCES, PATH_DATABASES, PATH_DATABASE)

rules = [
    policy.DocumentedRuleDefault(
        name='instance:extension:database:create',
        check_str='rule:admin_or_owner',
        description='Create a set of schemas.',
        operations=[
            {
                'path': PATH_DATABASES,
                'method': 'POST'
            },
            # we also check this when creating instances with
            # databases specified.
            {
                'path': PATH_INSTANCES,
                'method': 'POST'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='instance:extension:database:delete',
        check_str='rule:admin_or_owner',
        description='Delete a schema from a database.',
        operations=[
            {
                'path': PATH_DATABASE,
                'method': 'DELETE'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='instance:extension:database:index',
        check_str='rule:admin_or_owner',
        description='List all schemas from a database.',
        operations=[
            {
                'path': PATH_DATABASES,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='instance:extension:database:show',
        check_str='rule:admin_or_owner',
        description='Get information about a schema '
                    '(currently not implemented).',
        operations=[
            {
                'path': PATH_DATABASE,
                'method': 'GET'
            }
        ])
]


def list_rules():
    return rules
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0
trove-12.1.0.dev92/trove/common/policies/datastores.py0000644000175000017500000000635300000000000023137 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from trove.common.policies.base import (
    PATH_DATASTORES, PATH_DATASTORE, PATH_VERSIONS)

rules = [
    policy.DocumentedRuleDefault(
        name='datastore:index',
        check_str='',
        description='List all datastores.',
        operations=[
            {
                'path': PATH_DATASTORES,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='datastore:show',
        check_str='',
        description='Get information about a datastore.',
        operations=[
            {
                'path': PATH_DATASTORE,
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='datastore:delete',
        check_str='rule:admin',
        description='Delete a datastore.',
        operations=[
            {
                'path': PATH_DATASTORE,
                'method': 'DELETE'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='datastore:version_show',
        check_str='',
        description='Get a version of a datastore by the version id.',
        operations=[
            {
                'path': PATH_DATASTORE + '/versions/{version}',
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='datastore:version_show_by_uuid',
        check_str='',
        description='Get a version of a datastore by the version id '
                    '(without providing the datastore id).',
        operations=[
            {
                'path': PATH_VERSIONS + '/{version}',
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='datastore:version_index',
        check_str='',
        description='Get all versions of a datastore.',
        operations=[
            {
                'path': PATH_DATASTORE + '/versions',
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='datastore:list_associated_flavors',
        check_str='',
        description='List all flavors associated with a datastore '
                    'version.',
        operations=[
            {
                'path': PATH_DATASTORE + '/versions/{version}/flavors',
                'method': 'GET'
            }
        ]),
    policy.DocumentedRuleDefault(
        name='datastore:list_associated_volume_types',
        check_str='',
        description='List all volume-types associated with '
                    'a datastore version.',
        operations=[
            {
                'path': PATH_DATASTORE + '/versions/{version}/volume-types',
                'method': 'GET'
            }
        ])
]


def list_rules():
    return rules
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/common/policies/flavors.py0000644000175000017500000000233200000000000022433 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from trove.common.policies.base import PATH_FLAVORS, PATH_FLAVOR rules = [ policy.DocumentedRuleDefault( name='flavor:index', check_str='', description='List all flavors.', operations=[ { 'path': PATH_FLAVORS, 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='flavor:show', check_str='', description='Get information of a flavor.', operations=[ { 'path': PATH_FLAVOR, 'method': 'GET' } ]) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policies/instances.py0000644000175000017500000001706600000000000022760 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from trove.common.policies.base import ( PATH_INSTANCES, PATH_INSTANCES_DETAIL, PATH_INSTANCE, PATH_INSTANCE_ACTION) rules = [ policy.DocumentedRuleDefault( name='instance:create', check_str='rule:admin_or_owner', description='Create a database instance.', operations=[ { 'path': PATH_INSTANCES, 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:delete', check_str='rule:admin_or_owner', description='Delete a database instance.', operations=[ { 'path': PATH_INSTANCE, 'method': 'DELETE' } ]), policy.DocumentedRuleDefault( name='instance:force_delete', check_str='rule:admin_or_owner', description='Forcibly delete a database instance.', operations=[ { 'path': PATH_INSTANCE, 'method': 'DELETE' } ]), policy.DocumentedRuleDefault( name='instance:index', check_str='rule:admin_or_owner', description='List database instances.', operations=[ { 'path': PATH_INSTANCES, 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='instance:detail', check_str='rule:admin_or_owner', description='List database instances with details.', operations=[ { 'path': PATH_INSTANCES_DETAIL, 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='instance:show', check_str='rule:admin_or_owner', description='Get details of a specific database instance.', operations=[ { 'path': PATH_INSTANCE, 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='instance:update', check_str='rule:admin_or_owner', description='Update a database instance to ' 'attach/detach configuration', operations=[ { 'path': PATH_INSTANCE, 'method': 'PUT' }, # we also check this when creating instances with # a configuration group specified. { 'path': PATH_INSTANCES, 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:edit', check_str='rule:admin_or_owner', description='Updates the instance to set or ' 'unset one or more attributes.', operations=[ { 'path': PATH_INSTANCE, 'method': 'PATCH' } ]), policy.DocumentedRuleDefault( name='instance:restart', check_str='rule:admin_or_owner', description='Restart a database instance.', operations=[ { 'path': PATH_INSTANCE_ACTION + ' (restart)', 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:resize_volume', check_str='rule:admin_or_owner', description='Resize a database instance volume.', operations=[ { 'path': PATH_INSTANCE_ACTION + ' (resize)', 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:resize_flavor', check_str='rule:admin_or_owner', description='Resize a database instance flavor.', operations=[ { 'path': PATH_INSTANCE_ACTION + ' (resize)', 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:reset_status', check_str='rule:admin', description='Reset the status of a database instance to ERROR.', operations=[ { 'path': PATH_INSTANCE_ACTION + ' (reset_status)', 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:promote_to_replica_source', check_str='rule:admin_or_owner', description='Promote instance to replica source.', operations=[ { 'path': PATH_INSTANCE_ACTION + ' (promote_to_replica_source)', 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:eject_replica_source', check_str='rule:admin_or_owner', description='Eject the replica source from its replica set.', operations=[ { 'path': PATH_INSTANCE_ACTION + ' (eject_replica_source)', 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:configuration', check_str='rule:admin_or_owner', description='Get the default configuration template ' 'applied to the instance.', operations=[ { 'path': 
PATH_INSTANCE + '/configuration', 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='instance:guest_log_list', check_str='rule:admin_or_owner', description='Get all information about all logs ' 'of a database instance.', operations=[ { 'path': PATH_INSTANCE + '/log', 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='instance:backups', check_str='rule:admin_or_owner', description='Get all backups of a database instance.', operations=[ { 'path': PATH_INSTANCE + '/backups', 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='instance:module_list', check_str='rule:admin_or_owner', description='Get information about modules on a database instance.', operations=[ { 'path': PATH_INSTANCE + '/modules', 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='instance:module_apply', check_str='rule:admin_or_owner', description='Apply modules to a database instance.', operations=[ { 'path': PATH_INSTANCE + '/modules', 'method': 'POST' }, # we also check this when creating instances with # modules specified. { 'path': PATH_INSTANCES, 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:module_remove', check_str='rule:admin_or_owner', description='Remove a module from a database instance.', operations=[ { 'path': PATH_INSTANCE + '/modules/{module_id}', 'method': 'DELETE' } ]) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policies/limits.py0000644000175000017500000000176100000000000022265 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from trove.common.policies.base import PATH_LIMITS rules = [ policy.DocumentedRuleDefault( name='limits:index', check_str='rule:admin_or_owner', description='List all absolute and rate limit information.', operations=[ { 'path': PATH_LIMITS, 'method': 'GET' } ]) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policies/modules.py0000644000175000017500000000527100000000000022434 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
from oslo_policy import policy from trove.common.policies.base import PATH_MODULES, PATH_MODULE rules = [ policy.DocumentedRuleDefault( name='module:create', check_str='rule:admin_or_owner', description='Create a module.', operations=[ { 'path': PATH_MODULES, 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='module:delete', check_str='rule:admin_or_owner', description='Delete a module.', operations=[ { 'path': PATH_MODULE, 'method': 'DELETE' } ]), policy.DocumentedRuleDefault( name='module:index', check_str='rule:admin_or_owner', description='List all modules.', operations=[ { 'path': PATH_MODULES, 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='module:show', check_str='rule:admin_or_owner', description='Get information of a module.', operations=[ { 'path': PATH_MODULE, 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='module:instances', check_str='rule:admin_or_owner', description='List all instances to which a module is applied.', operations=[ { 'path': PATH_MODULE + '/instances', 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='module:update', check_str='rule:admin_or_owner', description='Update a module.', operations=[ { 'path': PATH_MODULE, 'method': 'PUT' } ]), policy.DocumentedRuleDefault( name='module:reapply', check_str='rule:admin_or_owner', description='Reapply a module to all instances.', operations=[ { 'path': PATH_MODULE + '/instances', 'method': 'PUT' } ]) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policies/root.py0000644000175000017500000000533700000000000021742 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
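Nearly every rule in these files delegates to check_str='rule:admin_or_owner' or 'rule:admin'. Those aliases are not defined here; they have to be registered as base rules so the enforcer can resolve them. The authoritative definitions live in trove/common/policies/base.py, which is not reproduced in this section; a conventional sketch of what such base rules look like, with the exact check strings treated as assumptions:

from oslo_policy import policy

# Assumed check strings; see trove/common/policies/base.py for the
# definitive versions used by Trove.
base_rules = [
    policy.RuleDefault(
        'admin', 'role:admin or is_admin:True',
        description='Must be an administrator.'),
    policy.RuleDefault(
        'admin_or_owner', 'rule:admin or tenant:%(tenant)s',
        description='Must be an administrator or owner of the object.'),
]

def list_rules():
    return base_rules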
from oslo_policy import policy from trove.common.policies.base import PATH_INSTANCE, PATH_CLUSTER rules = [ policy.DocumentedRuleDefault( name='instance:extension:root:create', check_str='rule:admin_or_owner', description='Enable the root user of a database instance.', operations=[ { 'path': PATH_INSTANCE + '/root', 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:extension:root:delete', check_str='rule:admin_or_owner', description='Disable the root user of a database instance.', operations=[ { 'path': PATH_INSTANCE + '/root', 'method': 'DELETE' } ]), policy.DocumentedRuleDefault( name='instance:extension:root:index', check_str='rule:admin_or_owner', description='Show whether the root user of a database ' 'instance has ever been enabled.', operations=[ { 'path': PATH_INSTANCE + '/root', 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='cluster:extension:root:create', check_str='rule:admin_or_owner', description='Enable the root user of the instances in a cluster.', operations=[ { 'path': PATH_CLUSTER + '/root', 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='cluster:extension:root:delete', check_str='rule:admin_or_owner', description='Disable the root user of the instances in a cluster.', operations=[ { 'path': PATH_CLUSTER + '/root', 'method': 'DELETE' } ]), policy.DocumentedRuleDefault( name='cluster:extension:root:index', check_str='rule:admin_or_owner', description='Show whether the root user of the instances ' 'in a cluster has ever been enabled.', operations=[ { 'path': PATH_CLUSTER + '/root', 'method': 'GET' } ]) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policies/user_access.py0000644000175000017500000000323100000000000023255 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from trove.common.policies.base import PATH_ACCESSES, PATH_ACCESS rules = [ policy.DocumentedRuleDefault( name='instance:extension:user_access:update', check_str='rule:admin_or_owner', description='Grant access for a user to one or more databases.', operations=[ { 'path': PATH_ACCESSES, 'method': 'PUT' } ]), policy.DocumentedRuleDefault( name='instance:extension:user_access:delete', check_str='rule:admin_or_owner', description='Revoke access for a user to a database.', operations=[ { 'path': PATH_ACCESS, 'method': 'DELETE' } ]), policy.DocumentedRuleDefault( name='instance:extension:user_access:index', check_str='rule:admin_or_owner', description='Get permissions of a user.', operations=[ { 'path': PATH_ACCESSES, 'method': 'GET' } ]) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policies/users.py0000644000175000017500000000554600000000000022126 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from trove.common.policies.base import ( PATH_INSTANCES, PATH_USERS, PATH_USER) rules = [ policy.DocumentedRuleDefault( name='instance:extension:user:create', check_str='rule:admin_or_owner', description='Create users for a database instance.', operations=[ { 'path': PATH_USERS, 'method': 'POST' }, # we also check this when creating instances with # users specified. { 'path': PATH_INSTANCES, 'method': 'POST' } ]), policy.DocumentedRuleDefault( name='instance:extension:user:delete', check_str='rule:admin_or_owner', description='Delete a user from a database instance.', operations=[ { 'path': PATH_USER, 'method': 'DELETE' } ]), policy.DocumentedRuleDefault( name='instance:extension:user:index', check_str='rule:admin_or_owner', description='Get all users of a database instance.', operations=[ { 'path': PATH_USERS, 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='instance:extension:user:show', check_str='rule:admin_or_owner', description='Get the information of a single user ' 'of a database instance.', operations=[ { 'path': PATH_USER, 'method': 'GET' } ]), policy.DocumentedRuleDefault( name='instance:extension:user:update', check_str='rule:admin_or_owner', description='Update attributes for a user of a database instance.', operations=[ { 'path': PATH_USER, 'method': 'PUT' } ]), policy.DocumentedRuleDefault( name='instance:extension:user:update_all', check_str='rule:admin_or_owner', description='Update the password for one or more users ' 'of a database instance.', operations=[ { 'path': PATH_USERS, 'method': 'PUT' } ]) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/policy.py0000644000175000017500000000475600000000000020453 0ustar00coreycorey00000000000000# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
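trove/common/policy.py, whose body follows, wraps the enforcer behind two helpers so API code never calls oslo.policy directly. A hedged sketch of how a hypothetical handler might use them; the handler name and the way the context is pulled from the request environ are illustrative:

from trove.common import policy

def list_users(req, tenant_id, instance_id):
    # Schematic context retrieval; Trove's WSGI layer stores a
    # TroveContext in the request environ.
    context = req.environ['trove.context']
    # Raises PolicyNotAuthorized unless the caller satisfies
    # 'rule:admin_or_owner' for the target {'tenant': <project id>}.
    policy.authorize_on_tenant(context, 'instance:extension:user:index')
    # ... proceed to fetch and return the users of the instance ...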
from oslo_config import cfg from oslo_policy import policy from trove.common import exception as trove_exceptions from trove.common import policies CONF = cfg.CONF _ENFORCER = None def get_enforcer(): global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(CONF) _ENFORCER.register_defaults(policies.list_rules()) _ENFORCER.load_rules() return _ENFORCER def authorize_on_tenant(context, rule): return __authorize(context, rule, target=None) def authorize_on_target(context, rule, target): if target: return __authorize(context, rule, target=target) raise trove_exceptions.TroveError( "BUG: Target must not evaluate to False.") def __authorize(context, rule, target=None): """Checks authorization of a rule against the target in this context. * This function is not to be called directly. Calling the function with a target that evaluates to None may result in policy bypass. Use 'authorize_on_*' calls instead. :param context: Trove context. :type context: Context. :param rule: The rule to evaluate, e.g. ``instance:create``, ``instance:resize_volume``. :param target: As much information about the object being operated on as possible. For object creation (target=None) this defaults to a dictionary representing the location of the object, e.g. ``{'tenant': context.project_id}``. :type target: dict :raises: :class:`PolicyNotAuthorized` if verification fails. """ target = target or {'tenant': context.project_id} return get_enforcer().authorize( rule, target, context.to_dict(), do_raise=True, exc=trove_exceptions.PolicyNotAuthorized, action=rule) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/profile.py0000644000175000017500000000333400000000000020613 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_context import context from oslo_log import log as logging import oslo_messaging as messaging from osprofiler import notifier from osprofiler import web from trove.common import cfg from trove import rpc LOG = logging.getLogger(__name__) CONF = cfg.CONF def setup_profiler(binary, host): if CONF.profiler.enabled: _notifier = notifier.create( "Messaging", messaging, context.get_admin_context().to_dict(), rpc.TRANSPORT, "trove", binary, host) notifier.set(_notifier) web.enable(CONF.profiler.hmac_keys) LOG.warning("The OpenStack Profiler is enabled. Using one" " of the hmac_keys specified in the trove.conf file " "(typically in /etc/trove), a trace can be made of " "all requests. 
Only an admin user can retrieve " "the trace information, however.\n" "To disable the profiler, add the following to the " "configuration file:\n" "[profiler]\n" "enabled=false") else: web.disable() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7401102 trove-12.1.0.dev92/trove/common/rpc/0000755000175000017500000000000000000000000017362 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/rpc/__init__.py0000644000175000017500000000000000000000000021461 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/rpc/conductor_guest_serializer.py0000644000175000017500000000375200000000000025403 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_serialization import jsonutils from trove.common import crypto_utils as crypto from trove.common.i18n import _ from trove.common.rpc import serializer CONF = cfg.CONF # BUG(1650518): Cleanup in the Pike release class ConductorGuestSerializer(serializer.TroveSerializer): def __init__(self, base, key): self._key = key super(ConductorGuestSerializer, self).__init__(base) def _serialize_entity(self, ctxt, entity): if self._key is None: return entity value = crypto.encode_data( crypto.encrypt_data( jsonutils.dumps(entity), self._key)) return jsonutils.dumps({'entity': value, 'csz-instance-id': CONF.guest_id}) def _deserialize_entity(self, ctxt, entity): msg = (_("_deserialize_entity not implemented in " "ConductorGuestSerializer.")) raise Exception(msg) def _serialize_context(self, ctxt): if self._key is None: return ctxt cstr = jsonutils.dumps(ctxt) return {'context': crypto.encode_data( crypto.encrypt_data(cstr, self._key)), 'csz-instance-id': CONF.guest_id} def _deserialize_context(self, ctxt): msg = (_("_deserialize_context not implemented in " "ConductorGuestSerializer.")) raise Exception(msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/rpc/conductor_host_serializer.py0000644000175000017500000000537200000000000025231 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
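ConductorGuestSerializer above is intentionally one-way: the guest agent encrypts and tags outgoing payloads, and decryption happens only in ConductorHostSerializer, defined next. A sketch of the guest-side behaviour, assuming the instance encryption key is already at hand (the key value below is a placeholder):

from trove.common.rpc import conductor_guest_serializer as cgs
from trove.common.rpc import serializer

# The base serializer handles TroveContext <-> dict conversion; the guest
# serializer then encrypts the result and tags it with CONF.guest_id.
base = serializer.TroveRequestContextSerializer(None)
szr = cgs.ConductorGuestSerializer(base, key='<instance-encryption-key>')

# The result is a JSON document of the form
# {"entity": "<base64 ciphertext>", "csz-instance-id": "<guest id>"},
# which the conductor-side serializer uses to look up the right key.
payload = szr.serialize_entity(None, {'status': 'ACTIVE'})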
from oslo_config import cfg from oslo_serialization import jsonutils from trove.common import crypto_utils as cu from trove.common.rpc import serializer from trove.instance.models import get_instance_encryption_key CONF = cfg.CONF # BUG(1650518): Cleanup in the Pike release class ConductorHostSerializer(serializer.TroveSerializer): def __init__(self, base, *_): super(ConductorHostSerializer, self).__init__(base) def _serialize_entity(self, ctxt, entity): try: if ctxt.instance_id is None: return entity except (ValueError, TypeError): return entity instance_key = get_instance_encryption_key(ctxt.instance_id) estr = jsonutils.dumps(entity) return cu.encode_data(cu.encrypt_data(estr, instance_key)) def _deserialize_entity(self, ctxt, entity): try: entity = jsonutils.loads(entity) instance_id = entity['csz-instance-id'] except (ValueError, TypeError): return entity instance_key = get_instance_encryption_key(instance_id) estr = cu.decrypt_data(cu.decode_data(entity['entity']), instance_key) entity = jsonutils.loads(estr) return entity def _serialize_context(self, ctxt): try: if ctxt.instance_id is None: return ctxt except (ValueError, TypeError): return ctxt instance_key = get_instance_encryption_key(ctxt.instance_id) cstr = jsonutils.dumps(ctxt) return {'context': cu.encode_data(cu.encrypt_data(cstr, instance_key))} def _deserialize_context(self, ctxt): try: instance_id = ctxt.get('csz-instance-id', None) if instance_id is not None: instance_key = get_instance_encryption_key(instance_id) cstr = cu.decrypt_data(cu.decode_data(ctxt['context']), instance_key) ctxt = jsonutils.loads(cstr) except (ValueError, TypeError): return ctxt ctxt['instance_id'] = instance_id return ctxt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/rpc/secure_serializer.py0000644000175000017500000000370600000000000023461 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
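SecureSerializer, defined next, is the symmetric counterpart: both peers hold the same key, so an entity survives a serialize/deserialize round trip on a single serializer instance. A sketch with a placeholder key:

from trove.common.rpc import secure_serializer as ssz

# Illustrative key; real keys come from the instance records.
szr = ssz.SecureSerializer(base=None, key='<shared-secret-key>')

blob = szr.serialize_entity(None, {'op': 'create_backup'})
assert szr.deserialize_entity(None, blob) == {'op': 'create_backup'}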
from oslo_serialization import jsonutils from trove.common import crypto_utils as cu from trove.common.rpc import serializer # BUG(1650518): Cleanup in the Pike release class SecureSerializer(serializer.TroveSerializer): def __init__(self, base, key): self._key = key super(SecureSerializer, self).__init__(base) def _serialize_entity(self, ctxt, entity): if self._key is None: return entity estr = jsonutils.dumps(entity) return cu.encode_data(cu.encrypt_data(estr, self._key)) def _deserialize_entity(self, ctxt, entity): try: if self._key is not None: estr = cu.decrypt_data(cu.decode_data(entity), self._key) entity = jsonutils.loads(estr) except (ValueError, TypeError): return entity return entity def _serialize_context(self, ctxt): if self._key is None: return ctxt cstr = jsonutils.dumps(ctxt) return {'context': cu.encode_data(cu.encrypt_data(cstr, self._key))} def _deserialize_context(self, ctxt): try: if self._key is not None: cstr = cu.decrypt_data(cu.decode_data(ctxt['context']), self._key) ctxt = jsonutils.loads(cstr) except (ValueError, TypeError): return ctxt return ctxt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/rpc/serializer.py0000644000175000017500000000511100000000000022103 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_messaging as messaging from osprofiler import profiler from trove.common.context import TroveContext class TroveSerializer(messaging.Serializer): """The Trove serializer class that handles class inheritance and base serializers. 
""" def __init__(self, base): self._base = base def _serialize_entity(self, context, entity): return entity def serialize_entity(self, context, entity): if self._base: entity = self._base.serialize_entity(context, entity) return self._serialize_entity(context, entity) def _deserialize_entity(self, context, entity): return entity def deserialize_entity(self, context, entity): entity = self._deserialize_entity(context, entity) if self._base: entity = self._base.deserialize_entity(context, entity) return entity def _serialize_context(self, context): return context def serialize_context(self, context): if self._base: context = self._base.serialize_context(context) return self._serialize_context(context) def _deserialize_context(self, context): return context def deserialize_context(self, context): context = self._deserialize_context(context) if self._base: context = self._base.deserialize_context(context) return context class TroveRequestContextSerializer(TroveSerializer): def _serialize_context(self, context): _context = context.to_dict() prof = profiler.get() if prof: trace_info = { "hmac_key": prof.hmac_key, "base_id": prof.get_base_id(), "parent_id": prof.get_id() } _context.update({"trace_info": trace_info}) return _context def _deserialize_context(self, context): trace_info = context.pop("trace_info", None) if trace_info: profiler.init(**trace_info) return TroveContext.from_dict(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/rpc/service.py0000644000175000017500000000624200000000000021400 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import inspect import os from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import loopingcall from oslo_service import service from oslo_utils import importutils from osprofiler import profiler from trove.common import cfg from trove.common import profile from trove.common.rpc import secure_serializer as ssz from trove import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class RpcService(service.Service): def __init__(self, key, host=None, binary=None, topic=None, manager=None, rpc_api_version=None, secure_serializer=ssz.SecureSerializer): super(RpcService, self).__init__() self.key = key self.host = host or CONF.host self.binary = binary or os.path.basename(inspect.stack()[-1][1]) self.topic = topic or self.binary.rpartition('trove-')[2] _manager = importutils.import_object(manager) self.manager_impl = profiler.trace_cls("rpc")(_manager) self.rpc_api_version = rpc_api_version or \ self.manager_impl.RPC_API_VERSION self.secure_serializer = secure_serializer profile.setup_profiler(self.binary, self.host) def start(self): LOG.debug("Creating RPC server for service %s", self.topic) target = messaging.Target(topic=self.topic, server=self.host, version=self.rpc_api_version) if not hasattr(self.manager_impl, 'target'): self.manager_impl.target = target endpoints = [self.manager_impl] self.rpcserver = rpc.get_server( target, endpoints, key=self.key, secure_serializer=self.secure_serializer) self.rpcserver.start() # TODO(hub-cap): Currently the context is none... do we _need_ it here? report_interval = CONF.report_interval if report_interval > 0: pulse = loopingcall.FixedIntervalLoopingCall( self.manager_impl.run_periodic_tasks, context=None) pulse.start(interval=report_interval, initial_delay=report_interval) pulse.wait() def stop(self): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. as we're shutting down anyway try: self.rpcserver.stop() except Exception: LOG.info("Failed to stop RPC server before shutdown. ") pass super(RpcService, self).stop() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/rpc/version.py0000644000175000017500000000153000000000000021420 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # based on configured release version RPC_API_VERSION = "1.0" # API version history: # # 1.0 - Initial version. (We started keeping track at icehouse-3) # 1.1 - # 1.2 - ... 
VERSION_ALIASES = { 'icehouse': '1.0' } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7401102 trove-12.1.0.dev92/trove/common/schemas/0000755000175000017500000000000000000000000020221 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/schemas/atom-link.rng0000644000175000017500000000700100000000000022622 0ustar00coreycorey00000000000000 [atom-link.rng: RELAX NG grammar for atom:link elements; the XML markup was lost in extraction and only stray text nodes survived (attribute patterns such as "[^:]*", ".+/.+", "[A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})*", xml:base, xml:lang).] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/schemas/atom.rng0000644000175000017500000003624200000000000021700 0ustar00coreycorey00000000000000 [atom.rng: RELAX NG grammar for Atom feeds; XML markup lost in extraction. Surviving text includes the schema assertions "An atom:feed must have an atom:author unless all of its atom:entry children have an atom:author.", "An atom:entry must have at least one atom:link element with a rel attribute of 'alternate' or an atom:content.", and "An atom:entry must have an atom:author if its feed does not."] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7401102 trove-12.1.0.dev92/trove/common/schemas/v1.1/0000755000175000017500000000000000000000000020706 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/schemas/v1.1/limits.rng0000644000175000017500000000172300000000000022722 0ustar00coreycorey00000000000000 [v1.1/limits.rng: RELAX NG grammar for the limits response; no meaningful text survived extraction.] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/serializable_notification.py0000644000175000017500000000224300000000000024365 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common.utils import import_class class SerializableNotification(object): @staticmethod def serialize(context, notification): serialized = notification.serialize(context) serialized['notification_classname'] = ( notification.__module__ + "." + type(notification).__name__) return serialized @staticmethod def deserialize(context, serialized): classname = serialized.pop('notification_classname') notification_class = import_class(classname) return notification_class(context, **serialized) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/server_group.py0000644000175000017500000000707300000000000021701 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import six from oslo_log import log as logging from trove.common.clients import create_nova_client LOG = logging.getLogger(__name__) class ServerGroup(object): @classmethod def load(cls, context, instance_id): client = create_nova_client(context) server_group = None expected_name = "locality_%s" % instance_id try: for sg in client.server_groups.list(): if sg.name == expected_name: server_group = sg except Exception: LOG.exception("Could not load server group for instance %s", instance_id) if not server_group: LOG.info('No server group found for instance %s', instance_id) return server_group @classmethod def create(cls, context, locality, name_suffix): client = create_nova_client(context) server_group_name = "%s_%s" % ('locality', name_suffix) server_group = client.server_groups.create( name=server_group_name, policies=[locality]) LOG.debug("Created '%(locality)s' server group called %(group_name)s " "(id: %(group_id)s).", {'locality': locality, 'group_name': server_group_name, 'group_id': server_group.id}) return server_group @classmethod def delete(cls, context, server_group, force=False): # Only delete the server group if we're the last member in it, or if # it has no members if server_group: if force or len(server_group.members) <= 1: LOG.info("Deleting server group %s", server_group.id) client = create_nova_client(context) client.server_groups.delete(server_group.id) else: LOG.debug("Skipping delete of server group %(id)s " "(members: %(members)s).", {'id': server_group.id, 'members': server_group.members}) @classmethod def convert_to_hint(cls, server_group, hints=None): if server_group: hints = hints or {} hints["group"] = server_group.id return hints @classmethod def build_scheduler_hint(cls, context, locality, name_suffix): scheduler_hint = None if locality: # Build the scheduler hint, but only if locality's a string if isinstance(locality, six.string_types): server_group = cls.create( context, locality, name_suffix) scheduler_hint = cls.convert_to_hint( server_group) else: # otherwise assume it's already in hint form (i.e. 
a dict) scheduler_hint = locality return scheduler_hint @classmethod def get_locality(cls, server_group): locality = None if server_group: locality = server_group.policies[0] return locality ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7401102 trove-12.1.0.dev92/trove/common/strategies/0000755000175000017500000000000000000000000020750 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/__init__.py0000644000175000017500000000000000000000000023047 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7401102 trove-12.1.0.dev92/trove/common/strategies/cluster/0000755000175000017500000000000000000000000022431 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/__init__.py0000644000175000017500000000000000000000000024530 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/base.py0000644000175000017500000000256000000000000023720 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
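ServerGroup.build_scheduler_hint above is the piece instance-creation code calls to turn a locality request into the hint Nova's scheduler understands. A sketch of that call under the assumption that a valid request context is available; 'anti-affinity' (and 'affinity') are the policies Trove passes straight through to Nova:

from trove.common import server_group as srv_grp

def hints_for_new_instance(context, instance_id):
    # Creates a Nova server group named 'locality_<instance_id>' with the
    # anti-affinity policy and returns it as {'group': <server-group-id>}.
    return srv_grp.ServerGroup.build_scheduler_hint(
        context, 'anti-affinity', instance_id)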
class BaseAPIStrategy(object): @property def cluster_class(self): raise NotImplementedError() @property def cluster_controller_actions(self): raise NotImplementedError() @property def cluster_view_class(self): raise NotImplementedError() @property def mgmt_cluster_view_class(self): raise NotImplementedError() class BaseTaskManagerStrategy(object): @property def task_manager_api_class(self, context): raise NotImplementedError() @property def task_manager_cluster_tasks_class(self, context): raise NotImplementedError() class BaseGuestAgentStrategy(object): @property def guest_client_class(self): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7401102 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/0000755000175000017500000000000000000000000025126 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/__init__.py0000644000175000017500000000000000000000000027225 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7401102 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/cassandra/0000755000175000017500000000000000000000000027065 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/cassandra/__init__.py0000644000175000017500000000000000000000000031164 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/cassandra/api.py0000644000175000017500000002057000000000000030214 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
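BaseAPIStrategy and its siblings above are pure interfaces: each datastore supplies a concrete strategy, as the Cassandra implementation that follows does. A minimal hypothetical strategy for an imaginary 'foo' datastore, just to show the required shape (all Foo classes are stubs invented for this sketch):

from trove.common.strategies.cluster import base

class FooCluster(object):
    """Stub cluster model for the hypothetical 'foo' datastore."""

class FooClusterView(object):
    """Stub user-facing cluster view."""

class FooMgmtClusterView(object):
    """Stub management cluster view."""

class FooAPIStrategy(base.BaseAPIStrategy):
    @property
    def cluster_class(self):
        return FooCluster

    @property
    def cluster_controller_actions(self):
        # Map controller action names to handler methods; empty here.
        return {}

    @property
    def cluster_view_class(self):
        return FooClusterView

    @property
    def mgmt_cluster_view_class(self):
        return FooMgmtClusterView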
from oslo_log import log as logging from trove.cluster import models from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common import server_group as srv_grp from trove.common.strategies.cluster import base from trove.common.strategies.cluster.experimental.cassandra import taskmanager from trove.common import utils from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance import models as inst_models from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class CassandraAPIStrategy(base.BaseAPIStrategy): @property def cluster_class(self): return CassandraCluster @property def cluster_controller_actions(self): return { 'grow': self._action_grow_cluster, 'shrink': self._action_shrink_cluster } def _action_grow_cluster(self, cluster, body): nodes = body['grow'] instances = [] for node in nodes: instance = { 'flavor_id': utils.get_id_from_href(node['flavorRef']) } if 'name' in node: instance['name'] = node['name'] if 'volume' in node: instance['volume_size'] = int(node['volume']['size']) instances.append(instance) return cluster.grow(instances) def _action_shrink_cluster(self, cluster, body): nodes = body['shrink'] instance_ids = [node['id'] for node in nodes] return cluster.shrink(instance_ids) @property def cluster_view_class(self): return CassandraClusterView @property def mgmt_cluster_view_class(self): return CassandraMgmtClusterView class CassandraCluster(models.Cluster): DEFAULT_DATA_CENTER = "dc1" DEFAULT_RACK = "rack1" @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties, locality, configuration): LOG.debug("Processing a request for creating a new cluster.") # Updating Cluster Task. db_info = models.DBCluster.create( name=name, tenant_id=context.project_id, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL, configuration_id=configuration) cls._create_cluster_instances( context, db_info.id, db_info.name, datastore, datastore_version, instances, extended_properties, locality, configuration) # Calling taskmanager to further proceed for cluster-configuration. task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return CassandraCluster(context, db_info, datastore, datastore_version) @classmethod def _create_cluster_instances( cls, context, cluster_id, cluster_name, datastore, datastore_version, instances, extended_properties, locality, configuration_id): LOG.debug("Processing a request for new cluster instances.") cassandra_conf = CONF.get(datastore_version.manager) eph_enabled = cassandra_conf.device_path vol_enabled = cassandra_conf.volume_support # Validate instance flavors. models.validate_instance_flavors(context, instances, vol_enabled, eph_enabled) # Compute the total volume allocation. req_volume_size = models.get_required_volume_size(instances, vol_enabled) # Check requirements against quota. num_new_instances = len(instances) deltas = {'instances': num_new_instances, 'volumes': req_volume_size} models.assert_homogeneous_cluster(instances) check_quotas(context.project_id, deltas) # Checking networks are same for the cluster models.validate_instance_nics(context, instances) # Creating member instances. 
num_instances = len( taskmanager.CassandraClusterTasks.find_cluster_node_ids(cluster_id) ) new_instances = [] for instance_idx, instance in enumerate(instances, num_instances + 1): instance_az = instance.get('availability_zone', None) member_config = {"id": cluster_id, "instance_type": "member", "dc": cls.DEFAULT_DATA_CENTER, "rack": instance_az or cls.DEFAULT_RACK} instance_name = instance.get('name') if not instance_name: instance_name = cls._build_instance_name( cluster_name, member_config['dc'], member_config['rack'], instance_idx) new_instance = inst_models.Instance.create( context, instance_name, instance['flavor_id'], datastore_version.image_id, [], [], datastore, datastore_version, instance['volume_size'], None, nics=instance.get('nics', None), availability_zone=instance_az, configuration_id=configuration_id, cluster_config=member_config, volume_type=instance.get('volume_type', None), modules=instance.get('modules'), locality=locality, region_name=instance.get('region_name')) new_instances.append(new_instance) return new_instances @classmethod def _build_instance_name(cls, cluster_name, dc, rack, instance_idx): return "%s-member-%s-%s-%d" % (cluster_name, dc, rack, instance_idx) def grow(self, instances): LOG.debug("Processing a request for growing cluster: %s", self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) configuration_id = self.db_info.configuration_id new_instances = self._create_cluster_instances( context, db_info.id, db_info.name, datastore, datastore_version, instances, None, locality, configuration_id) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return CassandraCluster(context, db_info, datastore, datastore_version) def shrink(self, removal_ids): LOG.debug("Processing a request for shrinking cluster: %s", self.id) self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) task_api.load(context, datastore_version.manager).shrink_cluster( db_info.id, removal_ids) return CassandraCluster(context, db_info, datastore, datastore_version) def restart(self): self.rolling_restart() def upgrade(self, datastore_version): self.rolling_upgrade(datastore_version) def configuration_attach(self, configuration_id): self.rolling_configuration_update(configuration_id, apply_on_all=False) def configuration_detach(self): self.rolling_configuration_remove(apply_on_all=False) class CassandraClusterView(ClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) class CassandraMgmtClusterView(MgmtClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/cassandra/guestagent.py0000644000175000017500000001114400000000000031606 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common.strategies.cluster import base from trove.guestagent import api as guest_api LOG = logging.getLogger(__name__) class CassandraGuestAgentStrategy(base.BaseGuestAgentStrategy): @property def guest_client_class(self): return CassandraGuestAgentAPI class CassandraGuestAgentAPI(guest_api.API): """Cluster Specific Datastore Guest API **** VERSION CONTROLLED API **** The methods in this class are subject to version control as coordinated by guestagent/api.py. Whenever a change is made to any API method in this class, add a version number and comment to the top of guestagent/api.py and use the version number as appropriate in this file """ def get_data_center(self): LOG.debug("Retrieving the data center for node: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._call("get_data_center", self.agent_low_timeout, version=version) def get_rack(self): LOG.debug("Retrieving the rack for node: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._call("get_rack", self.agent_low_timeout, version=version) def set_seeds(self, seeds): LOG.debug("Configuring the gossip seeds for node: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._call("set_seeds", self.agent_low_timeout, version=version, seeds=seeds) def get_seeds(self): LOG.debug("Retrieving the gossip seeds for node: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._call("get_seeds", self.agent_low_timeout, version=version) def set_auto_bootstrap(self, enabled): LOG.debug("Setting the auto-bootstrap to '%(enabled)s' " "for node: %(id)s", {'enabled': enabled, 'id': self.id}) version = guest_api.API.API_BASE_VERSION return self._call("set_auto_bootstrap", self.agent_low_timeout, version=version, enabled=enabled) def cluster_complete(self): LOG.debug("Sending a setup completion notification for node: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._call("cluster_complete", self.agent_high_timeout, version=version) def node_cleanup_begin(self): LOG.debug("Signaling the node to prepare for cleanup: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._call("node_cleanup_begin", self.agent_low_timeout, version=version) def node_cleanup(self): LOG.debug("Running cleanup on node: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._cast('node_cleanup', version=version) def node_decommission(self): LOG.debug("Decommission node: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._cast("node_decommission", version=version) def cluster_secure(self, password): LOG.debug("Securing the cluster via node: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._call( "cluster_secure", self.agent_high_timeout, version=version, password=password) def get_admin_credentials(self): LOG.debug("Retrieving the admin credentials from node: %s", self.id) version = guest_api.API.API_BASE_VERSION return self._call("get_admin_credentials", self.agent_low_timeout, version=version) def store_admin_credentials(self, admin_credentials): LOG.debug("Storing the admin credentials on node: %s", 
self.id) version = guest_api.API.API_BASE_VERSION return self._call("store_admin_credentials", self.agent_low_timeout, version=version, admin_credentials=admin_credentials) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/cassandra/taskmanager.py0000644000175000017500000003613300000000000031742 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.cluster import base from trove.common import utils from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance import tasks as inst_tasks from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF class CassandraTaskManagerStrategy(base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return CassandraTaskManagerAPI @property def task_manager_cluster_tasks_class(self): return CassandraClusterTasks class CassandraClusterTasks(task_models.ClusterTasks): def create_cluster(self, context, cluster_id): LOG.debug("Begin create_cluster for id: %s.", cluster_id) def _create_cluster(): cluster_node_ids = self.find_cluster_node_ids(cluster_id) # Wait for cluster nodes to get to cluster-ready status. LOG.debug("Waiting for all nodes to become ready.") if not self._all_instances_ready(cluster_node_ids, cluster_id): return cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids) LOG.debug("All nodes ready, proceeding with cluster setup.") seeds = self.choose_seed_nodes(cluster_nodes) # Configure each cluster node with the list of seeds. # Once all nodes are configured, start the seed nodes one at a time # followed by the rest of the nodes. try: LOG.debug("Selected seed nodes: %s", seeds) for node in cluster_nodes: LOG.debug("Configuring node: %s.", node['id']) node['guest'].set_seeds(seeds) node['guest'].set_auto_bootstrap(False) LOG.debug("Starting seed nodes.") for node in cluster_nodes: if node['ip'] in seeds: node['guest'].restart() node['guest'].set_auto_bootstrap(True) LOG.debug("All seeds running, starting remaining nodes.") for node in cluster_nodes: if node['ip'] not in seeds: node['guest'].restart() node['guest'].set_auto_bootstrap(True) # Create the in-database user via the first node. The remaining # nodes will replicate in-database changes automatically. # Only update the local authentication file on the other nodes. 
LOG.debug("Securing the cluster.") key = utils.generate_random_password() admin_creds = None for node in cluster_nodes: if admin_creds is None: admin_creds = node['guest'].cluster_secure(key) else: node['guest'].store_admin_credentials(admin_creds) node['guest'].cluster_complete() LOG.debug("Cluster configuration finished successfully.") except Exception: LOG.exception("Error creating cluster.") self.update_statuses_on_failure(cluster_id) timeout = Timeout(CONF.cluster_usage_timeout) try: _create_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("Timeout for building cluster.") self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End create_cluster for id: %s.", cluster_id) @classmethod def find_cluster_node_ids(cls, cluster_id): db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False).all() return [db_instance.id for db_instance in db_instances] @classmethod def load_cluster_nodes(cls, context, node_ids): return [cls.build_node_info(Instance.load(context, node_id)) for node_id in node_ids] @classmethod def build_node_info(cls, instance): guest = cls.get_guest(instance) return {'instance': instance, 'guest': guest, 'id': instance.id, 'ip': cls.get_ip(instance), 'dc': guest.get_data_center(), 'rack': guest.get_rack()} @classmethod def choose_seed_nodes(cls, node_info): """Select gossip seeds. The seeds are cluster nodes from which any new/other cluster nodes request information on the cluster geometry. They should include at least one node from each data center and rack. Gossip optimization is not critical, but it is recommended to use a small seed list. Select one (random) node from each dc and rack. :param node_info: List of cluster nodes. :type node_info: list of dicts """ ips_by_affinity = cls._group_by_affinity(node_info) return {ips_by_affinity[dc][rack][0] for dc in ips_by_affinity for rack in ips_by_affinity[dc]} @classmethod def _group_by_affinity(cls, node_info): """Group node IPs by affinity to data center and rack.""" ips_by_affinity = dict() for node in node_info: ip = node['ip'] dc = node['dc'] rack = node['rack'] if dc in ips_by_affinity: dc_nodes = ips_by_affinity[dc] if rack in dc_nodes: rack_nodes = dc_nodes[rack] rack_nodes.append(ip) else: dc_nodes.update({rack: [ip]}) else: ips_by_affinity.update({dc: {rack: [ip]}}) return ips_by_affinity def grow_cluster(self, context, cluster_id, new_instance_ids): LOG.debug("Begin grow_cluster for id: %s.", cluster_id) def _grow_cluster(): # Wait for new nodes to get to cluster-ready status. LOG.debug("Waiting for new nodes to become ready.") if not self._all_instances_ready(new_instance_ids, cluster_id): return new_instances = [Instance.load(context, instance_id) for instance_id in new_instance_ids] added_nodes = [self.build_node_info(instance) for instance in new_instances] LOG.debug("All nodes ready, proceeding with cluster setup.") cluster_node_ids = self.find_cluster_node_ids(cluster_id) cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids) old_nodes = [node for node in cluster_nodes if node['id'] not in new_instance_ids] try: # All nodes should have the same seeds and credentials. # Retrieve the information from the first node. test_node = old_nodes[0] current_seeds = test_node['guest'].get_seeds() admin_creds = test_node['guest'].get_admin_credentials() # Bootstrap new nodes. # Seed nodes do not bootstrap. Current running nodes # must be used as seeds during the process. 
# Since we are adding to an existing cluster, ensure that the # new nodes have auto-bootstrapping enabled. # Start the added nodes. LOG.debug("Starting new nodes.") for node in added_nodes: node['guest'].set_auto_bootstrap(True) node['guest'].set_seeds(current_seeds) node['guest'].store_admin_credentials(admin_creds) node['guest'].restart() node['guest'].cluster_complete() # Recompute the seed nodes based on the updated cluster # geometry. seeds = self.choose_seed_nodes(cluster_nodes) # Configure each cluster node with the updated list of seeds. LOG.debug("Updating all nodes with new seeds: %s", seeds) for node in cluster_nodes: node['guest'].set_seeds(seeds) # Run nodetool cleanup on each of the previously existing nodes # to remove the keys that no longer belong to those nodes. # Wait for cleanup to complete on one node before running # it on the next node. LOG.debug("Cleaning up orphan data on old cluster nodes.") for node in old_nodes: nid = node['id'] node['guest'].node_cleanup_begin() node['guest'].node_cleanup() LOG.debug("Waiting for node to finish its " "cleanup: %s", nid) if not self._all_instances_running([nid], cluster_id): LOG.warning("Node did not complete cleanup " "successfully: %s", nid) LOG.debug("Cluster configuration finished successfully.") except Exception: LOG.exception("Error growing cluster.") self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR) timeout = Timeout(CONF.cluster_usage_timeout) try: _grow_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("Timeout for growing cluster.") self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR) finally: timeout.cancel() LOG.debug("End grow_cluster for id: %s.", cluster_id) def shrink_cluster(self, context, cluster_id, removal_ids): LOG.debug("Begin shrink_cluster for id: %s.", cluster_id) def _shrink_cluster(): cluster_node_ids = self.find_cluster_node_ids(cluster_id) cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids) removed_nodes = CassandraClusterTasks.load_cluster_nodes( context, removal_ids) LOG.debug("All nodes ready, proceeding with cluster setup.") # Update the list of seeds on remaining nodes if necessary. # Once all nodes are configured, decommission the removed nodes. # Cassandra will stream data from decommissioned nodes to the # remaining ones. try: current_seeds = self._get_current_seeds(context, cluster_id) # The seeds will have to be updated on all remaining instances # if any of the seed nodes is going to be removed. update_seeds = any(node['ip'] in current_seeds for node in removed_nodes) LOG.debug("Decommissioning removed nodes.") for node in removed_nodes: node['guest'].node_decommission() node['instance'].update_db(cluster_id=None) # Recompute the seed nodes based on the updated cluster # geometry if any of the existing seed nodes was removed. if update_seeds: LOG.debug("Updating seeds on the remaining nodes.") cluster_nodes = self.load_cluster_nodes( context, cluster_node_ids) remaining_nodes = [node for node in cluster_nodes if node['id'] not in removal_ids] seeds = self.choose_seed_nodes(remaining_nodes) LOG.debug("Selected seed nodes: %s", seeds) for node in remaining_nodes: LOG.debug("Configuring node: %s.", node['id']) node['guest'].set_seeds(seeds) # Wait for the removed nodes to go SHUTDOWN. 
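            # --- Illustrative sketch (not part of the original module) ---
            # The shrink path above only recomputes seeds when at least one
            # node slated for removal is itself a seed; the wait below then
            # confirms the decommissioned nodes actually went down. The
            # seed-update decision in isolation, with plain data:
            #
            #     def _needs_seed_update(removed_nodes, current_seeds):
            #         # True as soon as any removed node is a current seed.
            #         return any(node['ip'] in current_seeds
            #                    for node in removed_nodes)
            #
            #     print(_needs_seed_update([{'ip': '10.0.0.3'}],
            #                              {'10.0.0.1', '10.0.0.2'}))
            #     # -> False: no seed removed, seed list is kept as-is.
            #     print(_needs_seed_update([{'ip': '10.0.0.1'}],
            #                              {'10.0.0.1', '10.0.0.2'}))
            #     # -> True: a seed is leaving, seeds are re-chosen.
            # --- end sketch ---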
            LOG.debug("Waiting for all decommissioned nodes to shutdown.")
            if not self._all_instances_shutdown(removal_ids, cluster_id):
                # Now detached, failed nodes will stay available
                # in the list of standalone instances.
                return

            # Delete decommissioned instances only when the cluster is in a
            # consistent state.
            LOG.debug("Deleting decommissioned instances.")
            for node in removed_nodes:
                Instance.delete(node['instance'])

            LOG.debug("Cluster configuration finished successfully.")
        except Exception:
            LOG.exception("Error shrinking cluster.")
            self.update_statuses_on_failure(
                cluster_id,
                status=inst_tasks.InstanceTasks.SHRINKING_ERROR)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _shrink_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception("Timeout for shrinking cluster.")
            self.update_statuses_on_failure(
                cluster_id,
                status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
        finally:
            timeout.cancel()

        LOG.debug("End shrink_cluster for id: %s.", cluster_id)

    def restart_cluster(self, context, cluster_id):
        self.rolling_restart_cluster(
            context, cluster_id, delay_sec=CONF.cassandra.node_sync_time)

    def upgrade_cluster(self, context, cluster_id, datastore_version):
        current_seeds = self._get_current_seeds(context, cluster_id)

        def ordering_function(instance):
            if self.get_ip(instance) in current_seeds:
                return -1
            return 0

        self.rolling_upgrade_cluster(context, cluster_id,
                                     datastore_version, ordering_function)

    def _get_current_seeds(self, context, cluster_id):
        # All nodes should have the same seeds.
        # We retrieve current seeds from the first node.
        cluster_node_ids = self.find_cluster_node_ids(cluster_id)
        test_node = self.load_cluster_nodes(context,
                                            cluster_node_ids[:1])[0]
        return test_node['guest'].get_seeds()


class CassandraTaskManagerAPI(task_api.API):
    pass


# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/galera_common/ ----
# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/galera_common/__init__.py (empty) ----
# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/galera_common/api.py ----

# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Copyright 2016 Tesora Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
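# --- Illustrative sketch (not part of the original modules; it refers to
# the Cassandra taskmanager above) ---
# upgrade_cluster hands rolling_upgrade_cluster an ordering function that
# returns -1 for seed nodes and 0 for everything else, so seeds sort ahead
# of the rest during a rolling upgrade. The same ordering in isolation,
# with plain dicts standing in for instances:

def _upgrade_order(instances, seed_ips):
    # Seeds first, then everything else; Python's sort is stable, so the
    # relative order within each group is preserved.
    return sorted(instances,
                  key=lambda inst: -1 if inst['ip'] in seed_ips else 0)


_nodes = [{'id': 'a', 'ip': '10.0.0.1'},
          {'id': 'b', 'ip': '10.0.0.2'},
          {'id': 'c', 'ip': '10.0.0.3'}]
print(_upgrade_order(_nodes, seed_ips={'10.0.0.2'}))
# -> node 'b' (the seed) is upgraded first.
# --- end sketch ---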
from oslo_log import log as logging import time from trove.cluster import models as cluster_models from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common import exception from trove.common import server_group as srv_grp from trove.common.strategies.cluster import base as cluster_base from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class GaleraCommonAPIStrategy(cluster_base.BaseAPIStrategy): @property def cluster_class(self): return GaleraCommonCluster @property def cluster_view_class(self): return GaleraCommonClusterView @property def mgmt_cluster_view_class(self): return GaleraCommonMgmtClusterView class GaleraCommonCluster(cluster_models.Cluster): @staticmethod def _validate_cluster_instances(context, instances, datastore, datastore_version): """Validate the flavor and volume""" ds_conf = CONF.get(datastore_version.manager) num_instances = len(instances) # Checking volumes and get delta for quota check cluster_models.validate_instance_flavors( context, instances, ds_conf.volume_support, ds_conf.device_path) req_volume_size = cluster_models.get_required_volume_size( instances, ds_conf.volume_support) cluster_models.assert_homogeneous_cluster(instances) deltas = {'instances': num_instances, 'volumes': req_volume_size} # quota check check_quotas(context.project_id, deltas) # Checking networks are same for the cluster cluster_models.validate_instance_nics(context, instances) @staticmethod def _create_instances(context, db_info, datastore, datastore_version, instances, extended_properties, locality, configuration_id): member_config = {"id": db_info.id, "instance_type": "member"} name_index = int(time.time()) for instance in instances: if not instance.get("name"): instance['name'] = "%s-member-%s" % (db_info.name, str(name_index)) name_index += 1 return [Instance.create(context, instance['name'], instance['flavor_id'], datastore_version.image_id, [], [], datastore, datastore_version, instance.get('volume_size', None), None, availability_zone=instance.get( 'availability_zone', None), nics=instance.get('nics', None), configuration_id=configuration_id, cluster_config=member_config, volume_type=instance.get( 'volume_type', None), modules=instance.get('modules'), locality=locality, region_name=instance.get('region_name') ) for instance in instances] @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties, locality, configuration): LOG.debug("Initiating Galera cluster creation.") ds_conf = CONF.get(datastore_version.manager) # Check number of instances is at least min_cluster_member_count if len(instances) < ds_conf.min_cluster_member_count: raise exception.ClusterNumInstancesNotLargeEnough( num_instances=ds_conf.min_cluster_member_count) cls._validate_cluster_instances(context, instances, datastore, datastore_version) # Updating Cluster Task db_info = cluster_models.DBCluster.create( name=name, tenant_id=context.project_id, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL, configuration_id=configuration) cls._create_instances(context, db_info, datastore, datastore_version, instances, extended_properties, locality, configuration) # Calling taskmanager to further proceed for cluster-configuration 
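        # --- Illustrative sketch (not part of the original module) ---
        # _create_instances above derives default member names from the
        # cluster name plus a time-based counter, so unnamed members still
        # get unique, stable names ("<cluster>-member-<n>"). In isolation
        # (the taskmanager call below then kicks off actual provisioning):
        #
        #     import time
        #
        #     def _name_members(cluster_name, instances):
        #         name_index = int(time.time())
        #         for instance in instances:
        #             if not instance.get('name'):
        #                 instance['name'] = "%s-member-%s" % (cluster_name,
        #                                                      name_index)
        #                 name_index += 1
        #         return instances
        #
        #     print(_name_members('galera1', [{}, {'name': 'explicit'}, {}]))
        #     # -> two generated names around the current epoch,
        #     #    one name kept as given.
        # --- end sketch ---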
        task_api.load(context, datastore_version.manager).create_cluster(
            db_info.id)

        return cls(context, db_info, datastore, datastore_version)

    def grow(self, instances):
        LOG.debug("Growing cluster %s.", self.id)

        self.validate_cluster_available()

        context = self.context
        db_info = self.db_info
        datastore = self.ds
        datastore_version = self.ds_version

        self._validate_cluster_instances(context, instances, datastore,
                                         datastore_version)

        db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
        try:
            locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
            configuration_id = self.db_info.configuration_id
            new_instances = self._create_instances(
                context, db_info, datastore, datastore_version, instances,
                None, locality, configuration_id)

            task_api.load(context, datastore_version.manager).grow_cluster(
                db_info.id, [instance.id for instance in new_instances])
        except Exception:
            db_info.update(task_status=ClusterTasks.NONE)
            raise

        return self.__class__(context, db_info, datastore, datastore_version)

    def shrink(self, instances):
        """Removes instances from a cluster."""
        LOG.debug("Shrinking cluster %s.", self.id)

        self.validate_cluster_available()
        removal_instances = [Instance.load(self.context, inst_id)
                             for inst_id in instances]
        db_instances = DBInstance.find_all(
            cluster_id=self.db_info.id, deleted=False).all()
        if len(db_instances) - len(removal_instances) < 1:
            raise exception.ClusterShrinkMustNotLeaveClusterEmpty()

        self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER)
        try:
            task_api.load(self.context, self.ds_version.manager
                          ).shrink_cluster(
                self.db_info.id,
                [instance.id for instance in removal_instances])
        except Exception:
            self.db_info.update(task_status=ClusterTasks.NONE)
            raise

        return self.__class__(self.context, self.db_info, self.ds,
                              self.ds_version)

    def restart(self):
        self.rolling_restart()

    def upgrade(self, datastore_version):
        self.rolling_upgrade(datastore_version)

    def configuration_attach(self, configuration_id):
        self.rolling_configuration_update(configuration_id)

    def configuration_detach(self):
        self.rolling_configuration_remove()


class GaleraCommonClusterView(ClusterView):

    def build_instances(self):
        return self._build_instances(['member'], ['member'])


class GaleraCommonMgmtClusterView(MgmtClusterView):

    def build_instances(self):
        return self._build_instances(['member'], ['member'])


# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/galera_common/guestagent.py ----

# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Copyright 2016 Tesora Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging

from trove.common import cfg
from trove.common.strategies.cluster import base as cluster_base
from trove.guestagent import api as guest_api

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class GaleraCommonGuestAgentStrategy(cluster_base.BaseGuestAgentStrategy):

    @property
    def guest_client_class(self):
        return GaleraCommonGuestAgentAPI


class GaleraCommonGuestAgentAPI(guest_api.API):
    """Cluster Specific Datastore Guest API

    **** VERSION CONTROLLED API ****

    The methods in this class are subject to version control as
    coordinated by guestagent/api.py.  Whenever a change is made to any
    API method in this class, add a version number and comment to the
    top of guestagent/api.py and use the version number as appropriate
    in this file.
    """

    def install_cluster(self, replication_user, cluster_configuration,
                        bootstrap):
        """Install the cluster."""
        LOG.debug("Installing Galera cluster.")
        version = guest_api.API.API_BASE_VERSION

        self._call("install_cluster", CONF.cluster_usage_timeout,
                   version=version,
                   replication_user=replication_user,
                   cluster_configuration=cluster_configuration,
                   bootstrap=bootstrap)

    def reset_admin_password(self, admin_password):
        """Store this password on the instance as the admin password."""
        version = guest_api.API.API_BASE_VERSION

        self._call("reset_admin_password", CONF.cluster_usage_timeout,
                   version=version,
                   admin_password=admin_password)

    def cluster_complete(self):
        """Set the status indicating the cluster build is complete."""
        LOG.debug("Notifying cluster install completion.")
        version = guest_api.API.API_BASE_VERSION

        return self._call("cluster_complete", self.agent_high_timeout,
                          version=version)

    def get_cluster_context(self):
        """Get the context of the cluster."""
        LOG.debug("Getting the cluster context.")
        version = guest_api.API.API_BASE_VERSION

        return self._call("get_cluster_context", self.agent_high_timeout,
                          version=version)

    def write_cluster_configuration_overrides(self, cluster_configuration):
        """Write an updated cluster configuration."""
        LOG.debug("Writing an updated cluster configuration.")
        version = guest_api.API.API_BASE_VERSION

        self._call("write_cluster_configuration_overrides",
                   self.agent_high_timeout,
                   version=version,
                   cluster_configuration=cluster_configuration)


# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py ----

# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Copyright 2016 Tesora Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
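# --- Illustrative sketch (not part of the original modules; it refers to
# the Galera guest API above, before the taskmanager implementation below) ---
# Every guest API method above follows the same shape: pin the RPC version,
# then dispatch a named call with a timeout. A minimal standalone rendering
# of that pattern, with a stub _call in place of the real oslo.messaging
# plumbing (API_BASE_VERSION and the timeout value are placeholders):

API_BASE_VERSION = '1.0'  # stand-in for guest_api.API.API_BASE_VERSION


class _StubGuestAPI(object):

    def _call(self, method_name, timeout, version, **kwargs):
        # The real implementation performs a synchronous RPC to the guest
        # agent; here we just echo what would be sent over the wire.
        print("rpc call %s (version=%s, timeout=%ss): %r"
              % (method_name, version, timeout, kwargs))

    def reset_admin_password(self, admin_password):
        self._call("reset_admin_password", 600,
                   version=API_BASE_VERSION,
                   admin_password=admin_password)


_StubGuestAPI().reset_admin_password('new-secret')
# --- end sketch ---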
from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.clients import create_nova_client from trove.common.exception import PollTimeOut from trove.common.exception import TroveError from trove.common.i18n import _ from trove.common.strategies.cluster import base as cluster_base from trove.common.template import ClusterConfigTemplate from trove.common import utils from trove.extensions.common import models as ext_models from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance import tasks as inst_tasks from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF class GaleraCommonTaskManagerStrategy(cluster_base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return task_api.API @property def task_manager_cluster_tasks_class(self): return GaleraCommonClusterTasks class GaleraCommonClusterTasks(task_models.ClusterTasks): CLUSTER_REPLICATION_USER = "clusterrepuser" def _render_cluster_config(self, context, instance, cluster_ips, cluster_name, replication_user): client = create_nova_client(context) flavor = client.flavors.get(instance.flavor_id) instance_ip = self.get_ip(instance) config = ClusterConfigTemplate( self.datastore_version, flavor, instance.id) replication_user_pass = "%(name)s:%(password)s" % replication_user config_rendered = config.render( replication_user_pass=replication_user_pass, cluster_ips=cluster_ips, cluster_name=cluster_name, instance_ip=instance_ip, instance_name=instance.name, ) return config_rendered def create_cluster(self, context, cluster_id): LOG.debug("Begin create_cluster for id: %s.", cluster_id) def _create_cluster(): # Fetch instances by cluster_id against instances table. db_instances = DBInstance.find_all(cluster_id=cluster_id).all() instance_ids = [db_instance.id for db_instance in db_instances] LOG.debug("Waiting for instances to get to cluster-ready status.") # Wait for cluster members to get to cluster-ready status. if not self._all_instances_ready(instance_ids, cluster_id): raise TroveError(_("Instances in cluster did not report " "ACTIVE")) LOG.debug("All members ready, proceeding for cluster setup.") instances = [Instance.load(context, instance_id) for instance_id in instance_ids] cluster_ips = [self.get_ip(instance) for instance in instances] instance_guests = [] # Create replication user and password for synchronizing the # galera cluster replication_user = { "name": self.CLUSTER_REPLICATION_USER, "password": utils.generate_random_password(), } # Galera cluster name must be unique and be shorter than a full # uuid string so we remove the hyphens and chop it off. It was # recommended to be 16 chars or less. # (this is not currently documented on Galera docs) cluster_name = utils.generate_uuid().replace("-", "")[:16] LOG.debug("Configuring cluster configuration.") try: # Set the admin password for all the instances because the # password in the my.cnf will be wrong after the joiner # instances syncs with the donor instance. 
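            # --- Illustrative sketch (not part of the original module) ---
            # _render_cluster_config above folds the replication credentials
            # into a single "name:password" token and hands it, with the
            # comma-joined member IPs, to the config template. The string
            # assembly on its own, with a stub in place of the real
            # ClusterConfigTemplate (the Galera-style option names in the
            # returned dict are illustrative only):
            #
            #     def _render(replication_user, cluster_ips, cluster_name,
            #                 instance_ip):
            #         user_pass = "%(name)s:%(password)s" % replication_user
            #         return {
            #             'wsrep_cluster_address':
            #                 "gcomm://%s" % ",".join(cluster_ips),
            #             'wsrep_cluster_name': cluster_name,
            #             'wsrep_sst_auth': user_pass,
            #             'bind_address': instance_ip,
            #         }
            #
            #     print(_render({'name': 'clusterrepuser', 'password': 'pw'},
            #                   ['10.0.0.1', '10.0.0.2'], 'abc123',
            #                   '10.0.0.1'))
            # --- end sketch ---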
admin_password = str(utils.generate_random_password()) bootstrap = True for instance in instances: guest = self.get_guest(instance) instance_guests.append(guest) guest.reset_admin_password(admin_password) # render the conf.d/cluster.cnf configuration cluster_configuration = self._render_cluster_config( context, instance, ",".join(cluster_ips), cluster_name, replication_user) # push the cluster config and bootstrap the first instance guest.install_cluster(replication_user, cluster_configuration, bootstrap) bootstrap = False LOG.debug("Finalizing cluster configuration.") for guest in instance_guests: guest.cluster_complete() except Exception: LOG.exception("Error creating cluster.") self.update_statuses_on_failure(cluster_id) timeout = Timeout(CONF.cluster_usage_timeout) try: _create_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("Timeout for building cluster.") self.update_statuses_on_failure(cluster_id) except TroveError: LOG.exception("Error creating cluster %s.", cluster_id) self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End create_cluster for id: %s.", cluster_id) def _check_cluster_for_root(self, context, existing_instances, new_instances): """Check for existing instances root enabled""" for instance in existing_instances: if ext_models.Root.load(context, instance.id): for new_instance in new_instances: ext_models.RootHistory.create(context, new_instance.id) return def grow_cluster(self, context, cluster_id, new_instance_ids): LOG.debug("Begin Galera grow_cluster for id: %s.", cluster_id) def _grow_cluster(): db_instances = DBInstance.find_all( cluster_id=cluster_id, deleted=False).all() existing_instances = [Instance.load(context, db_inst.id) for db_inst in db_instances if db_inst.id not in new_instance_ids] if not existing_instances: raise TroveError(_("Unable to determine existing cluster " "member(s)")) # get list of ips of existing cluster members existing_cluster_ips = [self.get_ip(instance) for instance in existing_instances] existing_instance_guests = [self.get_guest(instance) for instance in existing_instances] # get the cluster context to setup new members cluster_context = existing_instance_guests[0].get_cluster_context() # Wait for cluster members to get to cluster-ready status. 
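            # --- Illustrative sketch (not part of the original module) ---
            # In the create loop above only the first Galera member is
            # installed with bootstrap=True; every later member joins the
            # already-running group (the readiness check below gates the
            # analogous grow path). The flag handling on its own, with a
            # stub install step:
            #
            #     def _install_all(instance_names):
            #         bootstrap = True
            #         for name in instance_names:
            #             print("install_cluster on %s (bootstrap=%s)"
            #                   % (name, bootstrap))
            #             # Only the very first instance bootstraps a group.
            #             bootstrap = False
            #
            #     _install_all(['galera1-member-1', 'galera1-member-2',
            #                   'galera1-member-3'])
            # --- end sketch ---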
if not self._all_instances_ready(new_instance_ids, cluster_id): raise TroveError(_("Instances in cluster did not report " "ACTIVE")) LOG.debug("All members ready, proceeding for cluster setup.") # Get the new instances to join the cluster new_instances = [Instance.load(context, instance_id) for instance_id in new_instance_ids] new_cluster_ips = [self.get_ip(instance) for instance in new_instances] for instance in new_instances: guest = self.get_guest(instance) guest.reset_admin_password(cluster_context['admin_password']) # render the conf.d/cluster.cnf configuration cluster_configuration = self._render_cluster_config( context, instance, ",".join(existing_cluster_ips), cluster_context['cluster_name'], cluster_context['replication_user']) # push the cluster config and bootstrap the first instance bootstrap = False guest.install_cluster(cluster_context['replication_user'], cluster_configuration, bootstrap) self._check_cluster_for_root(context, existing_instances, new_instances) # apply the new config to all instances for instance in existing_instances + new_instances: guest = self.get_guest(instance) # render the conf.d/cluster.cnf configuration cluster_configuration = self._render_cluster_config( context, instance, ",".join(existing_cluster_ips + new_cluster_ips), cluster_context['cluster_name'], cluster_context['replication_user']) guest.write_cluster_configuration_overrides( cluster_configuration) for instance in new_instances: guest = self.get_guest(instance) guest.cluster_complete() timeout = Timeout(CONF.cluster_usage_timeout) try: _grow_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("Timeout for growing cluster.") self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR) except Exception: LOG.exception("Error growing cluster %s.", cluster_id) self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR) finally: timeout.cancel() LOG.debug("End grow_cluster for id: %s.", cluster_id) def shrink_cluster(self, context, cluster_id, removal_instance_ids): LOG.debug("Begin Galera shrink_cluster for id: %s.", cluster_id) def _shrink_cluster(): removal_instances = [Instance.load(context, instance_id) for instance_id in removal_instance_ids] for instance in removal_instances: Instance.delete(instance) # wait for instances to be deleted def all_instances_marked_deleted(): non_deleted_instances = DBInstance.find_all( cluster_id=cluster_id, deleted=False).all() non_deleted_ids = [db_instance.id for db_instance in non_deleted_instances] return not bool( set(removal_instance_ids).intersection( set(non_deleted_ids)) ) try: LOG.info("Deleting instances (%s)", removal_instance_ids) utils.poll_until(all_instances_marked_deleted, sleep_time=2, time_out=CONF.cluster_delete_time_out) except PollTimeOut: LOG.error("timeout for instances to be marked as deleted.") return db_instances = DBInstance.find_all( cluster_id=cluster_id, deleted=False).all() leftover_instances = [Instance.load(context, db_inst.id) for db_inst in db_instances if db_inst.id not in removal_instance_ids] leftover_cluster_ips = [self.get_ip(instance) for instance in leftover_instances] # Get config changes for left over instances rnd_cluster_guest = self.get_guest(leftover_instances[0]) cluster_context = rnd_cluster_guest.get_cluster_context() # apply the new config to all leftover instances for instance in leftover_instances: guest = self.get_guest(instance) # render the conf.d/cluster.cnf configuration 
                cluster_configuration = self._render_cluster_config(
                    context, instance, ",".join(leftover_cluster_ips),
                    cluster_context['cluster_name'],
                    cluster_context['replication_user'])
                guest.write_cluster_configuration_overrides(
                    cluster_configuration)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _shrink_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception("Timeout for shrinking cluster.")
            self.update_statuses_on_failure(
                cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
        except Exception:
            LOG.exception("Error shrinking cluster %s.", cluster_id)
            self.update_statuses_on_failure(
                cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
        finally:
            timeout.cancel()

        LOG.debug("End shrink_cluster for id: %s.", cluster_id)

    def restart_cluster(self, context, cluster_id):
        self.rolling_restart_cluster(context, cluster_id)

    def upgrade_cluster(self, context, cluster_id, datastore_version):
        self.rolling_upgrade_cluster(context, cluster_id, datastore_version)


# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/mongodb/ ----
# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/mongodb/__init__.py (empty) ----
# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/mongodb/api.py ----

# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
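# --- Illustrative sketch (not part of the original modules; it refers to
# the Galera shrink path above) ---
# shrink_cluster blocks on a predicate ("are all removed instances marked
# deleted?") via utils.poll_until. A minimal standalone poller with the
# same contract (the predicate returns truthy to stop; a PollTimeOut-style
# error is raised on expiry; _PollTimeOut is a stand-in class):

import time


class _PollTimeOut(Exception):
    """Stand-in for trove.common.exception.PollTimeOut."""


def _poll_until(predicate, sleep_time=2, time_out=60):
    deadline = time.time() + time_out
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(sleep_time)
    raise _PollTimeOut()


_pending = {'instance-1', 'instance-2'}


def _all_marked_deleted():
    # In Trove this re-queries DBInstance.find_all(..., deleted=False);
    # here a set is drained to simulate rows being marked deleted.
    if _pending:
        _pending.pop()
    return not _pending


_poll_until(_all_marked_deleted, sleep_time=0, time_out=5)
print("all removal instances marked deleted")
# --- end sketch ---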
from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.cluster import models from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common import clients from trove.common import exception from trove.common.i18n import _ from trove.common.notification import DBaaSClusterGrow from trove.common.notification import StartNotification from trove.common import server_group as srv_grp from trove.common.strategies.cluster import base from trove.common import utils from trove.datastore import models as datastore_models from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance import models as inst_models from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class MongoDbAPIStrategy(base.BaseAPIStrategy): @property def cluster_class(self): return MongoDbCluster @property def cluster_view_class(self): return MongoDbClusterView @property def mgmt_cluster_view_class(self): return MongoDbMgmtClusterView class MongoDbCluster(models.Cluster): @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties, locality, configuration): if configuration: raise exception.ConfigurationNotSupported() # TODO(amcreynolds): consider moving into CONF and even supporting # TODO(amcreynolds): an array of values, e.g. [3, 5, 7] # TODO(amcreynolds): or introduce a min/max num_instances and set # TODO(amcreynolds): both to 3 num_instances = len(instances) if num_instances != 3: raise exception.ClusterNumInstancesNotSupported(num_instances=3) mongo_conf = CONF.get(datastore_version.manager) num_configsvr = int(extended_properties.get( 'num_configsvr', mongo_conf.num_config_servers_per_cluster)) num_mongos = int(extended_properties.get( 'num_mongos', mongo_conf.num_query_routers_per_cluster)) delta_instances = num_instances + num_configsvr + num_mongos models.validate_instance_flavors( context, instances, mongo_conf.volume_support, mongo_conf.device_path) models.assert_homogeneous_cluster(instances) flavor_id = instances[0]['flavor_id'] volume_size = instances[0].get('volume_size', None) volume_type = instances[0].get('volume_type', None) configsvr_vsize = int(extended_properties.get( 'configsvr_volume_size', mongo_conf.config_servers_volume_size)) configsvr_vtype = extended_properties.get('configsvr_volume_type', volume_type) mongos_vsize = int(extended_properties.get( 'mongos_volume_size', mongo_conf.query_routers_volume_size)) mongos_vtype = extended_properties.get('mongos_volume_type', volume_type) all_instances = (instances + [{'volume_size': configsvr_vsize}] * num_configsvr + [{'volume_size': mongos_vsize}] * num_mongos) req_volume_size = models.get_required_volume_size( all_instances, mongo_conf.volume_support) deltas = {'instances': delta_instances, 'volumes': req_volume_size} check_quotas(context.project_id, deltas) # Checking networks are same for the cluster models.validate_instance_nics(context, instances) nics = instances[0].get('nics', None) azs = [instance.get('availability_zone', None) for instance in instances] regions = [instance.get('region_name', None) for instance in instances] db_info = models.DBCluster.create( name=name, tenant_id=context.project_id, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) replica_set_name = "rs1" member_config = {"id": db_info.id, "shard_id": utils.generate_uuid(), 
"instance_type": "member", "replica_set_name": replica_set_name} configsvr_config = {"id": db_info.id, "instance_type": "config_server"} mongos_config = {"id": db_info.id, "instance_type": "query_router"} if mongo_conf.cluster_secure: cluster_key = utils.generate_random_password() member_config['key'] = cluster_key configsvr_config['key'] = cluster_key mongos_config['key'] = cluster_key for i in range(0, num_instances): instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1)) inst_models.Instance.create(context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, volume_size, None, availability_zone=azs[i], nics=nics, configuration_id=None, cluster_config=member_config, volume_type=volume_type, modules=instances[i].get('modules'), locality=locality, region_name=regions[i]) for i in range(1, num_configsvr + 1): instance_name = "%s-%s-%s" % (name, "configsvr", str(i)) inst_models.Instance.create(context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, configsvr_vsize, None, availability_zone=None, nics=nics, configuration_id=None, cluster_config=configsvr_config, volume_type=configsvr_vtype, locality=locality, region_name=regions[i % num_instances] ) for i in range(1, num_mongos + 1): instance_name = "%s-%s-%s" % (name, "mongos", str(i)) inst_models.Instance.create(context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, mongos_vsize, None, availability_zone=None, nics=nics, configuration_id=None, cluster_config=mongos_config, volume_type=mongos_vtype, locality=locality, region_name=regions[i % num_instances] ) task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return MongoDbCluster(context, db_info, datastore, datastore_version) def _parse_grow_item(self, item): used_keys = [] def _check_option(key, required=False, valid_values=None): if required and key not in item: raise exception.TroveError( _('An instance with the options %(given)s is missing ' 'the MongoDB required option %(expected)s.') % {'given': item.keys(), 'expected': key} ) value = item.get(key, None) if valid_values and value not in valid_values: raise exception.TroveError( _('The value %(value)s for key %(key)s is invalid. 
' 'Allowed values are %(valid)s.') % {'value': value, 'key': key, 'valid': valid_values} ) used_keys.append(key) return value flavor_id = utils.get_id_from_href(_check_option('flavorRef', required=True)) volume_size = int(_check_option('volume', required=True)['size']) instance_type = _check_option('type', required=True, valid_values=[u'replica', u'query_router']) name = _check_option('name') related_to = _check_option('related_to') nics = _check_option('nics') availability_zone = _check_option('availability_zone') unused_keys = list(set(item.keys()).difference(set(used_keys))) if unused_keys: raise exception.TroveError( _('The arguments %s are not supported by MongoDB.') % unused_keys ) instance = {'flavor_id': flavor_id, 'volume_size': volume_size, 'instance_type': instance_type} if name: instance['name'] = name if related_to: instance['related_to'] = related_to if nics: instance['nics'] = nics if availability_zone: instance['availability_zone'] = availability_zone return instance def action(self, context, req, action, param): if action == 'grow': context.notification = DBaaSClusterGrow(context, request=req) with StartNotification(context, cluster_id=self.id): return self.grow([self._parse_grow_item(item) for item in param]) elif action == 'add_shard': context.notification = DBaaSClusterGrow(context, request=req) with StartNotification(context, cluster_id=self.id): return self.add_shard() else: super(MongoDbCluster, self).action(context, req, action, param) def add_shard(self): if self.db_info.task_status != ClusterTasks.NONE: current_task = self.db_info.task_status.name log_fmt = ("This action cannot be performed on the cluster while " "the current cluster task is '%s'.") exc_fmt = _("This action cannot be performed on the cluster while " "the current cluster task is '%s'.") LOG.error(log_fmt, current_task) raise exception.UnprocessableEntity(exc_fmt % current_task) db_insts = inst_models.DBInstance.find_all(cluster_id=self.id, deleted=False, type='member').all() num_unique_shards = len(set([db_inst.shard_id for db_inst in db_insts])) if num_unique_shards == 0: LOG.error("This action cannot be performed on the cluster as no " "reference shard exists.") raise exception.UnprocessableEntity( _("This action cannot be performed on the cluster as no " "reference shard exists.")) arbitrary_shard_id = db_insts[0].shard_id members_in_shard = [db_inst for db_inst in db_insts if db_inst.shard_id == arbitrary_shard_id] num_members_per_shard = len(members_in_shard) a_member = inst_models.load_any_instance(self.context, members_in_shard[0].id) deltas = {'instances': num_members_per_shard} volume_size = a_member.volume_size if volume_size: deltas['volumes'] = volume_size * num_members_per_shard check_quotas(self.context.project_id, deltas) new_replica_set_name = "rs" + str(num_unique_shards + 1) new_shard_id = utils.generate_uuid() dsv_manager = (datastore_models.DatastoreVersion. 
load_by_uuid(db_insts[0].datastore_version_id).manager) manager = task_api.load(self.context, dsv_manager) key = manager.get_key(a_member) member_config = {"id": self.id, "shard_id": new_shard_id, "instance_type": "member", "replica_set_name": new_replica_set_name, "key": key} locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) for i in range(1, num_members_per_shard + 1): instance_name = "%s-%s-%s" % (self.name, new_replica_set_name, str(i)) inst_models.Instance.create(self.context, instance_name, a_member.flavor_id, a_member.datastore_version.image_id, [], [], a_member.datastore, a_member.datastore_version, volume_size, None, availability_zone=None, nics=None, configuration_id=None, cluster_config=member_config, locality=locality) self.update_db(task_status=ClusterTasks.ADDING_SHARD) manager.mongodb_add_shard_cluster( self.id, new_shard_id, new_replica_set_name) def grow(self, instances): """Extend a cluster by adding new instances. Currently only supports adding a replica set to the cluster. """ if not len(instances) > 0: raise exception.TroveError( _('No instances specified for grow operation.') ) self._prep_resize() self._check_quotas(self.context, instances) query_routers, shards = self._group_instances(instances) for shard in shards: self._check_instances( self.context, shard, self.datastore_version, allowed_instance_count=[3] ) if query_routers: self._check_instances(self.context, query_routers, self.datastore_version) # all checks are done before any instances are created locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) instance_ids = [] for shard in shards: instance_ids.extend(self._create_shard_instances(shard, locality)) if query_routers: instance_ids.extend( self._create_query_router_instances(query_routers, locality) ) self.update_db(task_status=ClusterTasks.GROWING_CLUSTER) self.manager.grow_cluster(self.id, instance_ids) def shrink(self, instance_ids): """Removes instances from a cluster. Currently only supports removing entire replica sets from the cluster. """ if not len(instance_ids) > 0: raise exception.TroveError( _('No instances specified for shrink operation.') ) self._prep_resize() all_member_ids = set([member.id for member in self.members]) all_query_router_ids = set([query_router.id for query_router in self.query_routers]) target_ids = set(instance_ids) target_member_ids = target_ids.intersection(all_member_ids) target_query_router_ids = target_ids.intersection(all_query_router_ids) target_configsvr_ids = target_ids.difference( target_member_ids.union(target_query_router_ids) ) if target_configsvr_ids: raise exception.ClusterShrinkInstanceInUse( id=list(target_configsvr_ids), reason="Cannot remove config servers." ) remaining_query_router_ids = all_query_router_ids.difference( target_query_router_ids ) if len(remaining_query_router_ids) < 1: raise exception.ClusterShrinkInstanceInUse( id=list(target_query_router_ids), reason="Cannot remove all remaining query routers. At least " "one query router must be available in the cluster." 
) if target_member_ids: target_members = [member for member in self.members if member.id in target_member_ids] target_shards = {} for member in target_members: if member.shard_id in target_shards: target_shards[member.shard_id].append(member.id) else: target_shards[member.shard_id] = [member.id] for target_shard_id in target_shards.keys(): # check the whole shard is being deleted target_shard_member_ids = [ member.id for member in target_members if member.shard_id == target_shard_id ] all_shard_member_ids = [ member.id for member in self.members if member.shard_id == target_shard_id ] if set(target_shard_member_ids) != set(all_shard_member_ids): raise exception.TroveError( _('MongoDB cluster shrink only supports removing an ' 'entire shard. Shard %(shard)s has members: ' '%(instances)s') % {'shard': target_shard_id, 'instances': all_shard_member_ids} ) self._check_shard_status(target_shard_member_ids[0]) # all checks are done by now self.update_db(task_status=ClusterTasks.SHRINKING_CLUSTER) for instance_id in instance_ids: instance = inst_models.load_any_instance(self.context, instance_id) instance.delete() self.manager.shrink_cluster(self.id, instance_ids) def _create_instances(self, instances, cluster_config, default_name_tag, locality, key=None): """Loop through the instances and create them in this cluster.""" cluster_config['id'] = self.id if CONF.get(self.datastore_version.manager).cluster_secure: if not key: key = self.get_guest(self.arbitrary_query_router).get_key() cluster_config['key'] = key instance_ids = [] for i, instance in enumerate(instances): name = instance.get('name', '%s-%s-%s' % ( self.name, default_name_tag, i + 1)) new_instance = inst_models.Instance.create( self.context, name, instance['flavor_id'], self.datastore_version.image_id, [], [], self.datastore, self.datastore_version, instance['volume_size'], None, availability_zone=instance.get('availability_zone', None), nics=instance.get('nics', None), cluster_config=cluster_config, locality=locality ) instance_ids.append(new_instance.id) return instance_ids def _create_shard_instances(self, instances, locality, replica_set_name=None, key=None): """Create the instances for a new shard in the cluster.""" shard_id = utils.generate_uuid() if not replica_set_name: replica_set_name = self._gen_replica_set_name() cluster_config = {'shard_id': shard_id, 'instance_type': 'member', 'replica_set_name': replica_set_name} return self._create_instances(instances, cluster_config, replica_set_name, locality, key=key) def _create_query_router_instances(self, instances, locality, key=None): """Create the instances for the new query router.""" cluster_config = {'instance_type': 'query_router'} return self._create_instances(instances, cluster_config, 'mongos', locality, key=key) def _prep_resize(self): """Get information about the cluster's current state.""" if self.db_info.task_status != ClusterTasks.NONE: current_task = self.db_info.task_status.name log_fmt = ("This action cannot be performed on the cluster while " "the current cluster task is '%s'.") exc_fmt = _("This action cannot be performed on the cluster while " "the current cluster task is '%s'.") LOG.error(log_fmt, current_task) raise exception.UnprocessableEntity(exc_fmt % current_task) def _instances_of_type(instance_type): return [db_inst for db_inst in self.db_instances if db_inst.type == instance_type] self.config_svrs = _instances_of_type('config_server') self.query_routers = _instances_of_type('query_router') self.members = _instances_of_type('member') self.shard_ids = 
set([member.shard_id for member in self.members]) self.arbitrary_query_router = inst_models.load_any_instance( self.context, self.query_routers[0].id ) self.manager = task_api.load(self.context, self.datastore_version.manager) def _group_instances(self, instances): """Group the instances into logical sets (type, shard, etc).""" replicas = [] query_routers = [] for item in instances: if item['instance_type'] == 'replica': replica_requirements = ['name'] if not all(key in item for key in replica_requirements): raise exception.TroveError( _('Replica instance does not have required field(s) ' '%s.') % replica_requirements ) replicas.append(item) elif item['instance_type'] == 'query_router': query_routers.append(item) else: raise exception.TroveError( _('Instance type %s not supported for MongoDB cluster ' 'grow.') % item['instance_type'] ) return query_routers, self._group_shard_instances(replicas) def _group_shard_instances(self, instances): """Group the replica instances into shards.""" # Create the sets. Dictionary keys correspond to instance names. # Dictionary values are the same if related. sets = {} specified_names = [] for instance in instances: name = instance['name'] specified_names.append(name) if name in sets: sets[name].append(instance) else: sets[name] = [instance] if 'related_to' in instance: if instance['related_to'] == instance['name']: continue relative = instance['related_to'] if relative in sets: if sets[relative] is not sets[name]: sets[relative].extend(sets[name]) sets[name] = sets[relative] else: sets[relative] = sets[name] specified_names_set = set(specified_names) if len(specified_names) != len(specified_names_set): raise exception.TroveError( _('Duplicate member names not allowed.') ) unknown_relations = set(sets.keys()).difference((specified_names_set)) if unknown_relations: raise exception.TroveError( _('related_to target(s) %(targets)s do not match any ' 'specified names.') % {'targets': list(unknown_relations)} ) # reduce the set to unique values shards = [] for key in sets.keys(): exists = False for item in shards: if item is sets[key]: exists = True break if exists: continue shards.append(sets[key]) for shard in shards: flavor = None size = None for member in shard: if ((flavor and member['flavor_id'] != flavor) or ( size and member['volume_size'] != size)): raise exception.TroveError( _('Members of the same shard have mismatching ' 'flavorRef and/or volume values.') ) flavor = member['flavor_id'] size = member['volume_size'] return shards def _gen_replica_set_name(self): """Check the replica set names of all shards in the cluster to determine the next available name. Names are in the form 'rsX' where X is an integer. """ used_names = [] for shard_id in self.shard_ids: # query the guest for the replica name on one member of each shard members = [mem for mem in self.members if mem.shard_id == shard_id] member = inst_models.load_any_instance(self.context, members[0].id) used_names.append(self.get_guest(member).get_replica_set_name()) # find the first unused name i = 0 while True: i += 1 name = 'rs%s' % i if name not in used_names: return name def _check_shard_status(self, member_id): member = inst_models.load_any_instance(self.context, member_id) guest = self.get_guest(member) rs_name = guest.get_replica_set_name() if self.get_guest( self.arbitrary_query_router).is_shard_active(rs_name): raise exception.TroveError( _('Shard with instance %s is still active. 
                  Please remove the '
                  'shard from the MongoDB cluster before shrinking.')
                % member_id
            )

    @staticmethod
    def _check_quotas(context, instances):
        deltas = {'instances': len(instances),
                  'volumes': sum([instance['volume_size']
                                  for instance in instances])}
        check_quotas(context.project_id, deltas)

    @staticmethod
    def _check_instances(context, instances, datastore_version,
                         allowed_instance_count=None):
        instance_count = len(instances)
        if allowed_instance_count:
            if instance_count not in allowed_instance_count:
                raise exception.ClusterNumInstancesNotSupported(
                    num_instances=allowed_instance_count
                )
        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = clients.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        mongo_conf = CONF.get(datastore_version.manager)
        volume_sizes = [instance['volume_size'] for instance in instances
                        if instance.get('volume_size', None)]
        if mongo_conf.volume_support:
            if len(volume_sizes) != instance_count:
                raise exception.ClusterVolumeSizeRequired()
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            models.validate_volume_size(volume_size)
        else:
            # TODO(amcreynolds): is ephemeral possible for mongodb clusters?
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = mongo_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)


class MongoDbClusterView(ClusterView):

    def build_instances(self):
        return self._build_instances(['query_router'], ['member'])


class MongoDbMgmtClusterView(MgmtClusterView):

    def build_instances(self):
        return self._build_instances(['query_router'],
                                     ['config_server', 'member',
                                      'query_router'])


# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/mongodb/guestagent.py ----

# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from trove.common import cfg
from trove.common.strategies.cluster import base
from trove.guestagent import api as guest_api

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class MongoDbGuestAgentStrategy(base.BaseGuestAgentStrategy):

    @property
    def guest_client_class(self):
        return MongoDbGuestAgentAPI


class MongoDbGuestAgentAPI(guest_api.API):
    """Cluster Specific Datastore Guest API

    **** VERSION CONTROLLED API ****

    The methods in this class are subject to version control as
    coordinated by guestagent/api.py.  Whenever a change is made to any
    API method in this class, add a version number and comment to the
    top of guestagent/api.py and use the version number as appropriate
    in this file
    """

    def add_shard(self, replica_set_name, replica_set_member):
        LOG.debug("Adding shard with replSet %(replica_set_name)s and "
                  "member %(replica_set_member)s for instance %(id)s",
                  {'replica_set_name': replica_set_name,
                   'replica_set_member': replica_set_member,
                   'id': self.id})
        version = guest_api.API.API_BASE_VERSION

        return self._call("add_shard", self.agent_high_timeout,
                          version=version,
                          replica_set_name=replica_set_name,
                          replica_set_member=replica_set_member)

    def add_members(self, members):
        LOG.debug("Adding members %(members)s on instance %(id)s",
                  {'members': members, 'id': self.id})
        version = guest_api.API.API_BASE_VERSION

        return self._call("add_members", CONF.mongodb.add_members_timeout,
                          version=version,
                          members=members)

    def add_config_servers(self, config_servers):
        LOG.debug("Adding config servers %(config_servers)s for instance "
                  "%(id)s",
                  {'config_servers': config_servers, 'id': self.id})
        version = guest_api.API.API_BASE_VERSION

        return self._call("add_config_servers", self.agent_high_timeout,
                          version=version,
                          config_servers=config_servers)

    def cluster_complete(self):
        LOG.debug("Notify regarding cluster install completion")
        version = guest_api.API.API_BASE_VERSION

        return self._call("cluster_complete", self.agent_high_timeout,
                          version=version)

    def get_key(self):
        LOG.debug("Requesting cluster key from guest")
        version = guest_api.API.API_BASE_VERSION

        return self._call("get_key", self.agent_low_timeout,
                          version=version)

    def prep_primary(self):
        LOG.debug("Preparing member to be primary member.")
        version = guest_api.API.API_BASE_VERSION

        return self._call("prep_primary", self.agent_high_timeout,
                          version=version)

    def create_admin_user(self, password):
        LOG.debug("Creating admin user")
        version = guest_api.API.API_BASE_VERSION

        return self._call("create_admin_user", self.agent_high_timeout,
                          version=version,
                          password=password)

    def store_admin_password(self, password):
        LOG.debug("Storing admin password")
        version = guest_api.API.API_BASE_VERSION

        return self._call("store_admin_password", self.agent_low_timeout,
                          version=version,
                          password=password)

    def get_replica_set_name(self):
        LOG.debug("Querying member for its replica set name")
        version = guest_api.API.API_BASE_VERSION

        return self._call("get_replica_set_name", self.agent_high_timeout,
                          version=version)

    def get_admin_password(self):
        LOG.debug("Querying instance for its admin password")
        version = guest_api.API.API_BASE_VERSION

        return self._call("get_admin_password", self.agent_low_timeout,
                          version=version)

    def is_shard_active(self, replica_set_name):
        LOG.debug("Checking if replica set %s is active", replica_set_name)
        version = guest_api.API.API_BASE_VERSION

        return self._call("is_shard_active", self.agent_high_timeout,
                          version=version,
                          replica_set_name=replica_set_name)


# ---- trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py ----

# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.exception import PollTimeOut from trove.common.instance import ServiceStatuses from trove.common.strategies.cluster import base from trove.common import utils from trove.instance import models from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance import tasks as inst_tasks from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF class MongoDbTaskManagerStrategy(base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return MongoDbTaskManagerAPI @property def task_manager_cluster_tasks_class(self): return MongoDbClusterTasks @property def task_manager_manager_actions(self): return {'add_shard_cluster': self._manager_add_shard} def _manager_add_shard(self, context, cluster_id, shard_id, replica_set_name): cluster_tasks = task_models.ClusterTasks.load( context, cluster_id, MongoDbClusterTasks) cluster_tasks.add_shard_cluster(context, cluster_id, shard_id, replica_set_name) class MongoDbClusterTasks(task_models.ClusterTasks): def create_cluster(self, context, cluster_id): LOG.debug("begin create_cluster for id: %s", cluster_id) def _create_cluster(): # fetch instances by cluster_id against instances table db_instances = DBInstance.find_all(cluster_id=cluster_id).all() instance_ids = [db_instance.id for db_instance in db_instances] LOG.debug("instances in cluster %(cluster_id)s: %(instance_ids)s", {'cluster_id': cluster_id, 'instance_ids': instance_ids}) if not self._all_instances_ready(instance_ids, cluster_id): return LOG.debug("all instances in cluster %s ready.", cluster_id) instances = [Instance.load(context, instance_id) for instance_id in instance_ids] # filter query routers in instances into a new list: query_routers query_routers = [instance for instance in instances if instance.type == 'query_router'] LOG.debug("query routers: %s", [instance.id for instance in query_routers]) # filter config servers in instances into new list: config_servers config_servers = [instance for instance in instances if instance.type == 'config_server'] LOG.debug("config servers: %s", [instance.id for instance in config_servers]) # filter members (non router/configsvr) into a new list: members members = [instance for instance in instances if instance.type == 'member'] LOG.debug("members: %s", [instance.id for instance in members]) # for config_server in config_servers, append ip/hostname to # "config_server_hosts", then # peel off the replica-set name and ip/hostname from 'x' config_server_ips = [self.get_ip(instance) for instance in config_servers] LOG.debug("config server ips: %s", config_server_ips) if not self._add_query_routers(query_routers, config_server_ips): return if not self._create_shard(query_routers[0], members): return # call to start checking status for instance in instances: self.get_guest(instance).cluster_complete() cluster_usage_timeout = CONF.cluster_usage_timeout timeout = Timeout(cluster_usage_timeout) try: 
_create_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("timeout for building cluster.") self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("end create_cluster for id: %s", cluster_id) def add_shard_cluster(self, context, cluster_id, shard_id, replica_set_name): LOG.debug("begin add_shard_cluster for cluster %(cluster_id)s " "shard %(shard_id)s", {'cluster_id': cluster_id, 'shard_id': shard_id}) def _add_shard_cluster(): db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False, shard_id=shard_id).all() instance_ids = [db_instance.id for db_instance in db_instances] LOG.debug("instances in shard %(shard_id)s: %(instance_ids)s", {'shard_id': shard_id, 'instance_ids': instance_ids}) if not self._all_instances_ready(instance_ids, cluster_id, shard_id): return members = [Instance.load(context, instance_id) for instance_id in instance_ids] db_query_routers = DBInstance.find_all(cluster_id=cluster_id, type='query_router', deleted=False).all() query_routers = [Instance.load(context, db_query_router.id) for db_query_router in db_query_routers] if not self._create_shard(query_routers[0], members): return for member in members: self.get_guest(member).cluster_complete() cluster_usage_timeout = CONF.cluster_usage_timeout timeout = Timeout(cluster_usage_timeout) try: _add_shard_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("timeout for building shard.") self.update_statuses_on_failure(cluster_id, shard_id) finally: timeout.cancel() LOG.debug("end add_shard_cluster for cluster %(cluster_id)s " "shard %(shard_id)s", {'cluster_id': cluster_id, 'shard_id': shard_id}) def grow_cluster(self, context, cluster_id, instance_ids): LOG.debug("begin grow_cluster for MongoDB cluster %s", cluster_id) def _grow_cluster(): new_instances = [db_instance for db_instance in self.db_instances if db_instance.id in instance_ids] new_members = [db_instance for db_instance in new_instances if db_instance.type == 'member'] new_query_routers = [db_instance for db_instance in new_instances if db_instance.type == 'query_router'] instances = [] if new_members: shard_ids = set([db_instance.shard_id for db_instance in new_members]) query_router_id = self._get_running_query_router_id() if not query_router_id: return for shard_id in shard_ids: LOG.debug('growing cluster by adding shard %(shard_id)s ' 'on query router %(router_id)s', {'shard_id': shard_id, 'router_id': query_router_id}) member_ids = [db_instance.id for db_instance in new_members if db_instance.shard_id == shard_id] if not self._all_instances_ready( member_ids, cluster_id, shard_id ): return members = [Instance.load(context, member_id) for member_id in member_ids] query_router = Instance.load(context, query_router_id) if not self._create_shard(query_router, members): return instances.extend(members) if new_query_routers: query_router_ids = [db_instance.id for db_instance in new_query_routers] config_servers_ids = [db_instance.id for db_instance in self.db_instances if db_instance.type == 'config_server'] LOG.debug('growing cluster by adding query routers ' '%(router)s, with config servers %(server)s', {'router': query_router_ids, 'server': config_servers_ids}) if not self._all_instances_ready( query_router_ids, cluster_id ): return query_routers = [Instance.load(context, instance_id) for instance_id in query_router_ids] config_servers_ips = [ self.get_ip(Instance.load(context, config_server_id)) for 
config_server_id in config_servers_ids ] if not self._add_query_routers( query_routers, config_servers_ips, admin_password=self.get_cluster_admin_password(context) ): return instances.extend(query_routers) for instance in instances: self.get_guest(instance).cluster_complete() cluster_usage_timeout = CONF.cluster_usage_timeout timeout = Timeout(cluster_usage_timeout) try: _grow_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("timeout for growing cluster.") self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR) finally: timeout.cancel() LOG.debug("end grow_cluster for MongoDB cluster %s", self.id) def shrink_cluster(self, context, cluster_id, instance_ids): LOG.debug("begin shrink_cluster for MongoDB cluster %s", cluster_id) def _shrink_cluster(): def all_instances_marked_deleted(): non_deleted_instances = DBInstance.find_all( cluster_id=cluster_id, deleted=False).all() non_deleted_ids = [db_instance.id for db_instance in non_deleted_instances] return not bool( set(instance_ids).intersection(set(non_deleted_ids)) ) try: utils.poll_until(all_instances_marked_deleted, sleep_time=2, time_out=CONF.cluster_delete_time_out) except PollTimeOut: LOG.error("timeout for instances to be marked as deleted.") return cluster_usage_timeout = CONF.cluster_usage_timeout timeout = Timeout(cluster_usage_timeout) try: _shrink_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("timeout for shrinking cluster.") self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR) finally: timeout.cancel() LOG.debug("end shrink_cluster for MongoDB cluster %s", self.id) def get_cluster_admin_password(self, context): """The cluster admin's user credentials are stored on all query routers. Find one and get the guest to return the password. """ instance = Instance.load(context, self._get_running_query_router_id()) return self.get_guest(instance).get_admin_password() def _init_replica_set(self, primary_member, other_members): """Initialize the replica set by calling the primary member guest's add_members. """ LOG.debug('initializing replica set on %s', primary_member.id) other_members_ips = [] try: for member in other_members: other_members_ips.append(self.get_ip(member)) self.get_guest(member).restart() self.get_guest(primary_member).prep_primary() self.get_guest(primary_member).add_members(other_members_ips) except Exception: LOG.exception("error initializing replica set") self.update_statuses_on_failure(self.id, shard_id=primary_member.shard_id) return False return True def _create_shard(self, query_router, members): """Create a replica set out of the given member instances and add it as a shard to the cluster. 
""" primary_member = members[0] other_members = members[1:] if not self._init_replica_set(primary_member, other_members): return False replica_set = self.get_guest(primary_member).get_replica_set_name() LOG.debug('adding replica set %(replica_set)s as shard %(shard_id)s ' 'to cluster %(cluster_id)s', {'replica_set': replica_set, 'shard_id': primary_member.shard_id, 'cluster_id': self.id}) try: self.get_guest(query_router).add_shard( replica_set, self.get_ip(primary_member)) except Exception: LOG.exception("error adding shard") self.update_statuses_on_failure(self.id, shard_id=primary_member.shard_id) return False return True def _get_running_query_router_id(self): """Get a query router in this cluster that is in the RUNNING state.""" for instance_id in [db_instance.id for db_instance in self.db_instances if db_instance.type == 'query_router']: status = models.InstanceServiceStatus.find_by( instance_id=instance_id).get_status() if status == ServiceStatuses.RUNNING: return instance_id LOG.exception("no query routers ready to accept requests") self.update_statuses_on_failure(self.id) return False def _add_query_routers(self, query_routers, config_server_ips, admin_password=None): """Configure the given query routers for the cluster. If this is a new_cluster an admin user will be created with a randomly generated password, else the password needs to be retrieved from and existing query router. """ LOG.debug('adding new query router(s) %(routers)s with config server ' 'ips %(ips)s', {'routers': [i.id for i in query_routers], 'ips': config_server_ips}) for query_router in query_routers: try: LOG.debug("calling add_config_servers on query router %s", query_router.id) guest = self.get_guest(query_router) guest.add_config_servers(config_server_ips) if not admin_password: LOG.debug("creating cluster admin user") admin_password = utils.generate_random_password() guest.create_admin_user(admin_password) else: guest.store_admin_password(admin_password) except Exception: LOG.exception("error adding config servers") self.update_statuses_on_failure(self.id) return False return True class MongoDbTaskManagerAPI(task_api.API): def mongodb_add_shard_cluster(self, cluster_id, shard_id, replica_set_name): LOG.debug("Making async call to add shard cluster %s ", cluster_id) version = task_api.API.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "add_shard_cluster", cluster_id=cluster_id, shard_id=shard_id, replica_set_name=replica_set_name) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.74411 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/redis/0000755000175000017500000000000000000000000026234 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/redis/__init__.py0000644000175000017500000000000000000000000030333 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/redis/api.py0000644000175000017500000001757200000000000027373 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from trove.cluster import models from trove.cluster.models import Cluster from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common import exception from trove.common import server_group as srv_grp from trove.common.strategies.cluster import base from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance import models as inst_models from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class RedisAPIStrategy(base.BaseAPIStrategy): @property def cluster_class(self): return RedisCluster @property def cluster_view_class(self): return RedisClusterView @property def mgmt_cluster_view_class(self): return RedisMgmtClusterView class RedisCluster(models.Cluster): @staticmethod def _create_instances(context, db_info, datastore, datastore_version, instances, extended_properties, locality): redis_conf = CONF.get(datastore_version.manager) ephemeral_enabled = redis_conf.device_path volume_enabled = redis_conf.volume_support num_instances = len(instances) models.validate_instance_flavors( context, instances, volume_enabled, ephemeral_enabled) total_volume_allocation = models.get_required_volume_size( instances, volume_enabled) models.assert_homogeneous_cluster(instances) models.validate_instance_nics(context, instances) name_index = 1 for instance in instances: if not instance.get('name'): instance['name'] = "%s-member-%s" % (db_info.name, name_index) name_index += 1 # Check quotas quota_request = {'instances': num_instances, 'volumes': total_volume_allocation} check_quotas(context.project_id, quota_request) # Creating member instances return [inst_models.Instance.create(context, instance['name'], instance['flavor_id'], datastore_version.image_id, [], [], datastore, datastore_version, instance.get('volume_size'), None, instance.get( 'availability_zone', None), instance.get('nics', None), configuration_id=None, cluster_config={ "id": db_info.id, "instance_type": "member"}, volume_type=instance.get( 'volume_type', None), modules=instance.get('modules'), locality=locality, region_name=instance.get( 'region_name') ) for instance in instances] @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties, locality, configuration): LOG.debug("Initiating cluster creation.") if configuration: raise exception.ConfigurationNotSupported() # Updating Cluster Task db_info = models.DBCluster.create( name=name, tenant_id=context.project_id, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_instances(context, db_info, datastore, datastore_version, instances, extended_properties, locality) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return RedisCluster(context, db_info, datastore, datastore_version) def upgrade(self, datastore_version): self.rolling_upgrade(datastore_version) def grow(self, instances): 
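# The defaulting in RedisCluster._create_instances above can be restated in a
# few lines: unnamed members get '<cluster>-member-<n>' and the quota request
# sums the instance count plus the total volume allocation. A hedged sketch,
# assuming volume support is enabled (data and helper name illustrative):

def build_member_names_and_quota(cluster_name, instances):
    name_index = 1
    total_volume = 0
    for instance in instances:
        if not instance.get('name'):
            instance['name'] = "%s-member-%s" % (cluster_name, name_index)
            name_index += 1
        total_volume += instance.get('volume_size') or 0
    return {'instances': len(instances), 'volumes': total_volume}


quota = build_member_names_and_quota(
    'redis1',
    [{'flavor_id': '7', 'volume_size': 2},
     {'name': 'cache-a', 'flavor_id': '7', 'volume_size': 2}])
assert quota == {'instances': 2, 'volumes': 4}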
LOG.debug("Growing cluster.") self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) new_instances = self._create_instances(context, db_info, datastore, datastore_version, instances, None, locality) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return RedisCluster(context, db_info, datastore, datastore_version) def shrink(self, removal_ids): LOG.debug("Shrinking cluster %s.", self.id) self.validate_cluster_available() cluster_info = self.db_info cluster_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) try: removal_insts = [inst_models.Instance.load(self.context, inst_id) for inst_id in removal_ids] node_ids = [] error_ids = [] for instance in removal_insts: node_id = Cluster.get_guest(instance).get_node_id_for_removal() if node_id: node_ids.append(node_id) else: error_ids.append(instance.id) if error_ids: raise exception.ClusterShrinkInstanceInUse( id=error_ids, reason="Nodes cannot be removed. Check slots." ) all_instances = ( inst_models.DBInstance.find_all(cluster_id=self.id, deleted=False).all()) remain_insts = [inst_models.Instance.load(self.context, inst.id) for inst in all_instances if inst.id not in removal_ids] for inst in remain_insts: guest = Cluster.get_guest(inst) guest.remove_nodes(node_ids) for inst in removal_insts: inst.update_db(cluster_id=None) for inst in removal_insts: inst_models.Instance.delete(inst) return RedisCluster(self.context, cluster_info, self.ds, self.ds_version) finally: cluster_info.update(task_status=ClusterTasks.NONE) class RedisClusterView(ClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) class RedisMgmtClusterView(MgmtClusterView): def build_instances(self): return self._build_instances(['member'], ['member']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/redis/guestagent.py0000644000175000017500000000600200000000000030752 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from trove.common.strategies.cluster import base from trove.guestagent import api as guest_api LOG = logging.getLogger(__name__) class RedisGuestAgentStrategy(base.BaseGuestAgentStrategy): @property def guest_client_class(self): return RedisGuestAgentAPI class RedisGuestAgentAPI(guest_api.API): """Cluster Specific Datastore Guest API **** VERSION CONTROLLED API **** The methods in this class are subject to version control as coordinated by guestagent/api.py. 
Whenever a change is made to any API method in this class, add a version number and comment to the top of guestagent/api.py and use the version number as appropriate in this file """ def get_node_ip(self): LOG.debug("Retrieve ip info from node.") version = guest_api.API.API_BASE_VERSION return self._call("get_node_ip", self.agent_high_timeout, version=version) def get_node_id_for_removal(self): LOG.debug("Validating cluster node removal.") version = guest_api.API.API_BASE_VERSION return self._call("get_node_id_for_removal", self.agent_high_timeout, version=version) def remove_nodes(self, node_ids): LOG.debug("Removing nodes from cluster.") version = guest_api.API.API_BASE_VERSION return self._call("remove_nodes", self.agent_high_timeout, version=version, node_ids=node_ids) def cluster_meet(self, ip, port): LOG.debug("Joining node to cluster.") version = guest_api.API.API_BASE_VERSION return self._call("cluster_meet", self.agent_high_timeout, version=version, ip=ip, port=port) def cluster_addslots(self, first_slot, last_slot): LOG.debug("Adding slots %s-%s to cluster.", first_slot, last_slot) version = guest_api.API.API_BASE_VERSION return self._call("cluster_addslots", self.agent_high_timeout, version=version, first_slot=first_slot, last_slot=last_slot) def cluster_complete(self): LOG.debug("Notifying cluster install completion.") version = guest_api.API.API_BASE_VERSION return self._call("cluster_complete", self.agent_high_timeout, version=version) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/redis/taskmanager.py0000644000175000017500000001405000000000000031103 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.exception import TroveError from trove.common.i18n import _ from trove.common.strategies.cluster import base from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance import tasks as inst_tasks from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF class RedisTaskManagerStrategy(base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return RedisTaskManagerAPI @property def task_manager_cluster_tasks_class(self): return RedisClusterTasks class RedisClusterTasks(task_models.ClusterTasks): def create_cluster(self, context, cluster_id): LOG.debug("Begin create_cluster for id: %s.", cluster_id) def _create_cluster(): # Fetch instances by cluster_id against instances table. db_instances = DBInstance.find_all(cluster_id=cluster_id).all() instance_ids = [db_instance.id for db_instance in db_instances] # Wait for cluster members to get to cluster-ready status. 
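# Every method of RedisGuestAgentAPI above follows one shape: log, pin the
# RPC version, dispatch with a timeout. A minimal sketch of that convention
# with a stand-in transport; `fake_call` and `cluster_forget` are
# hypothetical names, not part of Trove's guest API.

API_BASE_VERSION = '1.0'   # assumption: mirrors guest_api.API.API_BASE_VERSION
AGENT_HIGH_TIMEOUT = 600   # assumption: illustrative value


def fake_call(method_name, timeout, version, **kwargs):
    # Stand-in for the oslo.messaging prepare(version=...).call(...) round trip.
    return {'method': method_name, 'timeout': timeout,
            'version': version, 'args': kwargs}


def cluster_forget(node_id):
    # A new call keeps the same skeleton; when a signature changes, the
    # version is bumped and documented at the top of guestagent/api.py.
    return fake_call('cluster_forget', AGENT_HIGH_TIMEOUT,
                     version=API_BASE_VERSION, node_id=node_id)


assert cluster_forget('node-1')['version'] == API_BASE_VERSION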
            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            instances = [Instance.load(context, instance_id)
                         for instance_id in instance_ids]

            # Connect nodes to the first node
            guests = [self.get_guest(instance) for instance in instances]
            try:
                cluster_head = instances[0]
                cluster_head_port = '6379'
                cluster_head_ip = self.get_ip(cluster_head)
                for guest in guests[1:]:
                    guest.cluster_meet(cluster_head_ip, cluster_head_port)

                num_nodes = len(instances)
                total_slots = 16384
                # Integer division: cluster_addslots expects whole slot
                # numbers (a plain '/' would yield floats on Python 3).
                slots_per_node = total_slots // num_nodes
                leftover_slots = total_slots % num_nodes
                first_slot = 0
                for guest in guests:
                    last_slot = first_slot + slots_per_node
                    if leftover_slots > 0:
                        # The first 'leftover_slots' nodes each absorb one
                        # extra slot.
                        leftover_slots -= 1
                    else:
                        last_slot -= 1
                    guest.cluster_addslots(first_slot, last_slot)
                    first_slot = last_slot + 1

                for guest in guests:
                    guest.cluster_complete()
            except Exception:
                LOG.exception("Error creating cluster.")
                self.update_statuses_on_failure(cluster_id)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _create_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception("Timeout for building cluster.")
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("End create_cluster for id: %s.", cluster_id)

    def grow_cluster(self, context, cluster_id, new_instance_ids):
        LOG.debug("Begin grow_cluster for id: %s.", cluster_id)

        def _grow_cluster():
            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()
            # next() with a default: an exhausted generator would otherwise
            # raise StopIteration and bypass the 'if not cluster_head' check.
            cluster_head = next((Instance.load(context, db_inst.id)
                                 for db_inst in db_instances
                                 if db_inst.id not in new_instance_ids), None)
            if not cluster_head:
                raise TroveError(_("Unable to determine existing Redis cluster"
                                   " member"))
            (cluster_head_ip, cluster_head_port) = (
                self.get_guest(cluster_head).get_node_ip())

            # Wait for cluster members to get to cluster-ready status.
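# Worked example of the slot arithmetic in _create_cluster above: the 16384
# Redis hash slots are split evenly, with the first `remainder` nodes taking
# one extra slot each. A standalone sketch using the same inclusive ranges:

def slot_ranges(num_nodes, total_slots=16384):
    per_node, leftover = divmod(total_slots, num_nodes)
    ranges, first = [], 0
    for _ in range(num_nodes):
        last = first + per_node          # tentative inclusive upper bound
        if leftover > 0:
            leftover -= 1                # this node absorbs one leftover slot
        else:
            last -= 1
        ranges.append((first, last))
        first = last + 1
    return ranges


# Five nodes: four get 3277 slots, one gets 3276, covering 0..16383 exactly.
assert slot_ranges(5) == [(0, 3276), (3277, 6553), (6554, 9830),
                          (9831, 13107), (13108, 16383)]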
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return
            LOG.debug("All members ready, proceeding for cluster setup.")

            new_insts = [Instance.load(context, instance_id)
                         for instance_id in new_instance_ids]
            # Use a list, not map(): on Python 3 a map iterator would be
            # exhausted by the first loop, so cluster_complete() would never
            # be called for any of the new guests.
            new_guests = [self.get_guest(instance) for instance in new_insts]

            # Connect nodes to the cluster head
            for guest in new_guests:
                guest.cluster_meet(cluster_head_ip, cluster_head_port)

            for guest in new_guests:
                guest.cluster_complete()

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _grow_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception("Timeout for growing cluster.")
            self.update_statuses_on_failure(
                cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
        except Exception:
            LOG.exception("Error growing cluster %s.", cluster_id)
            self.update_statuses_on_failure(
                cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
        finally:
            timeout.cancel()

        LOG.debug("End grow_cluster for id: %s.", cluster_id)

    def upgrade_cluster(self, context, cluster_id, datastore_version):
        self.rolling_upgrade_cluster(context, cluster_id, datastore_version)


class RedisTaskManagerAPI(task_api.API):
    pass
././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.74411 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/vertica/0000755000175000017500000000000000000000000026563 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/vertica/__init__.py0000644000175000017500000000000000000000000030662 0ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/vertica/api.py0000644000175000017500000002176200000000000027716 0ustar00coreycorey00000000000000
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
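# The Timeout dance used by create/grow/shrink above is a recurring eventlet
# idiom: arm a timer, re-raise any Timeout that is not ours, always cancel.
# A minimal self-contained sketch (assumes the work cooperatively yields, as
# greenthread code in Trove does; `run_guarded` is an illustrative name):

import eventlet
from eventlet.timeout import Timeout


def run_guarded(work, seconds, on_timeout):
    timeout = Timeout(seconds)   # starts ticking immediately
    try:
        work()
    except Timeout as t:
        if t is not timeout:
            raise                # not my timeout: somebody else's timer fired
        on_timeout()
    finally:
        timeout.cancel()


run_guarded(lambda: eventlet.sleep(0.01), seconds=1,
            on_timeout=lambda: None)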
from oslo_log import log as logging from trove.cluster import models from trove.cluster.tasks import ClusterTasks from trove.cluster.views import ClusterView from trove.common import cfg from trove.common import exception from trove.common import server_group as srv_grp from trove.common.strategies.cluster import base from trove.common import utils from trove.extensions.mgmt.clusters.views import MgmtClusterView from trove.instance import models as inst_models from trove.quota.quota import check_quotas from trove.taskmanager import api as task_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class VerticaAPIStrategy(base.BaseAPIStrategy): @property def cluster_class(self): return VerticaCluster def _action_grow(self, cluster, body): nodes = body['grow'] instances = [] for node in nodes: instance = { 'flavor_id': utils.get_id_from_href(node['flavorRef']) } if 'name' in node: instance['name'] = node['name'] if 'volume' in node: instance['volume_size'] = int(node['volume']['size']) instances.append(instance) return cluster.grow(instances) def _action_shrink(self, cluster, body): nodes = body['shrink'] instance_ids = [node['id'] for node in nodes] return cluster.shrink(instance_ids) @property def cluster_view_class(self): return VerticaClusterView @property def mgmt_cluster_view_class(self): return VerticaMgmtClusterView class VerticaCluster(models.Cluster): @staticmethod def _create_instances(context, db_info, datastore, datastore_version, instances, extended_properties, locality, new_cluster=True): vertica_conf = CONF.get(datastore_version.manager) num_instances = len(instances) existing = inst_models.DBInstance.find_all(cluster_id=db_info.id, deleted=False).all() num_existing = len(existing) # Matching number of instances with configured cluster_member_count if (new_cluster and num_instances != vertica_conf.cluster_member_count): raise exception.ClusterNumInstancesNotSupported( num_instances=vertica_conf.cluster_member_count) models.validate_instance_flavors( context, instances, vertica_conf.volume_support, vertica_conf.device_path) req_volume_size = models.get_required_volume_size( instances, vertica_conf.volume_support) models.assert_homogeneous_cluster(instances) models.validate_instance_nics(context, instances) deltas = {'instances': num_instances, 'volumes': req_volume_size} check_quotas(context.project_id, deltas) flavor_id = instances[0]['flavor_id'] volume_size = instances[0].get('volume_size', None) volume_type = instances[0].get('volume_type', None) nics = [instance.get('nics', None) for instance in instances] azs = [instance.get('availability_zone', None) for instance in instances] regions = [instance.get('region_name', None) for instance in instances] # Creating member instances minstances = [] for i in range(0, num_instances): if i == 0 and new_cluster: member_config = {"id": db_info.id, "instance_type": "master"} else: member_config = {"id": db_info.id, "instance_type": "member"} instance_name = "%s-member-%s" % (db_info.name, str(i + num_existing + 1)) minstances.append( inst_models.Instance.create( context, instance_name, flavor_id, datastore_version.image_id, [], [], datastore, datastore_version, volume_size, None, nics=nics[i], availability_zone=azs[i], configuration_id=None, cluster_config=member_config, volume_type=volume_type, modules=instances[i].get('modules'), locality=locality, region_name=regions[i]) ) return minstances @classmethod def create(cls, context, name, datastore, datastore_version, instances, extended_properties, locality, configuration): 
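# _create_instances above pins the cluster size to cluster_member_count and
# makes only the first instance of a brand-new cluster the 'master'; grow
# operations add plain members. A small sketch of that type assignment
# (function name and values illustrative):

def member_configs(cluster_id, num_instances, new_cluster):
    configs = []
    for i in range(num_instances):
        instance_type = 'master' if (i == 0 and new_cluster) else 'member'
        configs.append({'id': cluster_id, 'instance_type': instance_type})
    return configs


assert [c['instance_type'] for c in member_configs('c1', 3, True)] == \
    ['master', 'member', 'member']
assert [c['instance_type'] for c in member_configs('c1', 2, False)] == \
    ['member', 'member']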
LOG.debug("Initiating cluster creation.") if configuration: raise exception.ConfigurationNotSupported() vertica_conf = CONF.get(datastore_version.manager) num_instances = len(instances) # Matching number of instances with configured cluster_member_count if num_instances != vertica_conf.cluster_member_count: raise exception.ClusterNumInstancesNotSupported( num_instances=vertica_conf.cluster_member_count) db_info = models.DBCluster.create( name=name, tenant_id=context.project_id, datastore_version_id=datastore_version.id, task_status=ClusterTasks.BUILDING_INITIAL) cls._create_instances(context, db_info, datastore, datastore_version, instances, extended_properties, locality, new_cluster=True) # Calling taskmanager to further proceed for cluster-configuration task_api.load(context, datastore_version.manager).create_cluster( db_info.id) return VerticaCluster(context, db_info, datastore, datastore_version) @staticmethod def k_safety(n): """ Vertica defines k-safety values of 0, 1 or 2: https://my.vertica.com/docs/7.1.x/HTML/Content/Authoring/Glossary/ K-Safety.htm """ if n < 3: return 0 elif n < 5: return 1 else: return 2 def grow(self, instances): LOG.debug("Growing cluster.") self.validate_cluster_available() context = self.context db_info = self.db_info datastore = self.ds datastore_version = self.ds_version db_info.update(task_status=ClusterTasks.GROWING_CLUSTER) locality = srv_grp.ServerGroup.convert_to_hint(self.server_group) new_instances = self._create_instances(context, db_info, datastore, datastore_version, instances, None, locality, new_cluster=False) task_api.load(context, datastore_version.manager).grow_cluster( db_info.id, [instance.id for instance in new_instances]) return VerticaCluster(context, db_info, datastore, datastore_version) def shrink(self, instance_ids): self.validate_cluster_available() context = self.context db_info = self.db_info datastore_version = self.ds_version for db_instance in self.db_instances: if db_instance.type == 'master': if db_instance.id in instance_ids: raise exception.ClusterShrinkInstanceInUse( id=db_instance.id, reason="Cannot remove master node." ) all_instance_ids = [db_instance.id for db_instance in self.db_instances] left_instances = [instance_id for instance_id in all_instance_ids if instance_id not in instance_ids] k = self.k_safety(len(left_instances)) vertica_conf = CONF.get(datastore_version.manager) if k < vertica_conf.min_ksafety: raise exception.ClusterNumInstancesBelowSafetyThreshold() db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER) task_api.load(context, datastore_version.manager).shrink_cluster( self.db_info.id, instance_ids) return VerticaCluster(self.context, db_info, self.ds, self.ds_version) class VerticaClusterView(ClusterView): def build_instances(self): return self._build_instances(['member', 'master'], ['member', 'master']) class VerticaMgmtClusterView(MgmtClusterView): def build_instances(self): return self._build_instances(['member', 'master'], ['member', 'master']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/vertica/guestagent.py0000644000175000017500000000660300000000000031310 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.cluster import base from trove.guestagent import api as guest_api LOG = logging.getLogger(__name__) CONF = cfg.CONF class VerticaGuestAgentStrategy(base.BaseGuestAgentStrategy): @property def guest_client_class(self): return VerticaGuestAgentAPI class VerticaGuestAgentAPI(guest_api.API): """Cluster Specific Datastore Guest API **** VERSION CONTROLLED API **** The methods in this class are subject to version control as coordinated by guestagent/api.py. Whenever a change is made to any API method in this class, add a version number and comment to the top of guestagent/api.py and use the version number as appropriate in this file """ def get_public_keys(self, user): LOG.debug("Getting public keys for user: %s.", user) version = guest_api.API.API_BASE_VERSION return self._call("get_public_keys", self.agent_high_timeout, version=version, user=user) def authorize_public_keys(self, user, public_keys): LOG.debug("Authorizing public keys for user: %s.", user) version = guest_api.API.API_BASE_VERSION return self._call("authorize_public_keys", self.agent_high_timeout, version=version, user=user, public_keys=public_keys) def install_cluster(self, members): LOG.debug("Installing Vertica cluster on members: %s.", members) version = guest_api.API.API_BASE_VERSION return self._call("install_cluster", CONF.cluster_usage_timeout, version=version, members=members) def grow_cluster(self, members): LOG.debug("Growing Vertica cluster with members: %s.", members) version = guest_api.API.API_BASE_VERSION return self._call("grow_cluster", CONF.cluster_usage_timeout, version=version, members=members) def shrink_cluster(self, members): LOG.debug("Shrinking Vertica cluster with members: %s.", members) version = guest_api.API.API_BASE_VERSION return self._call("shrink_cluster", CONF.cluster_usage_timeout, version=version, members=members) def mark_design_ksafe(self, k): LOG.debug("Setting vertica k-safety level to : %s.", k) version = guest_api.API.API_BASE_VERSION return self._call("mark_design_ksafe", CONF.cluster_usage_timeout, version=version, k=k) def cluster_complete(self): LOG.debug("Notifying cluster install completion.") version = guest_api.API.API_BASE_VERSION return self._call("cluster_complete", self.agent_high_timeout, version=version) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/experimental/vertica/taskmanager.py0000644000175000017500000002234300000000000031436 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
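# The Vertica guest calls above pick timeouts by scope: per-node operations
# ride the agent timeouts, while cluster-wide operations (install_cluster,
# grow_cluster, shrink_cluster, mark_design_ksafe) use the much larger
# CONF.cluster_usage_timeout because they touch every member. A sketch of
# that convention (labels only; real values come from Trove's config):

TIMEOUT_SCOPE = {
    'get_public_keys': 'agent_high_timeout',        # single node
    'authorize_public_keys': 'agent_high_timeout',  # single node
    'cluster_complete': 'agent_high_timeout',       # single-node ack
    'install_cluster': 'cluster_usage_timeout',     # whole cluster
    'grow_cluster': 'cluster_usage_timeout',
    'shrink_cluster': 'cluster_usage_timeout',
    'mark_design_ksafe': 'cluster_usage_timeout',
}

assert TIMEOUT_SCOPE['install_cluster'] != TIMEOUT_SCOPE['cluster_complete']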
# See the License for the specific language governing permissions and # limitations under the License. from eventlet.timeout import Timeout from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common.strategies.cluster import base from trove.common.strategies.cluster.experimental.vertica.api import \ VerticaCluster from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance import tasks as inst_tasks from trove.taskmanager import api as task_api import trove.taskmanager.models as task_models LOG = logging.getLogger(__name__) CONF = cfg.CONF class VerticaTaskManagerStrategy(base.BaseTaskManagerStrategy): @property def task_manager_api_class(self): return VerticaTaskManagerAPI @property def task_manager_cluster_tasks_class(self): return VerticaClusterTasks class VerticaClusterTasks(task_models.ClusterTasks): def create_cluster(self, context, cluster_id): LOG.debug("Begin create_cluster for id: %s.", cluster_id) def _create_cluster(): # Fetch instances by cluster_id against instances table. db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False).all() instance_ids = [db_instance.id for db_instance in db_instances] # Wait for cluster members to get to cluster-ready status. if not self._all_instances_ready(instance_ids, cluster_id): return LOG.debug("All members ready, proceeding for cluster setup.") instances = [Instance.load(context, instance_id) for instance_id in instance_ids] member_ips = [self.get_ip(instance) for instance in instances] guests = [self.get_guest(instance) for instance in instances] # Users to be configured for password-less SSH. authorized_users_without_password = ['root', 'dbadmin'] # Configuring password-less SSH for cluster members. # Strategy for setting up SSH: # get public keys for user from member-instances in cluster, # combine them, finally push it back to all instances, # and member instances add them to authorized keys. LOG.debug("Configuring password-less SSH on cluster members.") try: for user in authorized_users_without_password: pub_key = [guest.get_public_keys(user) for guest in guests] for guest in guests: guest.authorize_public_keys(user, pub_key) LOG.debug("Installing cluster with members: %s.", member_ips) for db_instance in db_instances: if db_instance['type'] == 'master': master_instance = Instance.load(context, db_instance.id) self.get_guest(master_instance).install_cluster( member_ips) break LOG.debug("Finalizing cluster configuration.") for guest in guests: guest.cluster_complete() except Exception: LOG.exception("Error creating cluster.") self.update_statuses_on_failure(cluster_id) timeout = Timeout(CONF.cluster_usage_timeout) try: _create_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("Timeout for building cluster.") self.update_statuses_on_failure(cluster_id) finally: timeout.cancel() LOG.debug("End create_cluster for id: %s.", cluster_id) def grow_cluster(self, context, cluster_id, new_instance_ids): def _grow_cluster(): LOG.debug("begin grow_cluster for Vertica cluster %s", cluster_id) db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False).all() instance_ids = [db_instance.id for db_instance in db_instances] # Wait for new cluster members to get to cluster-ready status. 
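# The password-less SSH setup above is a gather/broadcast step: collect each
# member's public keys for a user, then push the combined set back so every
# member authorizes every other. A standalone sketch with guests modeled as
# dicts (illustrative, not the Trove guest API):

def exchange_keys(guests, user):
    pub_keys = [g['keys'][user] for g in guests]      # step 1: gather
    for g in guests:                                  # step 2: broadcast
        g.setdefault('authorized', {})[user] = list(pub_keys)
    return pub_keys


nodes = [{'keys': {'dbadmin': 'key-a'}}, {'keys': {'dbadmin': 'key-b'}}]
exchange_keys(nodes, 'dbadmin')
assert nodes[0]['authorized']['dbadmin'] == ['key-a', 'key-b']
assert nodes[1]['authorized']['dbadmin'] == ['key-a', 'key-b']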
if not self._all_instances_ready(new_instance_ids, cluster_id): return new_insts = [Instance.load(context, instance_id) for instance_id in new_instance_ids] existing_instances = [Instance.load(context, instance_id) for instance_id in instance_ids if instance_id not in new_instance_ids] existing_guests = [self.get_guest(i) for i in existing_instances] new_guests = [self.get_guest(i) for i in new_insts] all_guests = new_guests + existing_guests authorized_users_without_password = ['root', 'dbadmin'] new_ips = [self.get_ip(instance) for instance in new_insts] for user in authorized_users_without_password: pub_key = [guest.get_public_keys(user) for guest in all_guests] for guest in all_guests: guest.authorize_public_keys(user, pub_key) for db_instance in db_instances: if db_instance['type'] == 'master': LOG.debug("Found 'master' instance, calling grow on guest") master_instance = Instance.load(context, db_instance.id) self.get_guest(master_instance).grow_cluster(new_ips) break for guest in new_guests: guest.cluster_complete() timeout = Timeout(CONF.cluster_usage_timeout) try: _grow_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise # not my timeout LOG.exception("Timeout for growing cluster.") self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR) except Exception: LOG.exception("Error growing cluster %s.", cluster_id) self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR) finally: timeout.cancel() def shrink_cluster(self, context, cluster_id, instance_ids): def _shrink_cluster(): db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False).all() all_instance_ids = [db_instance.id for db_instance in db_instances] remove_instances = [Instance.load(context, instance_id) for instance_id in instance_ids] left_instances = [Instance.load(context, instance_id) for instance_id in all_instance_ids if instance_id not in instance_ids] remove_member_ips = [self.get_ip(instance) for instance in remove_instances] k = VerticaCluster.k_safety(len(left_instances)) for db_instance in db_instances: if db_instance['type'] == 'master': master_instance = Instance.load(context, db_instance.id) if self.get_ip(master_instance) in remove_member_ips: raise RuntimeError(_("Cannot remove master instance!")) LOG.debug("Marking cluster k-safety: %s", k) self.get_guest(master_instance).mark_design_ksafe(k) self.get_guest(master_instance).shrink_cluster( remove_member_ips) break for r in remove_instances: Instance.delete(r) timeout = Timeout(CONF.cluster_usage_timeout) try: _shrink_cluster() self.reset_task() except Timeout as t: if t is not timeout: raise LOG.exception("Timeout for shrinking cluster.") self.update_statuses_on_failure( cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR) finally: timeout.cancel() LOG.debug("end shrink_cluster for Vertica cluster id %s", self.id) class VerticaTaskManagerAPI(task_api.API): def _cast(self, method_name, version, **kwargs): LOG.debug("Casting %s", method_name) cctxt = self.client.prepare(version=version) cctxt.cast(self.context, method_name, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/cluster/strategy.py0000644000175000017500000000312300000000000024644 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. 
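# VerticaTaskManagerAPI._cast above is the generic fire-and-forget RPC shape:
# prepare a client pinned to a version, then cast with no reply expected.
# A sketch with stub objects, since a real oslo.messaging client needs a
# transport and target (the class names here are hypothetical):

class StubPrepared(object):
    def __init__(self, version):
        self.version = version

    def cast(self, context, method_name, **kwargs):
        # A cast returns immediately; there is no result to wait on.
        print("cast %s (version %s): %s" % (method_name, self.version, kwargs))


class StubClient(object):
    def prepare(self, version):
        return StubPrepared(version)


StubClient().prepare(version='1.0').cast({}, 'grow_cluster', cluster_id='c1')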
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config.cfg import NoSuchOptError from oslo_log import log as logging from trove.common import cfg from trove.common.utils import import_class CONF = cfg.CONF LOG = logging.getLogger(__name__) def load_api_strategy(manager): clazz = CONF.get(manager).get('api_strategy') LOG.debug("Loading class %s", clazz) api_strategy = import_class(clazz) return api_strategy() def load_taskmanager_strategy(manager): try: clazz = CONF.get(manager).get('taskmanager_strategy') LOG.debug("Loading class %s", clazz) taskmanager_strategy = import_class(clazz) return taskmanager_strategy() except NoSuchOptError: return None def load_guestagent_strategy(manager): try: clazz = CONF.get(manager).get('guestagent_strategy') LOG.debug("Loading class %s", clazz) guestagent_strategy = import_class(clazz) return guestagent_strategy() except NoSuchOptError: return None ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.74411 trove-12.1.0.dev92/trove/common/strategies/storage/0000755000175000017500000000000000000000000022414 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/storage/__init__.py0000644000175000017500000000157700000000000024537 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.common.strategies.strategy import Strategy LOG = logging.getLogger(__name__) def get_storage_strategy(storage_driver, ns=__name__): return Strategy.get_strategy(storage_driver, ns) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/storage/base.py0000644000175000017500000000277200000000000023710 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import abc from trove.common.strategies.strategy import Strategy class Storage(Strategy): """Base class for Storage Strategy implementation.""" __strategy_type__ = 'storage' __strategy_ns__ = 'trove.common.strategies.storage' def __init__(self, context): self.context = context super(Storage, self).__init__() @abc.abstractmethod def save(self, filename, stream, metadata=None): """Persist information from the stream.""" @abc.abstractmethod def load(self, location, backup_checksum): """Load a stream from a persisted storage location.""" @abc.abstractmethod def load_metadata(self, location, backup_checksum): """Load metadata for a persisted object.""" @abc.abstractmethod def save_metadata(self, location, metadata={}): """Save metadata for a persisted object.""" ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.74411 trove-12.1.0.dev92/trove/common/strategies/storage/experimental/0000755000175000017500000000000000000000000025111 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/storage/experimental/__init__.py0000644000175000017500000000000000000000000027210 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/storage/swift.py0000644000175000017500000002634600000000000024135 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
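# Storage above is an abc-based strategy: a driver registers under the
# 'trove.common.strategies.storage' namespace via __strategy_name__ and
# implements the four abstract methods. A minimal in-memory sketch of a
# subclass (not a real Trove driver; assumes only the base class import):

from trove.common.strategies.storage.base import Storage


class InMemoryStorage(Storage):
    __strategy_name__ = 'inmemory'

    def __init__(self, context):
        super(InMemoryStorage, self).__init__(context)
        self._blobs, self._meta = {}, {}

    def save(self, filename, stream, metadata=None):
        self._blobs[filename] = stream.read()
        self._meta[filename] = dict(metadata or {})
        return True, 'saved', None, filename

    def load(self, location, backup_checksum):
        return self._blobs[location]

    def load_metadata(self, location, backup_checksum):
        return self._meta.get(location, {})

    def save_metadata(self, location, metadata=None):
        self._meta[location] = dict(metadata or {})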
# import hashlib import json from oslo_log import log as logging import six from trove.common import cfg from trove.common.clients import create_swift_client from trove.common.i18n import _ from trove.common.strategies.storage import base LOG = logging.getLogger(__name__) CONF = cfg.CONF CHUNK_SIZE = CONF.backup_chunk_size MAX_FILE_SIZE = CONF.backup_segment_max_size BACKUP_CONTAINER = CONF.backup_swift_container class DownloadError(Exception): """Error running the Swift Download Command.""" class SwiftDownloadIntegrityError(Exception): """Integrity error while running the Swift Download Command.""" class StreamReader(object): """Wrap the stream from the backup process and chunk it into segements.""" def __init__(self, stream, filename, max_file_size=MAX_FILE_SIZE): self.stream = stream self.filename = filename self.container = BACKUP_CONTAINER self.max_file_size = max_file_size self.segment_length = 0 self.process = None self.file_number = 0 self.end_of_file = False self.end_of_segment = False self.segment_checksum = hashlib.md5() @property def base_filename(self): """Filename with extensions removed.""" return self.filename.split('.')[0] @property def segment(self): return '%s_%08d' % (self.base_filename, self.file_number) @property def first_segment(self): return '%s_%08d' % (self.base_filename, 0) @property def segment_path(self): return '%s/%s' % (self.container, self.segment) def read(self, chunk_size=CHUNK_SIZE): if self.end_of_segment: self.segment_length = 0 self.segment_checksum = hashlib.md5() self.end_of_segment = False # Upload to a new file if we are starting or too large if self.segment_length > (self.max_file_size - chunk_size): self.file_number += 1 self.end_of_segment = True return '' chunk = self.stream.read(chunk_size) if not chunk: self.end_of_file = True return '' self.segment_checksum.update(chunk) self.segment_length += len(chunk) return chunk class SwiftStorage(base.Storage): """Implementation of Storage Strategy for Swift.""" __strategy_name__ = 'swift' def __init__(self, *args, **kwargs): super(SwiftStorage, self).__init__(*args, **kwargs) self.connection = create_swift_client(self.context) def save(self, filename, stream, metadata=None): """Persist information from the stream to swift. The file is saved to the location /. It will be a Swift Static Large Object (SLO). 
The filename is defined on the backup runner manifest property which is typically in the format '..gz' """ LOG.info('Saving %(filename)s to %(container)s in swift.', {'filename': filename, 'container': BACKUP_CONTAINER}) # Create the container if it doesn't already exist LOG.debug('Creating container %s.', BACKUP_CONTAINER) self.connection.put_container(BACKUP_CONTAINER) # Swift Checksum is the checksum of the concatenated segment checksums swift_checksum = hashlib.md5() # Wrap the output of the backup process to segment it for swift stream_reader = StreamReader(stream, filename, MAX_FILE_SIZE) LOG.debug('Using segment size %s', stream_reader.max_file_size) url = self.connection.url # Full location where the backup manifest is stored location = "%s/%s/%s" % (url, BACKUP_CONTAINER, filename) # Information about each segment upload job segment_results = [] # Read from the stream and write to the container in swift while not stream_reader.end_of_file: LOG.debug('Saving segment %s.', stream_reader.segment) path = stream_reader.segment_path etag = self.connection.put_object(BACKUP_CONTAINER, stream_reader.segment, stream_reader) segment_checksum = stream_reader.segment_checksum.hexdigest() # Check each segment MD5 hash against swift etag # Raise an error and mark backup as failed if etag != segment_checksum: LOG.error("Error saving data segment to swift. " "ETAG: %(tag)s Segment MD5: %(checksum)s.", {'tag': etag, 'checksum': segment_checksum}) return False, "Error saving data to Swift!", None, location segment_results.append({ 'path': path, 'etag': etag, 'size_bytes': stream_reader.segment_length }) if six.PY3: swift_checksum.update(segment_checksum.encode()) else: swift_checksum.update(segment_checksum) # All segments uploaded. num_segments = len(segment_results) LOG.debug('File uploaded in %s segments.', num_segments) # An SLO will be generated if the backup was more than one segment in # length. large_object = num_segments > 1 # Meta data is stored as headers if metadata is None: metadata = {} metadata.update(stream.metadata()) headers = {} for key, value in metadata.items(): headers[self._set_attr(key)] = value LOG.debug('Metadata headers: %s', str(headers)) if large_object: LOG.info('Creating the manifest file.') manifest_data = json.dumps(segment_results) LOG.debug('Manifest contents: %s', manifest_data) # The etag returned from the manifest PUT is the checksum of the # manifest object (which is empty); this is not the checksum we # want. self.connection.put_object(BACKUP_CONTAINER, filename, manifest_data, query_string='multipart-manifest=put') # Validation checksum is the Swift Checksum final_swift_checksum = swift_checksum.hexdigest() else: LOG.info('Backup fits in a single segment. Moving segment ' '%(segment)s to %(filename)s.', {'segment': stream_reader.first_segment, 'filename': filename}) segment_result = segment_results[0] # Just rename it via a special put copy. headers['X-Copy-From'] = segment_result['path'] self.connection.put_object(BACKUP_CONTAINER, filename, '', headers=headers) # Delete the old segment file that was copied LOG.debug('Deleting the old segment file %s.', stream_reader.first_segment) self.connection.delete_object(BACKUP_CONTAINER, stream_reader.first_segment) final_swift_checksum = segment_result['etag'] # Validate the object by comparing checksums # Get the checksum according to Swift resp = self.connection.head_object(BACKUP_CONTAINER, filename) # swift returns etag in double quotes # e.g. 
'"dc3b0827f276d8d78312992cc60c2c3f"' etag = resp['etag'].strip('"') # Raise an error and mark backup as failed if etag != final_swift_checksum: LOG.error( ("Error saving data to swift. Manifest " "ETAG: %(tag)s Swift MD5: %(checksum)s"), {'tag': etag, 'checksum': final_swift_checksum}) return False, "Error saving data to Swift!", None, location return (True, "Successfully saved data to Swift!", final_swift_checksum, location) def _explodeLocation(self, location): storage_url = "/".join(location.split('/')[:-2]) container = location.split('/')[-2] filename = location.split('/')[-1] return storage_url, container, filename def _verify_checksum(self, etag, checksum): etag_checksum = etag.strip('"') if etag_checksum != checksum: log_fmt = ("Original checksum: %(original)s does not match" " the current checksum: %(current)s") exc_fmt = _("Original checksum: %(original)s does not match" " the current checksum: %(current)s") msg_content = { 'original': etag_checksum, 'current': checksum} LOG.error(log_fmt, msg_content) raise SwiftDownloadIntegrityError(exc_fmt % msg_content) return True def load(self, location, backup_checksum): """Restore a backup from the input stream to the restore_location.""" storage_url, container, filename = self._explodeLocation(location) headers, info = self.connection.get_object(container, filename, resp_chunk_size=CHUNK_SIZE) if CONF.verify_swift_checksum_on_restore: self._verify_checksum(headers.get('etag', ''), backup_checksum) return info def _get_attr(self, original): """Get a friendly name from an object header key.""" key = original.replace('-', '_') key = key.replace('x_object_meta_', '') return key def _set_attr(self, original): """Return a swift friendly header key.""" key = original.replace('_', '-') return 'X-Object-Meta-%s' % key def load_metadata(self, location, backup_checksum): """Load metadata from swift.""" storage_url, container, filename = self._explodeLocation(location) headers = self.connection.head_object(container, filename) if CONF.verify_swift_checksum_on_restore: self._verify_checksum(headers.get('etag', ''), backup_checksum) _meta = {} for key, value in headers.items(): if key.startswith('x-object-meta'): _meta[self._get_attr(key)] = value return _meta def save_metadata(self, location, metadata={}): """Save metadata to a swift object.""" storage_url, container, filename = self._explodeLocation(location) headers = {} for key, value in metadata.items(): headers[self._set_attr(key)] = value LOG.info("Writing metadata: %s", str(headers)) self.connection.post_object(container, filename, headers=headers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/strategies/strategy.py0000644000175000017500000000374100000000000023171 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import abc from oslo_log import log as logging import six from trove.common.i18n import _ from trove.common import utils LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class Strategy(object): __strategy_ns__ = None __strategy_name__ = None __strategy_type__ = None def __init__(self): self.name = self.get_canonical_name() LOG.debug("Loaded strategy %s", self.name) def is_enabled(self): """ Is this Strategy enabled? :retval: Boolean """ return True @classmethod def get_strategy(cls, name, ns=None): """ Load a strategy from namespace """ ns = ns or cls.__strategy_ns__ if ns is None: raise RuntimeError( _('No namespace provided and __strategy_ns__ unset')) LOG.debug('Looking for strategy %s in %s', name, ns) return utils.import_class(ns + "." + name) @classmethod def get_canonical_name(cls): """ Return the strategy name """ type_ = cls.get_strategy_type() name = cls.get_strategy_name() return "%s:%s" % (type_, name) @classmethod def get_strategy_name(cls): return cls.__strategy_name__ @classmethod def get_strategy_type(cls): return cls.__strategy_type__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/stream_codecs.py0000644000175000017500000004274100000000000021773 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import ast import csv import json import re import sys import six from six.moves import configparser import xmltodict import yaml from oslo_serialization import base64 from trove.common import utils as trove_utils class StringConverter(object): """A passthrough string-to-object converter. """ def __init__(self, object_mappings): """ :param object_mappings: string-to-object mappings :type object_mappings: dict """ self._object_mappings = object_mappings def to_strings(self, items): """Recursively convert collection items to strings. :returns: Copy of the input collection with all items converted. """ if trove_utils.is_collection(items): return map(self.to_strings, items) return self._to_string(items) def to_objects(self, items): """Recursively convert collection string to objects. :returns: Copy of the input collection with all items converted. """ if trove_utils.is_collection(items): return map(self.to_objects, items) return self._to_object(items) def _to_string(self, value): for k, v in self._object_mappings.items(): if v is value: return k return str(value) def _to_object(self, value): # Return known mappings and quoted strings right away. if value in self._object_mappings: return self._object_mappings[value] elif (isinstance(value, six.string_types) and re.match("^'(.*)'|\"(.*)\"$", value)): return value try: return ast.literal_eval(value) except Exception: return value @six.add_metaclass(abc.ABCMeta) class StreamCodec(object): @abc.abstractmethod def serialize(self, data): """Serialize a Python object into a stream. 
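Concrete codecs below (YAML, ini, properties, JSON, XML, ...) define the
actual on-stream representation.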
""" @abc.abstractmethod def deserialize(self, stream): """Deserialize stream data into a Python structure. """ class IdentityCodec(StreamCodec): """ A basic passthrough codec. Does not modify the data in any way. """ def serialize(self, data): return data def deserialize(self, stream): return stream class YamlCodec(StreamCodec): """ Read/write data from/into a YAML config file. a: 1 b: {c: 3, d: 4} ... The above file content (flow-style) would be represented as: {'a': 1, 'b': {'c': 3, 'd': 4,} ... } """ def __init__(self, default_flow_style=False): """ :param default_flow_style: Use flow-style (inline) formatting of nested collections. :type default_flow_style: boolean """ self._default_flow_style = default_flow_style def serialize(self, dict_data): return yaml.dump(dict_data, Dumper=self.dumper, default_flow_style=self._default_flow_style) def deserialize(self, stream): return yaml.load(stream, Loader=self.loader) @property def loader(self): return yaml.loader.Loader @property def dumper(self): return yaml.dumper.Dumper class SafeYamlCodec(YamlCodec): """ Same as YamlCodec except that it uses safe Loader and Dumper which encode Unicode strings and produce only basic YAML tags. """ def __init__(self, default_flow_style=False): super(SafeYamlCodec, self).__init__( default_flow_style=default_flow_style) @property def loader(self): return yaml.loader.SafeLoader @property def dumper(self): return yaml.dumper.SafeDumper class IniCodec(StreamCodec): """ Read/write data from/into an ini-style config file. [section_1] key = value key = value ... [section_2] key = value key = value ... The above file content would be represented as: {'section_1': {'key': value, 'key': value, ...}, 'section_2': {'key': value, 'key': value, ...} ... } """ def __init__(self, default_value=None, comment_markers=('#', ';')): """ :param default_value: Default value for keys with no value. If set, all keys are written as 'key = value'. The key is written without trailing '=' if None. :type default_value: object """ self._default_value = default_value self._comment_markers = comment_markers def serialize(self, dict_data): parser = self._init_config_parser(dict_data) output = six.StringIO() parser.write(output) return output.getvalue() def deserialize(self, stream): parser = self._init_config_parser() parser.readfp(self._pre_parse(stream)) return {s: {k: StringConverter({None: self._default_value}).to_objects(v) for k, v in parser.items(s, raw=True)} for s in parser.sections()} def _pre_parse(self, stream): buf = six.StringIO() for line in six.StringIO(stream): # Ignore commented lines. if not line.startswith(self._comment_markers): # Strip leading and trailing whitespaces from each line. buf.write(line.strip() + '\n') # Rewind the output buffer. buf.flush() buf.seek(0) return buf def _init_config_parser(self, sections=None): # SafeConfigParser was deprecated in Python 3.2 if sys.version_info >= (3, 2): parser = configparser.ConfigParser(allow_no_value=True) else: parser = configparser.SafeConfigParser(allow_no_value=True) if sections: for section in sections: parser.add_section(section) for key, value in sections[section].items(): str_val = StringConverter( {self._default_value: None}).to_strings(value) parser.set(section, key, str(str_val) if str_val is not None else str_val) return parser class PropertiesCodec(StreamCodec): """ Read/write data from/into a property-style config file. key1 k1arg1 k1arg2 ... k1argN key2 k2arg1 k2arg2 ... k2argN key3 k3arg1 k3arg2 ... key3 k3arg3 k3arg4 ... ... 
The above file content would be represented as: {'key1': [k1arg1, k1arg2 ... k1argN], 'key2': [k2arg1, k2arg2 ... k2argN] 'key3': [[k3arg1, k3arg2, ...], [k3arg3, k3arg4, ...]] ... } """ QUOTING_MODE = csv.QUOTE_MINIMAL STRICT_MODE = False SKIP_INIT_SPACE = True def __init__(self, delimiter=' ', comment_markers=('#'), unpack_singletons=True, string_mappings=None): """ :param delimiter: A one-character used to separate fields. :type delimiter: string :param empty_value: Value to represent None in the output. :type empty_value: object :param comment_markers: List of comment markers. :type comment_markers: list :param unpack_singletons: Whether to unpack singleton collections (collections with only a single value). :type unpack_singletons: boolean :param string_mappings: User-defined string representations of Python objects. :type string_mappings: dict """ self._delimiter = delimiter self._comment_markers = comment_markers self._string_converter = StringConverter(string_mappings or {}) self._unpack_singletons = unpack_singletons def serialize(self, dict_data): output = six.StringIO() writer = csv.writer(output, delimiter=self._delimiter, quoting=self.QUOTING_MODE, strict=self.STRICT_MODE, skipinitialspace=self.SKIP_INIT_SPACE) for key, value in dict_data.items(): writer.writerows(self._to_rows(key, value)) return output.getvalue() def deserialize(self, stream): reader = csv.reader(six.StringIO(stream), delimiter=self._delimiter, quoting=self.QUOTING_MODE, strict=self.STRICT_MODE, skipinitialspace=self.SKIP_INIT_SPACE) return self._to_dict(reader) def _to_dict(self, reader): data_dict = {} for row in reader: if row: key = row[0].strip() # Ignore comment lines. if not key.strip().startswith(self._comment_markers): # NOTE(zhaochao): a list object is expected for # trove_utils.unpack_singleton, however in python3 # map objects won't be treated as lists, so we # convert the result of StringConverter.to_objects # to a list explicitly. items = list(self._string_converter.to_objects( [v if v else None for v in map(self._strip_comments, row[1:])])) current = data_dict.get(key) if current is not None: current.append(trove_utils.unpack_singleton(items) if self._unpack_singletons else items) else: data_dict.update({key: [items]}) if self._unpack_singletons: # Unpack singleton values. # NOTE(zhaochao): In Python 3.x, dict.items() returns a view # object, which will reflect the changes of the dict itself: # https://docs.python.org/3/library/stdtypes.html#dict-views # This means as we're changing the dict, dict.items() cannot # guarantee we're safely iterating all entries in the dict. # Manually converting the result of dict.items() to a list will # fix. for k, v in list(data_dict.items()): data_dict.update({k: trove_utils.unpack_singleton(v)}) return data_dict def _strip_comments(self, value): # Strip in-line comments. for marker in self._comment_markers: value = value.split(marker)[0] return value.strip() def _to_rows(self, header, items): rows = [] if trove_utils.is_collection(items): if any(trove_utils.is_collection(item) for item in items): # This is multi-row property. for item in items: rows.extend(self._to_rows(header, item)) else: # This is a single-row property with multiple arguments. rows.append(self._to_list( header, self._string_converter.to_strings(items))) else: # This is a single-row property with only one argument. 
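# e.g. header='key1', items='v1' yields [['key1', 'v1']] (illustrative).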
# Note(zhaochao): csv.writerows expects a list object before # python 3.5, but map objects won't be treated as lists in # python 3, so we explicitly convert the result of # StringConverter.to_strings to a list here to support py34 # unittests. rows.append( list(self._string_converter.to_strings( self._to_list(header, items)))) return rows def _to_list(self, *items): container = [] for item in items: if trove_utils.is_collection(item): # This item is a nested collection - unpack it. container.extend(self._to_list(*item)) else: # This item is not a collection - append it to the list. container.append(item) return container class KeyValueCodec(StreamCodec): """ Read/write data from/into a simple key=value file. key1=value1 key2=value2 key3=value3 ... The above file content would be represented as: {'key1': 'value1', 'key2': 'value2', 'key3': 'value3', ... } """ BOOL_PYTHON = 0 # True, False BOOL_LOWER = 1 # true, false BOOL_UPPER = 2 # TRUE, FALSE def __init__(self, delimiter='=', comment_marker='#', line_terminator='\r\n', value_quoting=False, value_quote_char="'", bool_case=BOOL_PYTHON, big_ints=False, hidden_marker=None): """ :param delimiter: string placed between key and value :param comment_marker: string indicating comment line in file :param line_terminator: string placed between lines :param value_quoting: whether or not to quote string values :param value_quote_char: character used to quote string values :param bool_case: BOOL_* setting case of bool values :param big_ints: treat K/M/G at the end of ints as an int :param hidden_marker: pattern prefixing hidden param """ self._delimeter = delimiter self._comment_marker = comment_marker self._line_terminator = line_terminator self._value_quoting = value_quoting self._value_quote_char = value_quote_char self._bool_case = bool_case self._big_ints = big_ints self._hidden_marker = hidden_marker def serialize(self, dict_data): lines = [] for k, v in dict_data.items(): lines.append(k + self._delimeter + self.serialize_value(v)) return self._line_terminator.join(lines) def deserialize(self, stream): # Note(zhaochao): In Python 3, when files are opened in text mode, # newlines will be translated to '\n' by default, so we just split # the stream by '\n'. 
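# On Python 2 the stream is split on the configured line terminator
# instead (self._line_terminator, '\r\n' by default).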
if sys.version_info[0] >= 3: lines = stream.split('\n') else: lines = stream.split(self._line_terminator) result = {} for line in lines: line = line.lstrip().rstrip() if line == '' or line.startswith(self._comment_marker): continue k, v = re.split(re.escape(self._delimeter), line, 1) if self._value_quoting and v.startswith(self._value_quote_char): # remove trailing comments v = re.sub(r'%s *%s.*$' % ("'", '#'), '', v) v = v.lstrip( self._value_quote_char).rstrip( self._value_quote_char) else: # remove trailing comments v = re.sub('%s.*$' % self._comment_marker, '', v) if self._hidden_marker and v.startswith(self._hidden_marker): continue result[k.strip()] = v return result def serialize_value(self, value): if isinstance(value, bool): if self._bool_case == self.BOOL_PYTHON: value = str(value) elif self._bool_case == self.BOOL_LOWER: value = str(value).lower() elif self._bool_case == self.BOOL_UPPER: value = str(value).upper() if self.should_quote_value(value): value = self._value_quote_char + value + self._value_quote_char return str(value) def should_quote_value(self, value): if not self._value_quoting: return False if isinstance(value, bool) or isinstance(value, int): return False if value.lower() in ['true', 'false']: return False try: int(value) return False except ValueError: pass if self._big_ints and re.match(r'\d+[kKmMgGtTpP]', value): return False return True class JsonCodec(StreamCodec): def serialize(self, dict_data): return json.dumps(dict_data) def deserialize(self, stream): return json.load(six.StringIO(stream)) class Base64Codec(StreamCodec): """Serialize (encode) and deserialize (decode) using the base64 codec. To read binary data from a file and b64encode it, used the decode=False flag on operating_system's read calls. Use encode=False to decode binary data before writing to a file as well. """ # NOTE(zhaochao): migrate to oslo_serialization.base64 to serialize(return # a text object) and deserialize(return a bytes object) data. def serialize(self, data): return base64.encode_as_text(data) def deserialize(self, stream): return base64.decode_as_bytes(stream) class XmlCodec(StreamCodec): def __init__(self, encoding='utf-8'): self._encoding = encoding def serialize(self, dict_data): return xmltodict.unparse( dict_data, output=None, encoding=self._encoding, pretty=True) def deserialize(self, stream): return xmltodict.parse(stream, encoding=self._encoding) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/template.py0000644000175000017500000001125000000000000020762 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
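# Usage sketch for the template classes defined below (illustrative; the
# flavor dict contents are hypothetical):
#
#     tmpl = SingleInstanceConfigTemplate(datastore_version,
#                                         {'ram': 2048}, instance_id)
#     contents = tmpl.render()      # rendered config file text
#     options = tmpl.render_dict()  # same defaults parsed into a dict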
from oslo_config import cfg as oslo_config from oslo_log import log as logging from trove.common import cfg from trove.common import configurations from trove.common import exception from trove.common import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) ENV = utils.ENV SERVICE_PARSERS = { 'mongodb': configurations.MongoDBConfParser, 'mysql': configurations.MySQLConfParser, 'percona': configurations.MySQLConfParser, 'mariadb': configurations.MySQLConfParser, 'pxc': configurations.MySQLConfParser, 'postgresql': configurations.PostgresqlConfParser, 'cassandra': configurations.CassandraConfParser, 'redis': configurations.RedisConfParser, 'vertica': configurations.VerticaConfParser, 'db2': configurations.DB2ConfParser, } class SingleInstanceConfigTemplate(object): """This class selects a single configuration file by database type for rendering on the guest """ template_name = "config.template" def __init__(self, datastore_version, flavor_dict, instance_id): """Constructor :param datastore_version: The datastore version. :type datastore_version: DatastoreVersion :param flavor_dict: dict containing flavor details for use in jinja. :type flavor_dict: dict. :param instance_id: trove instance id :type instance_id: str """ self.flavor_dict = flavor_dict self.datastore_version = datastore_version # TODO(tim.simpson): The current definition of datastore_version is a # bit iffy and I believe will change soon, so I'm # creating a dictionary here for jinja to consume # rather than pass in the datastore version object. self.datastore_dict = { 'name': self.datastore_version.datastore_name, 'manager': self.datastore_version.manager, 'version': self.datastore_version.name, } self.instance_id = instance_id def get_template(self): patterns = ['{name}/{version}/{template_name}', '{name}/{template_name}', '{manager}/{template_name}'] context = self.datastore_dict.copy() context['template_name'] = self.template_name names = [name.format(**context) for name in patterns] return ENV.select_template(names) def render(self, **kwargs): """Renders the jinja template :returns: str -- The rendered configuration file """ template = self.get_template() server_id = self._calculate_unique_id() self.config_contents = template.render( flavor=self.flavor_dict, datastore=self.datastore_dict, server_id=server_id, **kwargs) return self.config_contents def render_dict(self): """ Renders the default configuration template file as a dictionary to apply the default configuration dynamically. 
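:returns: the rendered configuration parsed by the SERVICE_PARSERS
    entry matching this datastore manager.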
""" config = self.render() cfg_parser = SERVICE_PARSERS.get(self.datastore_version.manager) if not cfg_parser: raise exception.NoConfigParserFound( datastore_manager=self.datastore_version.manager) return cfg_parser(config).parse() def _calculate_unique_id(self): """ Returns a positive unique id based off of the instance id :return: a positive integer """ return abs(hash(self.instance_id) % (2 ** 31)) def _validate_datastore(datastore_manager): try: CONF.get(datastore_manager) except oslo_config.NoSuchOptError: raise exception.InvalidDatastoreManager( datastore_manager=datastore_manager) class ReplicaSourceConfigTemplate(SingleInstanceConfigTemplate): template_name = "replica_source.config.template" class ReplicaConfigTemplate(SingleInstanceConfigTemplate): template_name = "replica.config.template" class ClusterConfigTemplate(SingleInstanceConfigTemplate): template_name = "cluster.config.template" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/timeutils.py0000644000175000017500000000506500000000000021175 0ustar00coreycorey00000000000000# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime from datetime import timedelta from datetime import tzinfo class zulutime(tzinfo): """A tzinfo class for zulu time""" def utcoffset(self, dt): return timedelta(0) def tzname(self, dt): return "Z" def dst(self, dt): return timedelta(0) def utcnow_aware(): """An aware utcnow() that uses zulutime for the tzinfo.""" return datetime.now(zulutime()) def utcnow(): """A wrapper around datetime.datetime.utcnow(). We're doing this because it is mock'ed in some places. """ return datetime.utcnow() def isotime(tm=None, subsecond=False): """Stringify a time and return it in an ISO 8601 format. Subsecond information is only provided if the subsecond parameter is set to True (default: False). If a time (tm) is provided, it will be stringified. If tm is not provided, the current UTC time is used instead. The timezone for UTC time will be provided as 'Z' and not [+-]00:00. Time zone differential for non UTC times will be provided as the full six character string format provided by datetime.datetime.isoformat() namely [+-]NN:NN. If an invalid time is provided such that tm.utcoffset() causes a ValueError, that exception will be propagated. """ _dt = tm if tm else utcnow_aware() if not subsecond: _dt = _dt.replace(microsecond=0) # might cause an exception if _dt has a bad utcoffset. delta = _dt.utcoffset() if _dt.utcoffset() else timedelta(0) ts = None if delta == timedelta(0): # either we are provided a naive time (tm) or no tm, or an # aware UTC time. In any event, we want to use 'Z' for the # timezone rather than the full 6 character offset. 
_dt = _dt.replace(tzinfo=None) ts = _dt.isoformat() ts += 'Z' else: # an aware non-UTC time was provided ts = _dt.isoformat() return ts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/trove_remote.py0000644000175000017500000000372400000000000021670 0ustar00coreycorey00000000000000# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils.importutils import import_class from trove.common import cfg from trove.common.clients import get_endpoint from trove.common.clients import normalize_url from troveclient.v1 import client as TroveClient CONF = cfg.CONF """ NOTE(mwj, Apr 2016): This module is separated from clients.py because clients.py is used on the Trove guest, but the trove client is not installed on the guest, so the imports here would fail. """ def trove_client(context, region_name=None): if CONF.trove_url: url = '%(url)s%(tenant)s' % { 'url': normalize_url(CONF.trove_url), 'tenant': context.project_id} else: region = region_name or CONF.service_credentials.region_name url = get_endpoint(context.service_catalog, service_type=CONF.trove_service_type, endpoint_region=region, endpoint_type=CONF.trove_endpoint_type) client = TroveClient.Client(context.user, context.auth_token, project_id=context.project_id, auth_url=CONF.service_credentials.auth_url) client.client.auth_token = context.auth_token client.client.management_url = url return client def create_trove_client(*arg, **kwargs): return import_class(CONF.remote_trove_client)(*arg, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/common/utils.py0000644000175000017500000003214500000000000020315 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
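# Usage sketch for trove_remote.create_trove_client above (illustrative;
# the region name is hypothetical and the context is assumed to carry
# auth_token, project_id and service_catalog):
#
#     client = create_trove_client(context, region_name='RegionOne')
#     client.instances.list()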
"""I totally stole most of this from melange, thx guys!!!""" import collections import inspect import os import shutil import uuid from eventlet.timeout import Timeout import jinja2 from oslo_concurrency import processutils from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils.encodeutils import safe_encode from oslo_utils import importutils from oslo_utils import strutils from passlib import pwd import six import six.moves.urllib.parse as urlparse from trove.common import cfg from trove.common import exception from trove.common.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) import_class = importutils.import_class import_object = importutils.import_object import_module = importutils.import_module bool_from_string = strutils.bool_from_string execute = processutils.execute def build_jinja_environment(): env = jinja2.Environment( autoescape=True, loader=jinja2.ChoiceLoader([ jinja2.FileSystemLoader(CONF.template_path), jinja2.PackageLoader("trove", "templates") ])) # Add some basic operation not built-in. env.globals['max'] = max env.globals['min'] = min return env ENV = build_jinja_environment() def pagination_limit(limit, default_limit): limit = int(limit or default_limit) return min(limit, default_limit) def create_method_args_string(*args, **kwargs): """Returns a string representation of args and keyword args. I.e. for args=1,2,3 and kwargs={'a':4, 'b':5} you'd get: "1,2,3,a=4,b=5" """ # While %s turns a var into a string but in some rare cases explicit # repr() is less likely to raise an exception. arg_strs = [repr(arg) for arg in args] arg_strs += ['%s=%s' % (repr(key), repr(value)) for (key, value) in kwargs.items()] return ', '.join(arg_strs) def stringify_keys(dictionary): if dictionary is None: return None return {str(key): value for key, value in dictionary.items()} def exclude(key_values, *exclude_keys): if key_values is None: return None return {key: value for key, value in key_values.items() if key not in exclude_keys} def generate_uuid(): return str(uuid.uuid4()) def raise_if_process_errored(process, exception): try: err = process.stderr.read() if err: raise exception(err) except OSError: pass def clean_out(folder): for root, dirs, files in os.walk(folder): for f in files: os.unlink(os.path.join(root, f)) for d in dirs: shutil.rmtree(os.path.join(root, d)) class cached_property(object): """A decorator that converts a function into a lazy property. 
Taken from : https://github.com/nshah/python-memoize The function wrapped is called the first time to retrieve the result and then that calculated result is used the next time you access the value: class Foo(object): @cached_property def bar(self): # calculate something important here return 42 """ def __init__(self, func, name=None, doc=None): self.func = func self.__name__ = name or func.__name__ self.__doc__ = doc or func.__doc__ def __get__(self, obj, owner): if obj is None: return self value = self.func(obj) setattr(obj, self.__name__, value) return value class MethodInspector(object): def __init__(self, func): self._func = func @cached_property def required_args(self): return self.args[0:self.required_args_count] @cached_property def optional_args(self): keys = self.args[self.required_args_count: len(self.args)] return zip(keys, self.defaults) @cached_property def defaults(self): return self.argspec.defaults or () @cached_property def required_args_count(self): return len(self.args) - len(self.defaults) @cached_property def args(self): args = self.argspec.args if inspect.ismethod(self._func): args.pop(0) return args @cached_property def argspec(self): return inspect.getargspec(self._func) def __str__(self): optionals = ["[{0}=<{0}>]".format(k) for k, v in self.optional_args] required = ["{0}=<{0}>".format(arg) for arg in self.required_args] args_str = ' '.join(required + optionals) return "%s %s" % (self._func.__name__, args_str) def build_polling_task(retriever, condition=lambda value: value, sleep_time=1, time_out=0): """Run a function in a loop with backoff on error. The condition function runs based on the retriever function result. """ def poll_and_check(): obj = retriever() if condition(obj): raise loopingcall.LoopingCallDone(retvalue=obj) call = loopingcall.BackOffLoopingCall(f=poll_and_check) return call.start(initial_delay=0, starting_interval=sleep_time, max_interval=30, timeout=time_out) def wait_for_task(polling_task): """Waits for the task until it is finished""" try: return polling_task.wait() except loopingcall.LoopingCallTimeOut: raise exception.PollTimeOut def poll_until(retriever, condition=lambda value: value, sleep_time=3, time_out=0): """Retrieves object until it passes condition, then returns it. If time_out_limit is passed in, PollTimeOut will be raised once that amount of time is eclipsed. """ task = build_polling_task(retriever, condition=condition, sleep_time=sleep_time, time_out=time_out) return wait_for_task(task) # Copied from nova.api.openstack.common in the old code. def get_id_from_href(href): """Return the id or uuid portion of a url. Given: 'http://www.foo.com/bar/123?q=4' Returns: '123' Given: 'http://www.foo.com/bar/abc123?q=4' Returns: 'abc123' """ return urlparse.urlsplit("%s" % href).path.split('/')[-1] def execute_with_timeout(*args, **kwargs): time = kwargs.pop('timeout', CONF.command_process_timeout) log_output_on_error = kwargs.pop('log_output_on_error', False) timeout = Timeout(time) try: return execute(*args, **kwargs) except exception.ProcessExecutionError as e: if log_output_on_error: LOG.error( ("Command '%(cmd)s' failed. 
%(description)s " "Exit code: %(exit_code)s\nstderr: %(stderr)s\n" "stdout: %(stdout)s"), {'cmd': e.cmd, 'description': e.description or '', 'exit_code': e.exit_code, 'stderr': e.stderr, 'stdout': e.stdout}) raise except Timeout as t: if t is not timeout: LOG.error("Got a timeout but not the one expected.") raise else: log_fmt = ("Time out after waiting " "%(time)s seconds when running proc: %(args)s" " %(kwargs)s.") exc_fmt = _("Time out after waiting " "%(time)s seconds when running proc: %(args)s" " %(kwargs)s.") msg_content = { 'time': time, 'args': args, 'kwargs': kwargs} LOG.error(log_fmt, msg_content) raise exception.ProcessExecutionError(exc_fmt % msg_content) finally: timeout.cancel() def correct_id_with_req(id, request): # Due to a shortcoming with the way Trove uses routes.mapper, # URL entities right of the last slash that contain at least # one . are routed to our service without that suffix, as # it was interpreted as a filetype This method looks at the # request, and if applicable, reattaches the suffix to the id. routing_args = request.environ.get('wsgiorg.routing_args', []) for routing_arg in routing_args: try: found = routing_arg.get('format', '') if found and found not in CONF.expected_filetype_suffixes: return "%s.%s" % (id, found) except (AttributeError, KeyError): # Not the relevant routing_args entry. pass return id def generate_random_password(password_length=None): password_length = ( password_length or cfg.get_configuration_property('default_password_length') ) return pwd.genword(length=password_length) def try_recover(func): def _decorator(*args, **kwargs): recover_func = kwargs.pop("recover_func", None) try: func(*args, **kwargs) except Exception: if recover_func is not None: recover_func(func) else: LOG.debug("No recovery method defined for %(func)s", { 'func': func.__name__}) raise return _decorator def unpack_singleton(container): """Unpack singleton collections. Check whether a given collection is a singleton (has exactly one element) and unpack it if that is the case. Return the original collection otherwise. """ if is_collection(container) and len(container) == 1: return unpack_singleton(container[0]) return container def is_collection(item): """Return True is a given item is an iterable collection, but not a string. """ return (isinstance(item, collections.Iterable) and not isinstance(item, (bytes, six.text_type))) def format_output(message, format_len=79, truncate_len=None, replace_index=0): """Recursive function to try and keep line lengths below a certain amount, so they can be displayed nicely on the command-line or UI. Tries replacement patterns one at a time (in round-robin fashion) that insert \n at strategic spots. """ replacements = [['. 
', '.\n'], [' (', '\n('], [': ', ':\n ']] replace_index %= len(replacements) if not isinstance(message, list): message = message.splitlines(1) msg_list = [] for line in message: if len(line) > format_len: ok_to_split_again = False for count in range(0, len(replacements)): lines = line.replace( replacements[replace_index][0], replacements[replace_index][1], 1 ).splitlines(1) replace_index = (replace_index + 1) % len(replacements) if len(lines) > 1: ok_to_split_again = True break for item in lines: # If we spilt, but a line is still too long, do it again if ok_to_split_again and len(item) > format_len: item = format_output(item, format_len=format_len, replace_index=replace_index) msg_list.append(item) else: msg_list.append(line) msg_str = "".join(msg_list) if truncate_len and len(msg_str) > truncate_len: msg_str = msg_str[:truncate_len - 3] + '...' return msg_str def to_gb(bytes): """ This was moved from dbaas.py so that it could be used as widely as a utility function. The tests corresponding to this were also moved out from test_dbaas.py to test_utils.py. """ if bytes == 0: return 0.0 size = bytes / 1024.0 ** 3 # Make sure we don't return 0.0 if the size is greater than 0 return max(round(size, 2), 0.01) def to_mb(bytes): """ This was moved from dbaas.py so that it could be used as widely as a utility function. The tests corresponding to this were also moved out from test_dbaas.py to test_utils.py. """ if bytes == 0: return 0.0 size = bytes / 1024.0 ** 2 # Make sure we don't return 0.0 if the size is greater than 0 return max(round(size, 2), 0.01) def req_to_text(req): """ We do a lot request logging for debug, but if the value of one request header is encoded in utf-8, an UnicodeEncodeError will be raised. So we should carefully encode request headers. To be consitent with webob, main procedures are copied from webob.Request.as_bytes. """ url = req.url host = req.host_url assert url.startswith(host) url = url[len(host):] parts = [safe_encode('%s %s %s' % (req.method, url, req.http_version))] for k, v in sorted(req.headers.items()): header = safe_encode('%s: %s' % (k, v)) parts.append(header) if req.body: parts.extend([b'', safe_encode(req.body)]) return b'\r\n'.join(parts).decode(req.charset) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/views.py0000644000175000017500000000252100000000000020305 0ustar00coreycorey00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
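# Usage sketch for trove.common.utils.poll_until above (illustrative; the
# retriever and its client below are hypothetical):
#
#     def get_status():
#         return nova_client.servers.get(server_id).status
#
#     utils.poll_until(get_status,
#                      condition=lambda status: status == 'ACTIVE',
#                      sleep_time=3, time_out=600)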
from trove.common import wsgi def create_links(resource_path, request, id): """Creates the links dictionary in the format typical of most resources.""" context = request.environ[wsgi.CONTEXT_KEY] link_info = { 'host': request.host, 'version': request.url_version, 'tenant_id': context.project_id, 'resource_path': resource_path, 'id': id, } return [ { "href": "https://%(host)s/v%(version)s/%(tenant_id)s" "/%(resource_path)s/%(id)s" % link_info, "rel": "self" }, { "href": "https://%(host)s/%(resource_path)s/%(id)s" % link_info, "rel": "bookmark" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/wsgi.py0000644000175000017500000005602300000000000020127 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Wsgi helper utilities for trove""" import math import re import time import traceback import uuid import eventlet.wsgi import jsonschema from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_service import service from oslo_utils import encodeutils import paste.urlmap import webob import webob.dec import webob.exc from trove.common import base_wsgi from trove.common import cfg from trove.common import context as rd_context from trove.common import exception from trove.common.i18n import _ from trove.common import pastedeploy from trove.common import utils CONTEXT_KEY = 'trove.context' Router = base_wsgi.Router Debug = base_wsgi.Debug Middleware = base_wsgi.Middleware JSONDictSerializer = base_wsgi.JSONDictSerializer RequestDeserializer = base_wsgi.RequestDeserializer CONF = cfg.CONF # Raise the default from 8192 to accommodate large tokens eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line eventlet.patcher.monkey_patch(all=False, socket=True) LOG = logging.getLogger('trove.common.wsgi') def versioned_urlmap(*args, **kwargs): urlmap = paste.urlmap.urlmap_factory(*args, **kwargs) return VersionedURLMap(urlmap) def launch(app_name, port, paste_config_file, data={}, host='0.0.0.0', backlog=128, threads=1000, workers=None): """Launches a wsgi server based on the passed in paste_config_file. Launch provides a easy way to create a paste app from the config file and launch it via the service launcher. It takes care of all of the plumbing. The only caveat is that the paste_config_file must be a file that paste.deploy can find and handle. There is a helper method in cfg.py that finds files. Example: conf_file = CONF.find_file(CONF.api_paste_config) launcher = wsgi.launch('myapp', CONF.bind_port, conf_file) launcher.wait() """ LOG.debug("Trove started on %s", host) app = pastedeploy.paste_deploy_app(paste_config_file, app_name, data) server = base_wsgi.Service(app, port, host=host, backlog=backlog, threads=threads) return service.launch(CONF, server, workers, restart_method='mutate') # Note: taken from Nova def serializers(**serializers): """Attaches serializers to a method. 
This decorator associates a dictionary of serializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_serializers'): func.wsgi_serializers = {} func.wsgi_serializers.update(serializers) return func return decorator class TroveMiddleware(Middleware): # Note: taken from nova @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = nova.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import nova.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ def _factory(app): return cls(app, **local_config) return _factory class VersionedURLMap(object): def __init__(self, urlmap): self.urlmap = urlmap def __call__(self, environ, start_response): req = Request(environ) if req.url_version is None and req.accept_version is not None: version = "/v" + req.accept_version http_exc = webob.exc.HTTPNotAcceptable(_("version not supported")) app = self.urlmap.get(version, Fault(http_exc)) else: app = self.urlmap return app(environ, start_response) class Router(base_wsgi.Router): # Original router did not allow for serialization of the 404 error. # To fix this the _dispatch was modified to use Fault() objects. @staticmethod @webob.dec.wsgify def _dispatch(req): """ Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: return Fault(webob.exc.HTTPNotFound()) app = match['controller'] return app class Request(base_wsgi.Request): @property def params(self): return utils.stringify_keys(super(Request, self).params) def best_match_content_type(self, supported_content_types=None): """Determine the most acceptable content-type. Based on the query extension then the Accept header. """ parts = self.path.rsplit('.', 1) if len(parts) > 1: format = parts[1] if format in ['json']: return 'application/{0}'.format(parts[1]) ctypes = { 'application/vnd.openstack.trove+json': "application/json", 'application/json': "application/json", } bm = self.accept.best_match(ctypes.keys()) return ctypes.get(bm, 'application/json') @utils.cached_property def accept_version(self): accept_header = self.headers.get('ACCEPT', "") accept_version_re = re.compile(r".*?application/vnd.openstack.trove" r"(\+.+?)?;" r"version=(?P\d+\.?\d*)") match = accept_version_re.search(accept_header) return match.group("version_no") if match else None @utils.cached_property def url_version(self): versioned_url_re = re.compile(r"/v(?P\d+\.?\d*)") match = versioned_url_re.search(self.path) return match.group("version_no") if match else None class Result(object): """A result whose serialization is compatible with JSON.""" def __init__(self, data, status=200): self._data = data self.status = status def data(self, serialization_type): """Return an appropriate serialized type for the body. serialization_type is not used presently, but may be in the future, so it stays. 
""" if hasattr(self._data, "data_for_json"): return self._data.data_for_json() return self._data class Resource(base_wsgi.Resource): def __init__(self, controller, deserializer, serializer, exception_map=None): exception_map = exception_map or {} self.model_exception_map = self._invert_dict_list(exception_map) super(Resource, self).__init__(controller, deserializer, serializer) @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): return super(Resource, self).__call__(request) def execute_action(self, action, request, **action_args): if getattr(self.controller, action, None) is None: return Fault(webob.exc.HTTPNotFound()) try: self.controller.validate_request(action, action_args) result = super(Resource, self).execute_action( action, request, **action_args) if type(result) is dict: result = Result(result) return result except exception.TroveError as trove_error: LOG.debug(traceback.format_exc()) LOG.debug("Caught Trove Error %s", trove_error) httpError = self._get_http_error(trove_error) LOG.debug("Mapped Error to %s", httpError) return Fault(httpError(str(trove_error), request=request)) except webob.exc.HTTPError as http_error: LOG.debug(traceback.format_exc()) return Fault(http_error) except Exception as error: exception_uuid = str(uuid.uuid4()) LOG.exception(exception_uuid + ": " + str(error)) return Fault(webob.exc.HTTPInternalServerError( "Internal Server Error. Please keep this ID to help us " "figure out what went wrong: (%s)." % exception_uuid, request=request)) def _get_http_error(self, error): return self.model_exception_map.get(type(error), webob.exc.HTTPBadRequest) def _invert_dict_list(self, exception_dict): """Flattens values of keys and inverts keys and values. Example: {'x': [1, 2, 3], 'y': [4, 5, 6]} converted to {1: 'x', 2: 'x', 3: 'x', 4: 'y', 5: 'y', 6: 'y'} """ inverted_dict = {} for key, value_list in exception_dict.items(): for value in value_list: inverted_dict[value] = key return inverted_dict def serialize_response(self, action, action_result, accept): # If an exception is raised here in the base class, it is swallowed, # and the action_result is returned as-is. For us, that's bad news - # we never want that to happen except in the case of webob types. # So we override the behavior here so we can at least log it. try: return super(Resource, self).serialize_response( action, action_result, accept) except Exception: # execute_action either returns the results or a Fault object. # If action_result is not a Fault then there really was a # serialization error which we log. Otherwise return the Fault. 
if not isinstance(action_result, Fault): LOG.exception("Unserializable result detected.") raise return action_result class Controller(object): """Base controller that creates a Resource with default serializers.""" exception_map = { webob.exc.HTTPUnprocessableEntity: [ exception.UnprocessableEntity, ], webob.exc.HTTPUnauthorized: [ exception.Forbidden, exception.SwiftAuthError, ], webob.exc.HTTPForbidden: [ exception.ReplicaSourceDeleteForbidden, exception.BackupTooLarge, exception.ModuleAccessForbidden, exception.ModuleAppliedToInstance, exception.PolicyNotAuthorized, exception.LogAccessForbidden, ], webob.exc.HTTPBadRequest: [ exception.InvalidModelError, exception.BadRequest, exception.CannotResizeToSameSize, exception.BadValue, exception.DatabaseAlreadyExists, exception.UserAlreadyExists, exception.LocalStorageNotSpecified, exception.ModuleAlreadyExists, ], webob.exc.HTTPNotFound: [ exception.NotFound, exception.ComputeInstanceNotFound, exception.ModelNotFoundError, exception.UserNotFound, exception.DatabaseNotFound, exception.QuotaResourceUnknown, exception.BackupFileNotFound, exception.ClusterNotFound, exception.DatastoreNotFound, exception.SwiftNotFound, exception.ModuleTypeNotFound, exception.RootHistoryNotFound, ], webob.exc.HTTPConflict: [ exception.BackupNotCompleteError, exception.RestoreBackupIntegrityError, ], webob.exc.HTTPRequestEntityTooLarge: [ exception.OverLimit, exception.QuotaExceeded, exception.VolumeQuotaExceeded, ], webob.exc.HTTPServerError: [ exception.VolumeCreationFailure, exception.UpdateGuestError, exception.SwiftConnectionError, ], webob.exc.HTTPNotImplemented: [ exception.VolumeNotSupported, exception.LocalStorageNotSupported, exception.DatastoreOperationNotSupported, exception.ClusterInstanceOperationNotSupported, exception.ClusterDatastoreNotSupported, exception.LogsNotAvailable ], } schemas = {} @classmethod def get_schema(cls, action, body): LOG.debug("Getting schema for %(name)s:%(action)s", {'name': cls.__class__.__name__, 'action': action}) if cls.schemas: matching_schema = cls.schemas.get(action, {}) if matching_schema: LOG.debug("Found Schema: %s", matching_schema.get("name", matching_schema)) return matching_schema @staticmethod def format_validation_msg(errors): # format path like object['field1'][i]['subfield2'] messages = [] for error in errors: path = list(error.path) f_path = "%s%s" % (path[0], ''.join(['[%r]' % i for i in path[1:]])) messages.append("%s %s" % (f_path, error.message)) for suberror in sorted(error.context, key=lambda e: e.schema_path): messages.append(suberror.message) error_msg = "; ".join(messages) return "Validation error: %s" % error_msg def validate_request(self, action, action_args): body = action_args.get('body', {}) schema = self.get_schema(action, body) if schema: validator = jsonschema.Draft4Validator(schema) if not validator.is_valid(body): errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_msg = self.format_validation_msg(errors) LOG.info(error_msg) raise exception.BadRequest(message=error_msg) def create_resource(self): return Resource( self, RequestDeserializer(), TroveResponseSerializer(), self.exception_map) def _extract_limits(self, params): return {key: params[key] for key in params.keys() if key in ["limit", "marker"]} class TroveResponseSerializer(base_wsgi.ResponseSerializer): def serialize_body(self, response, data, content_type, action): """Overrides body serialization in base_wsgi.ResponseSerializer. 
If the "data" argument is the Result class, its data method is called and *that* is passed to the superclass implementation instead of the actual data. """ if isinstance(data, Result): data = data.data(content_type) super(TroveResponseSerializer, self).serialize_body( response, data, content_type, action) def serialize_headers(self, response, data, action): super(TroveResponseSerializer, self).serialize_headers( response, data, action) if isinstance(data, Result): response.status = data.status class Fault(webob.exc.HTTPException): """Error codes for API faults.""" code_wrapper = { 400: webob.exc.HTTPBadRequest, 401: webob.exc.HTTPUnauthorized, 403: webob.exc.HTTPForbidden, 404: webob.exc.HTTPNotFound, } resp_codes = [int(code) for code in code_wrapper.keys()] def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception @staticmethod def _get_error_name(exc): # Displays a Red Dwarf specific error name instead of a webob exc name. named_exceptions = { 'HTTPBadRequest': 'badRequest', 'HTTPUnauthorized': 'unauthorized', 'HTTPForbidden': 'forbidden', 'HTTPNotFound': 'itemNotFound', 'HTTPMethodNotAllowed': 'badMethod', 'HTTPRequestEntityTooLarge': 'overLimit', 'HTTPUnsupportedMediaType': 'badMediaType', 'HTTPInternalServerError': 'instanceFault', 'HTTPNotImplemented': 'notImplemented', 'HTTPServiceUnavailable': 'serviceUnavailable', } name = exc.__class__.__name__ if name in named_exceptions: return named_exceptions[name] # If the exception isn't in our list, at least strip off the # HTTP from the name, and then drop the case on the first letter. name = name.split("HTTP").pop() name = name[:1].lower() + name[1:] return name @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. 
fault_name = Fault._get_error_name(self.wrapped_exc) fault_data = { fault_name: { 'code': self.wrapped_exc.status_int, } } if self.wrapped_exc.detail: fault_data[fault_name]['message'] = self.wrapped_exc.detail else: fault_data[fault_name]['message'] = self.wrapped_exc.explanation content_type = req.best_match_content_type() serializer = { 'application/json': JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data, content_type) self.wrapped_exc.content_type = content_type return self.wrapped_exc class ContextMiddleware(base_wsgi.Middleware): def __init__(self, application): self.admin_roles = CONF.admin_roles super(ContextMiddleware, self).__init__(application) def _extract_limits(self, params): return {key: params[key] for key in params.keys() if key in ["limit", "marker"]} def process_request(self, request): service_catalog = None catalog_header = request.headers.get('X-Service-Catalog', None) if catalog_header: try: service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( _('Invalid service catalog json.')) tenant_id = request.headers.get('X-Tenant-Id', None) auth_token = request.headers["X-Auth-Token"] user_id = request.headers.get('X-User-ID', None) roles = request.headers.get('X-Role', '').split(',') is_admin = False for role in roles: if role.lower() in self.admin_roles: is_admin = True break limits = self._extract_limits(request.params) context = rd_context.TroveContext(auth_token=auth_token, tenant=tenant_id, user=user_id, is_admin=is_admin, limit=limits.get('limit'), marker=limits.get('marker'), service_catalog=service_catalog, roles=roles) request.environ[CONTEXT_KEY] = context @classmethod def factory(cls, global_config, **local_config): def _factory(app): LOG.debug("Created context middleware with config: %s", local_config) return cls(app) return _factory class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): try: resp = req.get_response(self.application) if resp.status_int in Fault.resp_codes: for (header, value) in resp._headerlist: if header == "Content-Type" and \ value == "text/plain; charset=UTF-8": return Fault(Fault.code_wrapper[resp.status_int]()) return resp return resp except Exception as ex: LOG.exception("Caught error: %s.", encodeutils.exception_to_unicode(ex)) exc = webob.exc.HTTPInternalServerError() return Fault(exc) @classmethod def factory(cls, global_config, **local_config): def _factory(app): return cls(app) return _factory # ported from Nova class OverLimitFault(webob.exc.HTTPException): """ Rate-limited request response. """ def __init__(self, message, details, retry_time): """ Initialize new `OverLimitFault` with relevant information. """ hdrs = OverLimitFault._retry_after(retry_time) self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) self.content = {"overLimit": {"code": self.wrapped_exc.status_int, "message": message, "details": details, "retryAfter": hdrs['Retry-After'], }, } @staticmethod def _retry_after(retry_time): delay = int(math.ceil(retry_time - time.time())) retry_after = delay if delay > 0 else 0 headers = {'Retry-After': '%d' % retry_after} return headers @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """ Return the wrapped exception with a serialized body conforming to our error format. 
""" content_type = request.best_match_content_type() serializer = {'application/json': JSONDictSerializer(), }[content_type] content = serializer.serialize(self.content) self.wrapped_exc.body = content self.wrapped_exc.content_type = content_type return self.wrapped_exc class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/common/xmlutils.py0000644000175000017500000000552000000000000021033 0ustar00coreycorey00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from xml.dom import minidom from xml.parsers import expat from xml import sax from xml.sax import expatreader from trove.common.i18n import _ class ProtectedExpatParser(expatreader.ExpatParser): """An expat parser which disables DTD's and entities by default.""" def __init__(self, forbid_dtd=True, forbid_entities=True, *args, **kwargs): # Python 2.x old style class expatreader.ExpatParser.__init__(self, *args, **kwargs) self.forbid_dtd = forbid_dtd self.forbid_entities = forbid_entities def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): raise ValueError(_("Inline DTD forbidden")) def entity_decl(self, entityName, is_parameter_entity, value, base, systemId, publicId, notationName): raise ValueError(_(" entity declaration forbidden")) def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): # expat 1.2 raise ValueError(_(" unparsed entity forbidden")) def external_entity_ref(self, context, base, systemId, publicId): raise ValueError(_(" external entity forbidden")) def notation_decl(self, name, base, sysid, pubid): raise ValueError(_(" notation forbidden")) def reset(self): expatreader.ExpatParser.reset(self) if self.forbid_dtd: self._parser.StartDoctypeDeclHandler = self.start_doctype_decl self._parser.EndDoctypeDeclHandler = None if self.forbid_entities: self._parser.EntityDeclHandler = self.entity_decl self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl self._parser.ExternalEntityRefHandler = self.external_entity_ref self._parser.NotationDeclHandler = self.notation_decl try: self._parser.SkippedEntityHandler = None except AttributeError: # some pyexpat versions do not support SkippedEntity pass def safe_minidom_parse_string(xml_string): """Parse an XML string using minidom safely. 
""" try: return minidom.parseString(xml_string, parser=ProtectedExpatParser()) except sax.SAXParseException: raise expat.ExpatError() ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.74411 trove-12.1.0.dev92/trove/conductor/0000755000175000017500000000000000000000000017306 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/conductor/__init__.py0000644000175000017500000000000000000000000021405 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/conductor/api.py0000644000175000017500000001113400000000000020431 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import oslo_messaging as messaging from trove.common import cfg from trove.common.rpc import conductor_guest_serializer as sz from trove.common.serializable_notification import SerializableNotification from trove import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class API(object): """API for interacting with trove conductor. API version history: * 1.0 - Initial version. 
When updating this API, also update API_LATEST_VERSION """ # API_LATEST_VERSION should bump the minor number each time # a method signature is added or changed API_LATEST_VERSION = '1.0' # API_BASE_VERSION should only change on major version upgrade API_BASE_VERSION = '1.0' VERSION_ALIASES = { 'icehouse': '1.0', 'juno': '1.0', 'kilo': '1.0', 'liberty': '1.0', 'mitaka': '1.0', 'newton': '1.0', 'latest': API_LATEST_VERSION } def __init__(self, context): self.context = context super(API, self).__init__() version_cap = self.VERSION_ALIASES.get( CONF.upgrade_levels.conductor, CONF.upgrade_levels.conductor) target = messaging.Target(topic=CONF.conductor_queue, version=version_cap) self.client = self.get_client(target, version_cap) def get_client(self, target, version_cap, serializer=None): return rpc.get_client(target, key=CONF.instance_rpc_encr_key, version_cap=version_cap, serializer=serializer, secure_serializer=sz.ConductorGuestSerializer) def heartbeat(self, instance_id, payload, sent=None): LOG.debug("Making async call to cast heartbeat for instance: %s", instance_id) version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "heartbeat", instance_id=instance_id, sent=sent, payload=payload) def update_backup(self, instance_id, backup_id, sent=None, **backup_fields): LOG.debug("Making async call to cast update_backup for instance: %s", instance_id) version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "update_backup", instance_id=instance_id, backup_id=backup_id, sent=sent, **backup_fields) def report_root(self, instance_id): LOG.debug("Making async call to cast report_root for instance: %s", instance_id) version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "report_root", instance_id=instance_id) def notify_end(self, **notification_args): LOG.debug("Making async call to cast end notification") version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) context = self.context serialized = SerializableNotification.serialize(context, context.notification) cctxt.cast(self.context, "notify_end", serialized_notification=serialized, notification_args=notification_args) def notify_exc_info(self, message, exception): LOG.debug("Making async call to cast error notification") version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) context = self.context serialized = SerializableNotification.serialize(context, context.notification) serialized.update({'instance_id': CONF.guest_id}) cctxt.cast(self.context, "notify_exc_info", serialized_notification=serialized, message=message, exception=exception) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/conductor/manager.py0000644000175000017500000001372300000000000021300 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
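# Illustrative usage sketch (assumption, not part of the original sources):
# how a guest-side caller might cast a heartbeat through the conductor API
# defined above. The instance id and payload values here are made up.
def _example_heartbeat(context):
    import time

    from trove.conductor import api as conductor_api

    client = conductor_api.API(context)
    # 'sent' carries the send time so the conductor can discard heartbeats
    # that arrive late or out of order (see Manager._message_too_old below).
    client.heartbeat('11111111-2222-3333-4444-555555555555',
                     {'service_status': 'running'},
                     sent=time.time())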
from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import periodic_task from trove.backup import models as bkup_models from trove.common import cfg from trove.common import exception as trove_exception from trove.common.instance import ServiceStatus from trove.common.rpc import version as rpc_version from trove.common.serializable_notification import SerializableNotification from trove.conductor.models import LastSeen from trove.extensions.mysql import models as mysql_models from trove.instance import models as inst_models LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager(periodic_task.PeriodicTasks): target = messaging.Target(version=rpc_version.RPC_API_VERSION) def __init__(self): super(Manager, self).__init__(CONF) def _message_too_old(self, instance_id, method_name, sent): fields = { "instance": instance_id, "method": method_name, "sent": sent, } if sent is None: LOG.error("[Instance %s] sent field not present. Cannot " "compare.", instance_id) return False LOG.debug("Instance %(instance)s sent %(method)s at %(sent)s ", fields) seen = None try: seen = LastSeen.load(instance_id=instance_id, method_name=method_name) except trove_exception.NotFound: # This is fine. pass if seen is None: LOG.debug("[Instance %s] Did not find any previous message. " "Creating.", instance_id) seen = LastSeen.create(instance_id=instance_id, method_name=method_name, sent=sent) seen.save() return False last_sent = float(seen.sent) if last_sent < sent: LOG.debug("[Instance %s] Rec'd message is younger than last " "seen. Updating.", instance_id) seen.sent = sent seen.save() return False LOG.info("[Instance %s] Rec'd message is older than last seen. " "Discarding.", instance_id) return True def heartbeat(self, context, instance_id, payload, sent=None): LOG.debug("Instance ID: %(instance)s, Payload: %(payload)s", {"instance": str(instance_id), "payload": str(payload)}) status = inst_models.InstanceServiceStatus.find_by( instance_id=instance_id) if self._message_too_old(instance_id, 'heartbeat', sent): return if payload.get('service_status') is not None: status.set_status(ServiceStatus.from_description( payload['service_status'])) status.save() def update_backup(self, context, instance_id, backup_id, sent=None, **backup_fields): LOG.debug("Instance ID: %(instance)s, Backup ID: %(backup)s", {"instance": str(instance_id), "backup": str(backup_id)}) backup = bkup_models.DBBackup.find_by(id=backup_id) # TODO(datsun180b): use context to verify tenant matches if self._message_too_old(instance_id, 'update_backup', sent): return # Some verification based on IDs if backup_id != backup.id: fields = { 'expected': backup_id, 'found': backup.id, 'instance': str(instance_id), } LOG.error("[Instance: %(instance)s] Backup IDs mismatch! " "Expected %(expected)s, found %(found)s", fields) return if instance_id != backup.instance_id: fields = { 'expected': instance_id, 'found': backup.instance_id, 'instance': str(instance_id), } LOG.error("[Instance: %(instance)s] Backup instance IDs " "mismatch! Expected %(expected)s, found " "%(found)s", fields) return for k, v in backup_fields.items(): if hasattr(backup, k): fields = { 'key': k, 'value': v, } LOG.debug("Backup %(key)s: %(value)s", fields) setattr(backup, k, v) backup.save() # NOTE(zhaochao): the 'user' argument is left here to keep # compatible with existing instances. def report_root(self, context, instance_id, user=None): if user is not None: LOG.debug("calling report_root with a username: %s, " "is deprecated now!" 
% user) mysql_models.RootHistory.create(context, instance_id) def notify_end(self, context, serialized_notification, notification_args): notification = SerializableNotification.deserialize( context, serialized_notification) notification.notify_end(**notification_args) def notify_exc_info(self, context, serialized_notification, message, exception): notification = SerializableNotification.deserialize( context, serialized_notification) LOG.error("Guest exception on request %(req)s:\n%(exc)s", {'req': notification.request_id, 'exc': exception}) notification.notify_exc_info(message, exception) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/conductor/models.py0000644000175000017500000000307300000000000021146 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from trove.db import get_db_api def persisted_models(): return {'conductor_lastseen': LastSeen} class LastSeen(object): """A table used only by Conductor to discard messages that arrive late and out of order. """ _auto_generated_attrs = [] _data_fields = ['instance_id', 'method_name', 'sent'] _table_name = 'conductor_lastseen' preserve_on_delete = False def __init__(self, instance_id, method_name, sent): self.instance_id = instance_id self.method_name = method_name self.sent = sent def save(self): return get_db_api().save(self) @classmethod def load(cls, instance_id, method_name): seen = get_db_api().find_by(cls, instance_id=instance_id, method_name=method_name) return seen @classmethod def create(cls, instance_id, method_name, sent): seen = LastSeen(instance_id, method_name, sent) return seen.save() ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.74411 trove-12.1.0.dev92/trove/configuration/0000755000175000017500000000000000000000000020155 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/configuration/__init__.py0000644000175000017500000000000000000000000022254 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/configuration/models.py0000644000175000017500000003651600000000000022025 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
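# Illustrative sketch (assumption, not part of the original sources): the
# ordering rule that conductor.manager.Manager._message_too_old() applies on
# top of the LastSeen model above, reduced to plain values.
def _example_message_too_old(last_sent, sent):
    # Messages without a 'sent' timestamp are never discarded.
    if sent is None:
        return False
    # First message seen for this (instance, method) pair: accept it.
    if last_sent is None:
        return False
    # Accept only messages strictly newer than the last timestamp seen;
    # anything equal or older is discarded as a late or duplicate delivery.
    return not (float(last_sent) < sent)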
import json

from oslo_log import log as logging

from trove.common import cfg
from trove.common import exception
from trove.common.exception import ModelNotFoundError
from trove.common.i18n import _
from trove.common import timeutils
from trove.common import utils
from trove.datastore import models as dstore_models
from trove.db import get_db_api
from trove.db import models as dbmodels

CONF = cfg.CONF

LOG = logging.getLogger(__name__)


class Configurations(object):

    DEFAULT_LIMIT = CONF.configurations_page_size

    @staticmethod
    def load(context):
        if context is None:
            raise TypeError(_("Argument context not defined."))

        if context.is_admin:
            db_info = DBConfiguration.find_all(deleted=False)
            if db_info.count() == 0:
                LOG.debug("No configurations found for admin user")
        else:
            db_info = DBConfiguration.find_all(tenant_id=context.project_id,
                                               deleted=False)
            if db_info.count() == 0:
                LOG.debug("No configurations found for tenant %s",
                          context.project_id)

        limit = utils.pagination_limit(context.limit,
                                       Configurations.DEFAULT_LIMIT)
        data_view = DBConfiguration.find_by_pagination('configurations',
                                                       db_info,
                                                       "foo",
                                                       limit=limit,
                                                       marker=context.marker)
        next_marker = data_view.next_page_marker
        return data_view.collection, next_marker


class Configuration(object):

    def __init__(self, context, configuration_id):
        self.context = context
        self.configuration_id = configuration_id

    @staticmethod
    def create(name, description, tenant_id, datastore, datastore_version):
        configuration_group = DBConfiguration.create(
            name=name,
            description=description,
            tenant_id=tenant_id,
            datastore_version_id=datastore_version)
        return configuration_group

    @staticmethod
    def create_items(cfg_id, values):
        LOG.debug("Saving configuration values for %(id)s - "
                  "values: %(values)s",
                  {'id': cfg_id, 'values': values})
        config_items = []
        for key, val in values.items():
            config_item = DBConfigurationParameter.create(
                configuration_id=cfg_id,
                configuration_key=key,
                configuration_value=val)
            config_items.append(config_item)
        return config_items

    @staticmethod
    def delete(context, group):
        deleted_at = timeutils.utcnow()
        Configuration.remove_all_items(context, group.id, deleted_at)
        group.deleted = True
        group.deleted_at = deleted_at
        group.save()

    @staticmethod
    def remove_all_items(context, id, deleted_at):
        items = DBConfigurationParameter.find_all(configuration_id=id,
                                                  deleted=False).all()
        LOG.debug("Removing all configuration values for %s", id)
        for item in items:
            item.deleted = True
            item.deleted_at = deleted_at
            item.save()

    @staticmethod
    def load_configuration_datastore_version(context, id):
        config = Configuration.load(context, id)
        datastore_version = dstore_models.DatastoreVersion.load_by_uuid(
            config.datastore_version_id)
        return datastore_version

    @staticmethod
    def load(context, id):
        try:
            if context.is_admin:
                return DBConfiguration.find_by(id=id,
                                               deleted=False)
            else:
                return DBConfiguration.find_by(id=id,
                                               tenant_id=context.project_id,
                                               deleted=False)
        except ModelNotFoundError:
            msg = _("Configuration group with ID %s could not be found.") % id
            raise ModelNotFoundError(msg)

    @staticmethod
    def find_parameter_details(name, detail_list):
        for item in detail_list:
            if item.name == name:
                return item
        return None

    @staticmethod
    def load_items(context, id):
        datastore_v = Configuration.load_configuration_datastore_version(
            context, id)
        config_items = DBConfigurationParameter.find_all(
            configuration_id=id, deleted=False).all()
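        # Stored values are plain strings; the rules loaded below declare each
        # parameter's data type, and the loop coerces every value back to that
        # type (booleans are persisted as "0"/"1", hence bool(int(...))).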
detail_list = DatastoreConfigurationParameters.load_parameters( datastore_v.id) for item in config_items: rule = Configuration.find_parameter_details( str(item.configuration_key), detail_list) if not rule: continue if rule.data_type == 'boolean': item.configuration_value = bool(int(item.configuration_value)) elif rule.data_type == 'integer': item.configuration_value = int(item.configuration_value) else: item.configuration_value = str(item.configuration_value) return config_items def get_configuration_overrides(self): """Gets the overrides dictionary to apply to an instance.""" overrides = {} if self.configuration_id: config_items = Configuration.load_items(self.context, id=self.configuration_id) for i in config_items: overrides[i.configuration_key] = i.configuration_value return overrides def does_configuration_need_restart(self): datastore_v = Configuration.load_configuration_datastore_version( self.context, self.configuration_id) config_items = Configuration.load_items(self.context, id=self.configuration_id) LOG.debug("config_items: %s", config_items) detail_list = DatastoreConfigurationParameters.load_parameters( datastore_v.id, show_deleted=True) for i in config_items: LOG.debug("config item: %s", i) details = Configuration.find_parameter_details( i.configuration_key, detail_list) LOG.debug("parameter details: %s", details) if not details: raise exception.NotFound(uuid=i.configuration_key) if bool(details.restart_required): return True return False @staticmethod def save(configuration, configuration_items): DBConfiguration.save(configuration) for item in configuration_items: item["deleted_at"] = None DBConfigurationParameter.save(item) @staticmethod def find(context, configuration_id, datastore_version_id): try: info = Configuration.load(context, configuration_id) if (info.datastore_version_id == datastore_version_id): return Configuration(context, configuration_id) except exception.ModelNotFoundError: raise exception.NotFound( message='Configuration group id: %s could not be found.' 
% configuration_id) raise exception.ConfigurationDatastoreNotMatchInstance( config_datastore_version=info.datastore_version_id, instance_datastore_version=datastore_version_id) class DBConfiguration(dbmodels.DatabaseModelBase): _data_fields = ['name', 'description', 'tenant_id', 'datastore_version_id', 'deleted', 'deleted_at', 'created', 'updated'] _table_name = 'configurations' @property def datastore(self): datastore_version = dstore_models.DatastoreVersion.load_by_uuid( self.datastore_version_id) datastore = dstore_models.Datastore.load( datastore_version.datastore_id) return datastore @property def datastore_version(self): datastore_version = dstore_models.DatastoreVersion.load_by_uuid( self.datastore_version_id) return datastore_version class DBConfigurationParameter(dbmodels.DatabaseModelBase): _auto_generated_attrs = [] _data_fields = ['configuration_id', 'configuration_key', 'configuration_value', 'deleted', 'deleted_at'] _table_name = 'configuration_parameters' def __hash__(self): return self.configuration_key.__hash__() class DBDatastoreConfigurationParameters(dbmodels.DatabaseModelBase): """Model for storing the configuration parameters on a datastore.""" _data_fields = [ 'name', 'datastore_version_id', 'restart_required', 'max_size', 'min_size', 'data_type', 'deleted', 'deleted_at', ] _table_name = "datastore_configuration_parameters" class DatastoreConfigurationParameters(object): def __init__(self, db_info): self.db_info = db_info @staticmethod def create(**kwargs): """Create a configuration parameter for a datastore version.""" # Do we already have a parameter in the db? # yes: and its deleted then modify the param # yes: and its not deleted then error on create. # no: then just create the new param ds_v_id = kwargs.get('datastore_version_id') config_param_name = kwargs.get('name') try: param = DatastoreConfigurationParameters.load_parameter_by_name( ds_v_id, config_param_name, show_deleted=True) if param.deleted == 1: param.restart_required = kwargs.get('restart_required') param.data_type = kwargs.get('data_type') param.max_size = kwargs.get('max_size') param.min_size = kwargs.get('min_size') param.deleted = 0 param.save() return param else: raise exception.ConfigurationParameterAlreadyExists( parameter_name=config_param_name, datastore_version=ds_v_id) except exception.NotFound: pass config_param = DBDatastoreConfigurationParameters.create( **kwargs) return config_param @staticmethod def delete(version_id, config_param_name): config_param = DatastoreConfigurationParameters.load_parameter_by_name( version_id, config_param_name) config_param.deleted = True config_param.deleted_at = timeutils.utcnow() config_param.save() @classmethod def load_parameters(cls, datastore_version_id, show_deleted=False): try: if show_deleted: return DBDatastoreConfigurationParameters.find_all( datastore_version_id=datastore_version_id ) else: return DBDatastoreConfigurationParameters.find_all( datastore_version_id=datastore_version_id, deleted=False ) except exception.NotFound: raise exception.NotFound(uuid=datastore_version_id) @classmethod def load_parameter(cls, config_id, show_deleted=False): try: if show_deleted: return DBDatastoreConfigurationParameters.find_by( id=config_id ) else: return DBDatastoreConfigurationParameters.find_by( id=config_id, deleted=False ) except exception.NotFound: raise exception.NotFound(uuid=config_id) @classmethod def load_parameter_by_name(cls, datastore_version_id, config_param_name, show_deleted=False): try: if show_deleted: return 
DBDatastoreConfigurationParameters.find_by( datastore_version_id=datastore_version_id, name=config_param_name ) else: return DBDatastoreConfigurationParameters.find_by( datastore_version_id=datastore_version_id, name=config_param_name, deleted=False ) except exception.NotFound: raise exception.NotFound(uuid=config_param_name) def create_or_update_datastore_configuration_parameter(name, datastore_version_id, restart_required, data_type, max_size, min_size): get_db_api().configure_db(CONF) datastore_version = dstore_models.DatastoreVersion.load_by_uuid( datastore_version_id) try: config = DatastoreConfigurationParameters.load_parameter_by_name( datastore_version_id, name, show_deleted=True) config.restart_required = restart_required config.max_size = max_size config.min_size = min_size config.data_type = data_type get_db_api().save(config) except exception.NotFound: config = DBDatastoreConfigurationParameters( id=utils.generate_uuid(), name=name, datastore_version_id=datastore_version.id, restart_required=restart_required, data_type=data_type, max_size=max_size, min_size=min_size, deleted=False, ) get_db_api().save(config) def load_datastore_configuration_parameters(datastore, datastore_version, config_file): get_db_api().configure_db(CONF) (ds, ds_v) = dstore_models.get_datastore_version( type=datastore, version=datastore_version, return_inactive=True) with open(config_file) as f: config = json.load(f) for param in config['configuration-parameters']: create_or_update_datastore_configuration_parameter( param['name'], ds_v.id, param['restart_required'], param['type'], param.get('max'), param.get('min'), ) def persisted_models(): return { 'configurations': DBConfiguration, 'configuration_parameters': DBConfigurationParameter, 'datastore_configuration_parameters': DBDatastoreConfigurationParameters, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/configuration/service.py0000644000175000017500000004367500000000000022206 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
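# Illustrative sketch (assumption, not part of the original sources): the
# document shape consumed by load_datastore_configuration_parameters() in
# trove/configuration/models.py above. The parameter shown is a made-up
# example; only the key names are taken from the loader itself.
EXAMPLE_CONFIGURATION_PARAMETERS = {
    "configuration-parameters": [
        {
            "name": "max_connections",   # parameter name
            "restart_required": False,   # does a change need a restart?
            "type": "integer",           # boolean/string/integer/float
            "max": 100000,               # optional upper bound
            "min": 1,                    # optional lower bound
        },
    ],
}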
from oslo_log import log as logging import six from trove.cluster import models as cluster_models import trove.common.apischema as apischema from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import notification from trove.common.notification import StartNotification, EndNotification from trove.common import pagination from trove.common import policy from trove.common import timeutils from trove.common import wsgi from trove.configuration import models from trove.configuration.models import DBConfigurationParameter from trove.configuration import views from trove.datastore import models as ds_models from trove.instance import models as instances_models CONF = cfg.CONF LOG = logging.getLogger(__name__) class ConfigurationsController(wsgi.Controller): schemas = apischema.configuration @classmethod def authorize_config_action(cls, context, config_rule_name, config): policy.authorize_on_target( context, 'configuration:%s' % config_rule_name, {'tenant': config.tenant_id}) def index(self, req, tenant_id): context = req.environ[wsgi.CONTEXT_KEY] configs, marker = models.Configurations.load(context) policy.authorize_on_tenant(context, 'configuration:index') view = views.ConfigurationsView(configs) paged = pagination.SimplePaginatedDataView(req.url, 'configurations', view, marker) return wsgi.Result(paged.data(), 200) def show(self, req, tenant_id, id): LOG.debug("Showing configuration group %(id)s on tenant %(tenant)s", {"tenant": tenant_id, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] configuration = models.Configuration.load(context, id) self.authorize_config_action(context, 'show', configuration) configuration_items = models.Configuration.load_items(context, id) configuration.instance_count = instances_models.DBInstance.find_all( tenant_id=context.project_id, configuration_id=configuration.id, deleted=False).count() return wsgi.Result(views.DetailedConfigurationView( configuration, configuration_items).data(), 200) def instances(self, req, tenant_id, id): context = req.environ[wsgi.CONTEXT_KEY] configuration = models.Configuration.load(context, id) self.authorize_config_action(context, 'instances', configuration) instances = instances_models.DBInstance.find_all( tenant_id=context.tenant, configuration_id=configuration.id, deleted=False) limit = int(context.limit or CONF.instances_page_size) if limit > CONF.instances_page_size: limit = CONF.instances_page_size data_view = instances_models.DBInstance.find_by_pagination( 'instances', instances, "foo", limit=limit, marker=context.marker) view = views.DetailedConfigurationInstancesView(data_view.collection) paged = pagination.SimplePaginatedDataView(req.url, 'instances', view, data_view.next_page_marker) return wsgi.Result(paged.data(), 200) def create(self, req, body, tenant_id): LOG.debug("req : '%s'\n\n", req) LOG.debug("body : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'configuration:create') context.notification = notification.DBaaSConfigurationCreate( context, request=req) name = body['configuration']['name'] description = body['configuration'].get('description') values = body['configuration']['values'] msg = ("Creating configuration group on tenant " "%(tenant_id)s with name: %(cfg_name)s") LOG.info(msg, {"tenant_id": tenant_id, "cfg_name": name}) datastore_args = body['configuration'].get('datastore', {}) datastore, datastore_version = ( ds_models.get_datastore_version(**datastore_args)) with StartNotification(context, 
name=name, datastore=datastore.name, datastore_version=datastore_version.name): configItems = [] if values: # validate that the values passed in are permitted by the # operator. ConfigurationsController._validate_configuration( body['configuration']['values'], datastore_version, models.DatastoreConfigurationParameters.load_parameters( datastore_version.id)) for k, v in values.items(): configItems.append(DBConfigurationParameter( configuration_key=k, configuration_value=v)) cfg_group = models.Configuration.create(name, description, tenant_id, datastore.id, datastore_version.id) with EndNotification(context, configuration_id=cfg_group.id): cfg_group_items = models.Configuration.create_items( cfg_group.id, values) view_data = views.DetailedConfigurationView(cfg_group, cfg_group_items) return wsgi.Result(view_data.data(), 200) def delete(self, req, tenant_id, id): msg = ("Deleting configuration group %(cfg_id)s on tenant: " "%(tenant_id)s") LOG.info(msg, {"tenant_id": tenant_id, "cfg_id": id}) context = req.environ[wsgi.CONTEXT_KEY] group = models.Configuration.load(context, id) self.authorize_config_action(context, 'delete', group) context.notification = notification.DBaaSConfigurationDelete( context, request=req) with StartNotification(context, configuration_id=id): instances = instances_models.DBInstance.find_all( tenant_id=context.project_id, configuration_id=id, deleted=False).all() if instances: raise exception.InstanceAssignedToConfiguration() models.Configuration.delete(context, group) return wsgi.Result(None, 202) def update(self, req, body, tenant_id, id): msg = ("Updating configuration group %(cfg_id)s for tenant " "id %(tenant_id)s") LOG.info(msg, {"tenant_id": tenant_id, "cfg_id": id}) context = req.environ[wsgi.CONTEXT_KEY] group = models.Configuration.load(context, id) # Note that changing the configuration group will also # indirectly affect all the instances which attach it. # # The Trove instance itself won't be changed (the same group is still # attached) but the configuration values will. # # The operator needs to keep this in mind when defining the related # policies. self.authorize_config_action(context, 'update', group) # if name/description are provided in the request body, update the # model with these values as well. 
if 'name' in body['configuration']: group.name = body['configuration']['name'] if 'description' in body['configuration']: group.description = body['configuration']['description'] context.notification = notification.DBaaSConfigurationUpdate( context, request=req) with StartNotification(context, configuration_id=id, name=group.name, description=group.description): items = self._configuration_items_list(group, body['configuration']) deleted_at = timeutils.utcnow() models.Configuration.remove_all_items(context, group.id, deleted_at) models.Configuration.save(group, items) self._refresh_on_all_instances(context, id) self._refresh_on_all_clusters(context, id) return wsgi.Result(None, 202) def edit(self, req, body, tenant_id, id): context = req.environ[wsgi.CONTEXT_KEY] group = models.Configuration.load(context, id) self.authorize_config_action(context, 'edit', group) context.notification = notification.DBaaSConfigurationEdit( context, request=req) with StartNotification(context, configuration_id=id): items = self._configuration_items_list(group, body['configuration']) models.Configuration.save(group, items) self._refresh_on_all_instances(context, id) self._refresh_on_all_clusters(context, id) def _refresh_on_all_instances(self, context, configuration_id): """Refresh a configuration group on all single instances. """ LOG.debug("Re-applying configuration group '%s' to all instances.", configuration_id) single_instances = instances_models.DBInstance.find_all( tenant_id=context.project_id, configuration_id=configuration_id, cluster_id=None, deleted=False).all() config = models.Configuration(context, configuration_id) for dbinstance in single_instances: LOG.debug("Re-applying configuration to instance: %s", dbinstance.id) instance = instances_models.Instance.load(context, dbinstance.id) instance.update_configuration(config) def _refresh_on_all_clusters(self, context, configuration_id): """Refresh a configuration group on all clusters. """ LOG.debug("Re-applying configuration group '%s' to all clusters.", configuration_id) clusters = cluster_models.DBCluster.find_all( tenant_id=context.tenant, configuration_id=configuration_id, deleted=False).all() for dbcluster in clusters: LOG.debug("Re-applying configuration to cluster: %s", dbcluster.id) cluster = cluster_models.Cluster.load(context, dbcluster.id) cluster.configuration_attach(configuration_id) def _configuration_items_list(self, group, configuration): ds_version_id = group.datastore_version_id ds_version = ds_models.DatastoreVersion.load_by_uuid(ds_version_id) items = [] if 'values' in configuration: # validate that the values passed in are permitted by the operator. 
ConfigurationsController._validate_configuration( configuration['values'], ds_version, models.DatastoreConfigurationParameters.load_parameters( ds_version.id)) for k, v in configuration['values'].items(): items.append(DBConfigurationParameter( configuration_id=group.id, configuration_key=k, configuration_value=v, deleted=False)) return items @staticmethod def _validate_configuration(values, datastore_version, config_rules): LOG.info("Validating configuration values") # create rules dictionary based on parameter name rules_lookup = {} for item in config_rules: rules_lookup[item.name.lower()] = item # checking if there are any rules for the datastore if not rules_lookup: output = {"version": datastore_version.name, "name": datastore_version.datastore_name} msg = _("Configuration groups are not supported for this " "datastore: %(name)s %(version)s") % output raise exception.UnprocessableEntity(message=msg) for k, v in values.items(): key = k.lower() # parameter name validation if key not in rules_lookup: output = {"key": k, "version": datastore_version.name, "name": datastore_version.datastore_name} msg = _("The configuration parameter %(key)s is not " "supported for this datastore: " "%(name)s %(version)s.") % output raise exception.UnprocessableEntity(message=msg) rule = rules_lookup[key] # type checking value_type = rule.data_type if not isinstance(v, ConfigurationsController._find_type( value_type)): output = {"key": k, "type": value_type} msg = _("The value provided for the configuration " "parameter %(key)s is not of type %(type)s.") % output raise exception.UnprocessableEntity(message=msg) # integer min/max checking if isinstance(v, six.integer_types) and not isinstance(v, bool): if rule.min_size is not None: try: min_value = int(rule.min_size) except ValueError: raise exception.TroveError(_( "Invalid or unsupported min value defined in the " "configuration-parameters configuration file. " "Expected integer.")) if v < min_value: output = {"key": k, "min": min_value} message = _( "The value for the configuration parameter " "%(key)s is less than the minimum allowed: " "%(min)s") % output raise exception.UnprocessableEntity(message=message) if rule.max_size is not None: try: max_value = int(rule.max_size) except ValueError: raise exception.TroveError(_( "Invalid or unsupported max value defined in the " "configuration-parameters configuration file. " "Expected integer.")) if v > max_value: output = {"key": k, "max": max_value} message = _( "The value for the configuration parameter " "%(key)s is greater than the maximum " "allowed: %(max)s") % output raise exception.UnprocessableEntity(message=message) @staticmethod def _find_type(value_type): if value_type == "boolean": return bool elif value_type == "string": return six.string_types elif value_type == "integer": return six.integer_types elif value_type == "float": return float else: raise exception.TroveError(_( "Invalid or unsupported type defined in the " "configuration-parameters configuration file.")) @staticmethod def _get_item(key, dictList): for item in dictList: if key == item.get('name'): return item raise exception.UnprocessableEntity( message=_("%s is not a supported configuration parameter.") % key) class ParametersController(wsgi.Controller): @classmethod def authorize_request(cls, req, rule_name): """Parameters (configuration templates) bind to a datastore. Datastores are not owned by any particular tenant so we only check the current tenant is allowed to perform the action. 
""" context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'configuration-parameter:%s' % rule_name) def index(self, req, tenant_id, datastore, id): self.authorize_request(req, 'index') ds, ds_version = ds_models.get_datastore_version( type=datastore, version=id) rules = models.DatastoreConfigurationParameters.load_parameters( ds_version.id) return wsgi.Result(views.ConfigurationParametersView(rules).data(), 200) def show(self, req, tenant_id, datastore, id, name): self.authorize_request(req, 'show') ds, ds_version = ds_models.get_datastore_version( type=datastore, version=id) rule = models.DatastoreConfigurationParameters.load_parameter_by_name( ds_version.id, name) return wsgi.Result(views.ConfigurationParameterView(rule).data(), 200) def index_by_version(self, req, tenant_id, version): self.authorize_request(req, 'index_by_version') ds_version = ds_models.DatastoreVersion.load_by_uuid(version) rules = models.DatastoreConfigurationParameters.load_parameters( ds_version.id) return wsgi.Result(views.ConfigurationParametersView(rules).data(), 200) def show_by_version(self, req, tenant_id, version, name): self.authorize_request(req, 'show_by_version') ds_models.DatastoreVersion.load_by_uuid(version) rule = models.DatastoreConfigurationParameters.load_parameter_by_name( version, name) return wsgi.Result(views.ConfigurationParameterView(rule).data(), 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/configuration/views.py0000644000175000017500000001074200000000000021670 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import strutils class ConfigurationView(object): def __init__(self, configuration): self.configuration = configuration def data(self): configuration_dict = { "id": self.configuration.id, "name": self.configuration.name, "description": self.configuration.description, "created": self.configuration.created, "updated": self.configuration.updated, "datastore_version_id": self.configuration.datastore_version_id, "datastore_name": self.configuration.datastore.name, "datastore_version_name": self.configuration.datastore_version.name} return {"configuration": configuration_dict} class ConfigurationsView(object): def __init__(self, configurations): self.configurations = configurations def data(self): data = [] for configuration in self.configurations: data.append(self.data_for_configuration(configuration)) return {"configurations": data} def data_for_configuration(self, configuration): view = ConfigurationView(configuration) return view.data()['configuration'] class DetailedConfigurationInstancesView(object): def __init__(self, instances): self.instances = instances def instance_data(self): instances_list = [] if self.instances: for instance in self.instances: instances_list.append( { "id": instance.id, "name": instance.name } ) return instances_list def data(self): return {"instances": self.instance_data()} class DetailedConfigurationView(object): def __init__(self, configuration, configuration_items): self.configuration = configuration self.configuration_items = configuration_items def data(self): values = {} for configItem in self.configuration_items: key = configItem.configuration_key value = configItem.configuration_value values[key] = value configuration_dict = { "id": self.configuration.id, "name": self.configuration.name, "description": self.configuration.description, "values": strutils.mask_dict_password(values), "created": self.configuration.created, "updated": self.configuration.updated, "instance_count": getattr(self.configuration, "instance_count", 0), "datastore_name": self.configuration.datastore.name, "datastore_version_id": self.configuration.datastore_version_id, "datastore_version_name": self.configuration.datastore_version.name } return {"configuration": configuration_dict} class ConfigurationParameterView(object): def __init__(self, config): self.config = config def data(self): # v1 api is to be a 'true' or 'false' json boolean instead of 1/0 restart_required = True if self.config.restart_required else False ret = { "name": self.config.name, "datastore_version_id": self.config.datastore_version_id, "restart_required": restart_required, "type": self.config.data_type, } if self.config.max_size: ret["max"] = int(self.config.max_size) if self.config.min_size: ret["min"] = int(self.config.min_size) return ret class ConfigurationParametersView(object): def __init__(self, configs): self.configs = configs def data(self): params = [] for p in self.configs: param = ConfigurationParameterView(p) params.append(param.data()) return {"configuration-parameters": params} ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.74411 trove-12.1.0.dev92/trove/datastore/0000755000175000017500000000000000000000000017274 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/datastore/__init__.py0000644000175000017500000000000000000000000021373 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/datastore/models.py0000644000175000017500000007420100000000000021135 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common import cfg from trove.common.clients import create_nova_client from trove.common import exception from trove.common.i18n import _ from trove.common import timeutils from trove.common import utils from trove.db import get_db_api from trove.db import models as dbmodels from trove.flavor.models import Flavor as flavor_model from trove.volume_type import models as volume_type_models LOG = logging.getLogger(__name__) CONF = cfg.CONF db_api = get_db_api() def persisted_models(): return { 'datastores': DBDatastore, 'capabilities': DBCapabilities, 'datastore_versions': DBDatastoreVersion, 'capability_overrides': DBCapabilityOverrides, 'datastore_version_metadata': DBDatastoreVersionMetadata } class DBDatastore(dbmodels.DatabaseModelBase): _data_fields = ['name', 'default_version_id'] _table_name = 'datastores' class DBCapabilities(dbmodels.DatabaseModelBase): _data_fields = ['name', 'description', 'enabled'] _table_name = 'capabilities' class DBCapabilityOverrides(dbmodels.DatabaseModelBase): _data_fields = ['capability_id', 'datastore_version_id', 'enabled'] _table_name = 'capability_overrides' class DBDatastoreVersion(dbmodels.DatabaseModelBase): _data_fields = ['datastore_id', 'name', 'image_id', 'packages', 'active', 'manager'] _table_name = 'datastore_versions' class DBDatastoreVersionMetadata(dbmodels.DatabaseModelBase): _data_fields = ['datastore_version_id', 'key', 'value', 'created', 'deleted', 'deleted_at', 'updated_at'] _table_name = 'datastore_version_metadata' class Capabilities(object): def __init__(self, datastore_version_id=None): self.capabilities = [] self.datastore_version_id = datastore_version_id def __contains__(self, item): return item in [capability.name for capability in self.capabilities] def __len__(self): return len(self.capabilities) def __iter__(self): for item in self.capabilities: yield item def __repr__(self): return '<%s: %s>' % (type(self), self.capabilities) def add(self, capability, enabled): """ Add a capability override to a datastore version. """ if self.datastore_version_id is not None: DBCapabilityOverrides.create( capability_id=capability.id, datastore_version_id=self.datastore_version_id, enabled=enabled) self._load() def _load(self): """ Bulk load and override default capabilities with configured datastore version specific settings. 
""" capability_defaults = [Capability(c) for c in DBCapabilities.find_all()] capability_overrides = [] if self.datastore_version_id is not None: # This should always happen but if there is any future case where # we don't have a datastore version id number it won't stop # defaults from rendering. capability_overrides = [ CapabilityOverride(ce) for ce in DBCapabilityOverrides.find_all( datastore_version_id=self.datastore_version_id) ] def override(cap): # This logic is necessary to apply datastore version specific # capability overrides when they are present in the database. for capability_override in capability_overrides: if cap.id == capability_override.capability_id: # we have a mapped entity that indicates this datastore # version has an override so we honor that. return capability_override # There were no overrides for this capability so we just hand it # right back. return cap self.capabilities = [override(obj) for obj in capability_defaults] LOG.debug('Capabilities for datastore %(ds_id)s: %(capabilities)s', {'ds_id': self.datastore_version_id, 'capabilities': self.capabilities}) @classmethod def load(cls, datastore_version_id=None): """ Generates a Capabilities object by looking up all capabilities from defaults and overrides and provides the one structure that should be used as the interface to controlling capabilities per datastore. :returns: Capabilities """ self = cls(datastore_version_id) self._load() return self class BaseCapability(object): def __init__(self, db_info): self.db_info = db_info def __repr__(self): return ('<%(my_class)s: name: %(name)s, enabled: %(enabled)s>' % {'my_class': type(self), 'name': self.name, 'enabled': self.enabled}) @property def id(self): """ The capability's id :returns: str """ return self.db_info.id @property def enabled(self): """ Is the capability/feature enabled? :returns: bool """ return self.db_info.enabled def enable(self): """ Enable the capability. """ self.db_info.enabled = True self.db_info.save() def disable(self): """ Disable the capability """ self.db_info.enabled = False self.db_info.save() def delete(self): """ Delete the capability from the database. """ self.db_info.delete() class CapabilityOverride(BaseCapability): """ A capability override is simply an setting that applies to a specific datastore version that overrides the default setting in the base capability's entry for Trove. """ def __init__(self, db_info): super(CapabilityOverride, self).__init__(db_info) # This *may* be better solved with a join in the SQLAlchemy model but # I was unable to get our query object to work properly for this. parent_capability = Capability.load(db_info.capability_id) if parent_capability: self.parent_name = parent_capability.name self.parent_description = parent_capability.description else: raise exception.CapabilityNotFound( _("Somehow we got a datastore version capability without a " "parent, that shouldn't happen. %s") % db_info.capability_id) @property def name(self): """ The name of the capability. :returns: str """ return self.parent_name @property def description(self): """ The description of the capability. :returns: str """ return self.parent_description @property def capability_id(self): """ Because capability overrides is an association table there are times where having the capability id is necessary. :returns: str """ return self.db_info.capability_id @classmethod def load(cls, capability_id): """ Generates a CapabilityOverride object from the capability_override id. 
:returns: CapabilityOverride """ try: return cls(DBCapabilityOverrides.find_by( capability_id=capability_id)) except exception.ModelNotFoundError: raise exception.CapabilityNotFound( _("Capability Override not found for " "capability %s") % capability_id) @classmethod def create(cls, capability, datastore_version_id, enabled): """ Create a new CapabilityOverride. :param capability: The capability to be overridden for this DS Version :param datastore_version_id: The datastore version to apply the override to. :param enabled: Set enabled to True or False :returns: CapabilityOverride """ return CapabilityOverride( DBCapabilityOverrides.create( capability_id=capability.id, datastore_version_id=datastore_version_id, enabled=enabled) ) class Capability(BaseCapability): @property def name(self): """ The Capability name :returns: str """ return self.db_info.name @property def description(self): """ The Capability description :returns: str """ return self.db_info.description @classmethod def load(cls, capability_id_or_name): """ Generates a Capability object by looking up the capability first by ID then by name. :returns: Capability """ try: return cls(DBCapabilities.find_by(id=capability_id_or_name)) except exception.ModelNotFoundError: try: return cls(DBCapabilities.find_by(name=capability_id_or_name)) except exception.ModelNotFoundError: raise exception.CapabilityNotFound( capability=capability_id_or_name) @classmethod def create(cls, name, description, enabled=False): """ Creates a new capability. :returns: Capability """ return Capability(DBCapabilities.create( name=name, description=description, enabled=enabled)) class Datastore(object): def __init__(self, db_info): self.db_info = db_info def __repr__(self, *args, **kwargs): return "%s(%s)" % (self.name, self.id) @classmethod def load(cls, id_or_name): try: return cls(DBDatastore.find_by(id=id_or_name)) except exception.ModelNotFoundError: try: return cls(DBDatastore.find_by(name=id_or_name)) except exception.ModelNotFoundError: raise exception.DatastoreNotFound(datastore=id_or_name) @property def id(self): return self.db_info.id @property def name(self): return self.db_info.name @property def default_version_id(self): return self.db_info.default_version_id def delete(self): self.db_info.delete() class Datastores(object): def __init__(self, db_info): self.db_info = db_info @classmethod def load(cls, only_active=True): datastores = DBDatastore.find_all() if only_active: datastores = datastores.join(DBDatastoreVersion).filter( DBDatastoreVersion.active == 1) return cls(datastores) def __iter__(self): for item in self.db_info: yield item class DatastoreVersion(object): def __init__(self, db_info): self._capabilities = None self.db_info = db_info self._datastore_name = None def __repr__(self, *args, **kwargs): return "%s(%s)" % (self.name, self.id) @classmethod def load(cls, datastore, id_or_name): try: return cls(DBDatastoreVersion.find_by(datastore_id=datastore.id, id=id_or_name)) except exception.ModelNotFoundError: versions = DBDatastoreVersion.find_all(datastore_id=datastore.id, name=id_or_name) if versions.count() == 0: raise exception.DatastoreVersionNotFound(version=id_or_name) if versions.count() > 1: raise exception.NoUniqueMatch(name=id_or_name) return cls(versions.first()) @classmethod def load_by_uuid(cls, uuid): try: return cls(DBDatastoreVersion.find_by(id=uuid)) except exception.ModelNotFoundError: raise exception.DatastoreVersionNotFound(version=uuid) def delete(self): self.db_info.delete() @property def id(self): return 
self.db_info.id @property def datastore_id(self): return self.db_info.datastore_id @property def datastore_name(self): if self._datastore_name is None: self._datastore_name = Datastore.load(self.datastore_id).name return self._datastore_name # TODO(tim.simpson): This would be less confusing if it was called # "version" and datastore_name was called "name". @property def name(self): return self.db_info.name @property def image_id(self): return self.db_info.image_id @property def packages(self): return self.db_info.packages @property def active(self): return (True if self.db_info.active else False) @property def manager(self): return self.db_info.manager @property def default(self): datastore = Datastore.load(self.datastore_id) return (datastore.default_version_id == self.db_info.id) @property def capabilities(self): if self._capabilities is None: self._capabilities = Capabilities.load(self.db_info.id) return self._capabilities class DatastoreVersions(object): def __init__(self, db_info): self.db_info = db_info @classmethod def load(cls, id_or_name, only_active=True): datastore = Datastore.load(id_or_name) if only_active: versions = DBDatastoreVersion.find_all(datastore_id=datastore.id, active=True) else: versions = DBDatastoreVersion.find_all(datastore_id=datastore.id) return cls(versions) @classmethod def load_all(cls, only_active=True): if only_active: return cls(DBDatastoreVersion.find_all(active=True)) return cls(DBDatastoreVersion.find_all()) def __iter__(self): for item in self.db_info: yield item def get_datastore_version(type=None, version=None, return_inactive=False): datastore = type or CONF.default_datastore if not datastore: raise exception.DatastoreDefaultDatastoreNotDefined() try: datastore = Datastore.load(datastore) except exception.DatastoreNotFound: if not type: raise exception.DatastoreDefaultDatastoreNotFound( datastore=datastore) raise version = version or datastore.default_version_id if not version: raise exception.DatastoreDefaultVersionNotFound( datastore=datastore.name) datastore_version = DatastoreVersion.load(datastore, version) if datastore_version.datastore_id != datastore.id: raise exception.DatastoreNoVersion(datastore=datastore.name, version=datastore_version.name) if not datastore_version.active and not return_inactive: raise exception.DatastoreVersionInactive( version=datastore_version.name) return (datastore, datastore_version) def get_datastore_or_version(datastore=None, datastore_version=None): """ Validate that the specified datastore/version exists, and return the corresponding ids. This differs from 'get_datastore_version' in that you don't need to specify both - specifying only a datastore will return 'None' in the ds_ver field. Raises DatastoreNoVersion if you pass in a ds_ver without a ds. Originally designed for module management. :param datastore: Datastore name or id :param datastore_version: Version name or id :return: Tuple of ds_id, ds_ver_id if found """ datastore_id = None datastore_version_id = None if datastore: if datastore_version: ds, ds_ver = get_datastore_version( type=datastore, version=datastore_version) datastore_id = ds.id datastore_version_id = ds_ver.id else: ds = Datastore.load(datastore) datastore_id = ds.id elif datastore_version: # Cannot specify version without datastore. 
raise exception.DatastoreNoVersion( datastore=datastore, version=datastore_version) return datastore_id, datastore_version_id def update_datastore(name, default_version): db_api.configure_db(CONF) try: datastore = DBDatastore.find_by(name=name) except exception.ModelNotFoundError: # Create a new one datastore = DBDatastore() datastore.id = utils.generate_uuid() datastore.name = name if default_version: version = DatastoreVersion.load(datastore, default_version) if not version.active: raise exception.DatastoreVersionInactive(version=version.name) datastore.default_version_id = version.id else: datastore.default_version_id = None db_api.save(datastore) def update_datastore_version(datastore, name, manager, image_id, packages, active): db_api.configure_db(CONF) datastore = Datastore.load(datastore) try: version = DBDatastoreVersion.find_by(datastore_id=datastore.id, name=name) except exception.ModelNotFoundError: # Create a new one version = DBDatastoreVersion() version.id = utils.generate_uuid() version.name = name version.datastore_id = datastore.id version.manager = manager version.image_id = image_id version.packages = packages version.active = active db_api.save(version) class DatastoreVersionMetadata(object): @classmethod def _datastore_version_find(cls, datastore_name, datastore_version_name): """ Helper to find a datastore version id for a given datastore and datastore version name. """ db_api.configure_db(CONF) db_ds_record = DBDatastore.find_by( name=datastore_name ) db_dsv_record = DBDatastoreVersion.find_by( datastore_id=db_ds_record.id, name=datastore_version_name ) return db_dsv_record.id @classmethod def _datastore_version_metadata_add(cls, datastore_name, datastore_version_name, datastore_version_id, key, value, exception_class): """ Create a record of the specified key and value in the metadata table. """ # if an association does not exist, create a new one. # if a deleted association exists, undelete it. # if an un-deleted association exists, raise an exception. try: db_record = DBDatastoreVersionMetadata.find_by( datastore_version_id=datastore_version_id, key=key, value=value) if db_record.deleted == 1: db_record.deleted = 0 db_record.updated_at = timeutils.utcnow() db_record.save() return else: raise exception_class( datastore=datastore_name, datastore_version=datastore_version_name, id=value) except exception.NotFound: pass # the record in the database only contains the datastore_verion_id DBDatastoreVersionMetadata.create( datastore_version_id=datastore_version_id, key=key, value=value) @classmethod def _datastore_version_metadata_delete(cls, datastore_name, datastore_version_name, key, value, exception_class): """ Delete a record of the specified key and value in the metadata table. 
""" # if an association does not exist, raise an exception # if a deleted association exists, raise an exception # if an un-deleted association exists, delete it datastore_version_id = cls._datastore_version_find( datastore_name, datastore_version_name) try: db_record = DBDatastoreVersionMetadata.find_by( datastore_version_id=datastore_version_id, key=key, value=value) if db_record.deleted == 0: db_record.delete() return else: raise exception_class( datastore=datastore_name, datastore_version=datastore_version_name, id=value) except exception.ModelNotFoundError: raise exception_class(datastore=datastore_name, datastore_version=datastore_version_name, id=value) @classmethod def add_datastore_version_flavor_association(cls, datastore_name, datastore_version_name, flavor_ids): datastore_version_id = cls._datastore_version_find( datastore_name, datastore_version_name) for flavor_id in flavor_ids: cls._datastore_version_metadata_add( datastore_name, datastore_version_name, datastore_version_id, 'flavor', flavor_id, exception.DatastoreFlavorAssociationAlreadyExists) @classmethod def delete_datastore_version_flavor_association(cls, datastore_name, datastore_version_name, flavor_id): cls._datastore_version_metadata_delete( datastore_name, datastore_version_name, 'flavor', flavor_id, exception.DatastoreFlavorAssociationNotFound) @classmethod def list_datastore_version_flavor_associations(cls, context, datastore_type, datastore_version_id): if datastore_type and datastore_version_id: """ All nova flavors are permitted for a datastore_version unless one or more entries are found in datastore_version_metadata, in which case only those are permitted. """ (datastore, datastore_version) = get_datastore_version( type=datastore_type, version=datastore_version_id) # If datastore_version_id and flavor key exists in the # metadata table return all the associated flavors for # that datastore version. nova_flavors = create_nova_client(context).flavors.list() bound_flavors = DBDatastoreVersionMetadata.find_all( datastore_version_id=datastore_version.id, key='flavor', deleted=False ) if (bound_flavors.count() != 0): bound_flavors = tuple(f.value for f in bound_flavors) # Generate a filtered list of nova flavors ds_nova_flavors = (f for f in nova_flavors if f.id in bound_flavors) associated_flavors = tuple(flavor_model(flavor=item) for item in ds_nova_flavors) else: # Return all nova flavors if no flavor metadata found # for datastore_version. 
associated_flavors = tuple(flavor_model(flavor=item) for item in nova_flavors) return associated_flavors else: msg = _("Specify both the datastore and datastore_version_id.") raise exception.BadRequest(msg) @classmethod def add_datastore_version_volume_type_association(cls, datastore_name, datastore_version_name, volume_type_names): datastore_version_id = cls._datastore_version_find( datastore_name, datastore_version_name) # the database record will contain # datastore_version_id, 'volume_type', volume_type_name for volume_type_name in volume_type_names: cls._datastore_version_metadata_add( datastore_name, datastore_version_name, datastore_version_id, 'volume_type', volume_type_name, exception.DatastoreVolumeTypeAssociationAlreadyExists) @classmethod def delete_datastore_version_volume_type_association( cls, datastore_name, datastore_version_name, volume_type_name): cls._datastore_version_metadata_delete( datastore_name, datastore_version_name, 'volume_type', volume_type_name, exception.DatastoreVolumeTypeAssociationNotFound) @classmethod def list_datastore_version_volume_type_associations(cls, datastore_version_id): """ List the datastore associations for a given datastore version id as found in datastore version metadata. Note that this may return an empty set (if no associations are provided) """ if datastore_version_id: return DBDatastoreVersionMetadata.find_all( datastore_version_id=datastore_version_id, key='volume_type', deleted=False ) else: msg = _("Specify the datastore_version_id.") raise exception.BadRequest(msg) @classmethod def list_datastore_volume_type_associations(cls, datastore_name, datastore_version_name): """ List the datastore associations for a given datastore and version. """ if datastore_name and datastore_version_name: datastore_version_id = cls._datastore_version_find( datastore_name, datastore_version_name) return cls.list_datastore_version_volume_type_associations( datastore_version_id) else: msg = _("Specify the datastore_name and datastore_version_name.") raise exception.BadRequest(msg) @classmethod def datastore_volume_type_associations_exist(cls, datastore_name, datastore_version_name): return cls.list_datastore_volume_type_associations( datastore_name, datastore_version_name).count() > 0 @classmethod def allowed_datastore_version_volume_types(cls, context, datastore_name, datastore_version_name): """ List all allowed volume types for a given datastore and datastore version. If datastore version metadata is provided, then the valid volume types in that list are allowed. If datastore version metadata is not provided then all volume types known to cinder are allowed. """ if datastore_name and datastore_version_name: # first obtain the list in the dsvmetadata datastore_version_id = cls._datastore_version_find( datastore_name, datastore_version_name) metadata = cls.list_datastore_version_volume_type_associations( datastore_version_id) # then get the list of all volume types all_volume_types = volume_type_models.VolumeTypes(context) # if there's metadata: intersect, # else, whatever cinder has. 
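# Illustrative sketch (hypothetical volume-type names): the analogous
# volume-type calls. The intersection below then filters what cinder
# actually reports against these stored names/ids.
#
#     DatastoreVersionMetadata.add_datastore_version_volume_type_association(
#         'mysql', '5.7', volume_type_names=['lvm', 'ceph'])
#     allowed = (DatastoreVersionMetadata.
#                allowed_datastore_version_volume_types(context, 'mysql',
#                                                       '5.7'))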
if (metadata.count() != 0): # the volume types from metadata first ds_volume_types = tuple(f.value for f in metadata) # Cinder volume type names are unique, intersect allowed_volume_types = tuple( f for f in all_volume_types if ((f.name in ds_volume_types) or (f.id in ds_volume_types))) else: allowed_volume_types = tuple(all_volume_types) return allowed_volume_types else: msg = _("Specify the datastore_name and datastore_version_name.") raise exception.BadRequest(msg) @classmethod def validate_volume_type(cls, context, volume_type, datastore_name, datastore_version_name): if cls.datastore_volume_type_associations_exist( datastore_name, datastore_version_name): allowed = cls.allowed_datastore_version_volume_types( context, datastore_name, datastore_version_name) if len(allowed) == 0: raise exception.DatastoreVersionNoVolumeTypes( datastore=datastore_name, datastore_version=datastore_version_name) if volume_type is None: raise exception.DataStoreVersionVolumeTypeRequired( datastore=datastore_name, datastore_version=datastore_version_name) allowed_names = tuple(f.name for f in allowed) for n in allowed_names: LOG.debug("Volume Type: %s is allowed for datastore " "%s, version %s.", n, datastore_name, datastore_version_name) if volume_type not in allowed_names: raise exception.DatastoreVolumeTypeAssociationNotFound( datastore=datastore_name, version_id=datastore_version_name, id=volume_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/datastore/service.py0000644000175000017500000001243100000000000021307 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.common import exception from trove.common import policy from trove.common import wsgi from trove.datastore import models, views from trove.flavor import views as flavor_views from trove.volume_type import views as volume_type_view LOG = logging.getLogger(__name__) class DatastoreController(wsgi.Controller): @classmethod def authorize_request(cls, req, rule_name): """Datastores are not owned by any particular tenant so we only check the current tenant is allowed to perform the action. """ context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'datastore:%s' % rule_name) def show(self, req, tenant_id, id): self.authorize_request(req, 'show') datastore = models.Datastore.load(id) datastore_versions = (models.DatastoreVersions.load(datastore.id)) return wsgi.Result(views.
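# Illustrative sketch (hypothetical values): guarding a create request with
# validate_volume_type() from models above. The check is a no-op when the
# version has no volume-type metadata; otherwise a type outside the allowed
# set raises DatastoreVolumeTypeAssociationNotFound.
#
#     models.DatastoreVersionMetadata.validate_volume_type(
#         context, 'lvm', 'mysql', '5.7')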
DatastoreView(datastore, datastore_versions, req).data(), 200) def index(self, req, tenant_id): self.authorize_request(req, 'index') context = req.environ[wsgi.CONTEXT_KEY] only_active = True if context.is_admin: only_active = False datastores = models.Datastores.load(only_active) datastores_versions = models.DatastoreVersions.load_all(only_active) return wsgi.Result(views. DatastoresView(datastores, datastores_versions, req).data(), 200) def version_show(self, req, tenant_id, datastore, id): self.authorize_request(req, 'version_show') datastore = models.Datastore.load(datastore) datastore_version = models.DatastoreVersion.load(datastore, id) return wsgi.Result(views.DatastoreVersionView(datastore_version, req).data(), 200) def version_show_by_uuid(self, req, tenant_id, uuid): self.authorize_request(req, 'version_show_by_uuid') datastore_version = models.DatastoreVersion.load_by_uuid(uuid) return wsgi.Result(views.DatastoreVersionView(datastore_version, req).data(), 200) def version_index(self, req, tenant_id, datastore): self.authorize_request(req, 'version_index') context = req.environ[wsgi.CONTEXT_KEY] only_active = True if context.is_admin: only_active = False datastore_versions = models.DatastoreVersions.load(datastore, only_active) return wsgi.Result(views. DatastoreVersionsView(datastore_versions, req).data(), 200) def list_associated_flavors(self, req, tenant_id, datastore, version_id): """ All nova flavors are returned for a datastore-version unless one or more entries are found in datastore_version_metadata, in which case only those are returned. """ self.authorize_request(req, 'list_associated_flavors') context = req.environ[wsgi.CONTEXT_KEY] flavors = (models.DatastoreVersionMetadata. list_datastore_version_flavor_associations( context, datastore, version_id)) return wsgi.Result(flavor_views.FlavorsView(flavors, req).data(), 200) def list_associated_volume_types(self, req, tenant_id, datastore, version_id): """ Return all known volume types if no restrictions have been established in datastore_version_metadata, otherwise return that restricted set. """ context = req.environ[wsgi.CONTEXT_KEY] volume_types = (models.DatastoreVersionMetadata. allowed_datastore_version_volume_types( context, datastore, version_id)) return wsgi.Result(volume_type_view.VolumeTypesView( volume_types, req).data(), 200) def delete(self, req, tenant_id, id): """Remove an existing datastore.""" self.authorize_request(req, 'delete') ds_versions = models.DatastoreVersions.load(id, only_active=False) if len(ds_versions.db_info.all()) > 0: raise exception.DatastoreVersionsExist(datastore=id) LOG.info("Deleting datastore %s", id) datastore = models.Datastore.load(id) datastore.delete() return wsgi.Result(None, 202) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/datastore/views.py0000644000175000017500000001046200000000000021006 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
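# Illustrative sketch (endpoint layout assumed from the controller above;
# host, port, tenant_id and token are hypothetical): exercising the
# datastore API over HTTP.
#
#     import requests
#     base = 'http://trove-api:8779/v1.0/' + tenant_id
#     headers = {'X-Auth-Token': token}
#     datastores = requests.get(base + '/datastores', headers=headers).json()
#     versions = requests.get(base + '/datastores/mysql/versions',
#                             headers=headers).json()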
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.common.views import create_links from trove.common import wsgi class DatastoreView(object): def __init__(self, datastore, datastore_versions, req=None): self.datastore = datastore self.datastore_versions = datastore_versions self.req = req def data(self): datastore_dict = { "id": self.datastore.id, "name": self.datastore.name, "links": self._build_links(), } datastore_dict.update(DatastoreVersionsView(self.datastore_versions, self.req).data(False)) default_version = self.datastore.default_version_id if default_version: datastore_dict["default_version"] = default_version return {"datastore": datastore_dict} def _build_links(self): return create_links("datastores", self.req, self.datastore.id) class DatastoresView(object): def __init__(self, datastores, datastores_versions, req=None): self.datastores = datastores self.datastores_versions = datastores_versions self.req = req def data(self): data = [] for datastore in self.datastores: datastores_versions = [ datastore_version for datastore_version in self.datastores_versions if datastore_version.datastore_id == datastore.id] data.append(self.data_for_datastore(datastore, datastores_versions)) return {'datastores': data} def data_for_datastore(self, datastore, datastore_versions): view = DatastoreView(datastore, datastore_versions, req=self.req) return view.data()['datastore'] class DatastoreVersionView(object): def __init__(self, datastore_version, req=None): self.datastore_version = datastore_version self.req = req self.context = req.environ[wsgi.CONTEXT_KEY] def data(self, include_datastore_id=True): datastore_version_dict = { "id": self.datastore_version.id, "name": self.datastore_version.name, "links": self._build_links(), } if include_datastore_id: datastore_version_dict["datastore"] = (self.datastore_version. datastore_id) if self.context.is_admin: datastore_version_dict['active'] = self.datastore_version.active datastore_version_dict['packages'] = (self.datastore_version. packages) datastore_version_dict['image'] = self.datastore_version.image_id return {"version": datastore_version_dict} def _build_links(self): return create_links("datastores/versions", self.req, self.datastore_version.id) class DatastoreVersionsView(object): def __init__(self, datastore_versions, req=None): self.datastore_versions = datastore_versions self.req = req def data(self, include_datastore_id=True): data = [] for datastore_version in self.datastore_versions: data.append(self. 
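# Illustrative sketch: the shape of the document DatastoreView.data()
# returns (all values are made-up examples; 'versions' is merged in from
# DatastoreVersionsView, and 'default_version' appears only when set).
#
#     {"datastore": {"id": "a1b2...", "name": "mysql",
#                    "links": [{"href": "...", "rel": "self"}, ...],
#                    "versions": [{"id": "c3d4...", "name": "5.7",
#                                  "links": [...]}],
#                    "default_version": "c3d4..."}}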
data_for_datastore_version(datastore_version, include_datastore_id)) return {'versions': data} def data_for_datastore_version(self, datastore_version, include_datastore_id): view = DatastoreVersionView(datastore_version, req=self.req) return view.data(include_datastore_id)['version'] ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1586541819.74411 trove-12.1.0.dev92/trove/db/0000755000175000017500000000000000000000000015673 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/db/__init__.py0000644000175000017500000000553000000000000020007 0ustar00coreycorey00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import cfg from trove.common import utils CONF = cfg.CONF def get_db_api(): return utils.import_module(CONF.db_api_implementation) class Query(object): """Mimics sqlalchemy query object. This class allows us to store query conditions and use them with bulk updates and deletes just like sqlalchemy query object. Using this class makes the models independent of sqlalchemy """ def __init__(self, model, query_func, **conditions): self._query_func = query_func self._model = model self._conditions = conditions self.db_api = get_db_api() def all(self): return self.db_api.list(self._query_func, self._model, **self._conditions) def count(self): return self.db_api.count(self._query_func, self._model, **self._conditions) def first(self): return self.db_api.first(self._query_func, self._model, **self._conditions) def join(self, *args): return self.db_api.join(self._query_func, self._model, *args) def __iter__(self): return iter(self.all()) def update(self, **values): self.db_api.update_all(self._query_func, self._model, self._conditions, values) def delete(self): self.db_api.delete_all(self._query_func, self._model, **self._conditions) def limit(self, limit=200, marker=None, marker_column=None): return self.db_api.find_all_by_limit( self._query_func, self._model, self._conditions, limit=limit, marker=marker, marker_column=marker_column) def paginated_collection(self, limit=200, marker=None, marker_column=None): collection = self.limit(int(limit) + 1, marker, marker_column) if len(collection) > int(limit): return (collection[0:-1], collection[-2]['id']) return (collection, None) class Queryable(object): def __getattr__(self, item): return lambda model, **conditions: Query( model, query_func=getattr(get_db_api(), item), **conditions) db_query = Queryable() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/models.py0000644000175000017500000001200100000000000017522 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
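# Illustrative sketch (hypothetical conditions): how the Query/Queryable
# pair above is used. Attribute access on db_query resolves to the db_api
# function of the same name, and the resulting Query defers execution until
# all(), first(), count() or iteration.
#
#     query = db_query.find_all(DBDatastoreVersion, active=True)
#     total = query.count()
#     names = [record.name for record in query]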
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import strutils from trove.common import exception from trove.common.i18n import _ from trove.common import models from trove.common import pagination from trove.common import timeutils from trove.common import utils from trove.db import db_query from trove.db import get_db_api LOG = logging.getLogger(__name__) class DatabaseModelBase(models.ModelBase): _auto_generated_attrs = ['id'] @classmethod def create(cls, **values): init_vals = { 'id': utils.generate_uuid(), 'created': timeutils.utcnow(), } if hasattr(cls, 'deleted'): init_vals['deleted'] = False init_vals.update(values) instance = cls(**init_vals) if not instance.is_valid(): raise exception.InvalidModelError(errors=instance.errors) return instance.save() @property def db_api(self): return get_db_api() @property def preserve_on_delete(self): return hasattr(self, 'deleted') and hasattr(self, 'deleted_at') @classmethod def query(cls): return get_db_api()._base_query(cls) def save(self): if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) self['updated'] = timeutils.utcnow() LOG.debug("Saving %(name)s: %(dict)s", {'name': self.__class__.__name__, 'dict': strutils.mask_dict_password(self.__dict__)}) return self.db_api.save(self) def delete(self): self['updated'] = timeutils.utcnow() LOG.debug("Deleting %(name)s: %(dict)s", {'name': self.__class__.__name__, 'dict': strutils.mask_dict_password(self.__dict__)}) if self.preserve_on_delete: self['deleted_at'] = timeutils.utcnow() self['deleted'] = True return self.db_api.save(self) else: return self.db_api.delete(self) def update(self, **values): for key in values: if hasattr(self, key): setattr(self, key, values[key]) self['updated'] = timeutils.utcnow() return self.db_api.save(self) def __init__(self, **kwargs): self.merge_attributes(kwargs) if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) def merge_attributes(self, values): """dict.update() behaviour.""" for k, v in values.items(): self[k] = v @classmethod def find_by(cls, context=None, **conditions): model = cls.get_by(**conditions) if model is None: raise exception.ModelNotFoundError(_("%(s_name)s Not Found") % {"s_name": cls.__name__}) if ((context and not context.is_admin and hasattr(model, 'tenant_id') and model.tenant_id != context.project_id)): log_fmt = ("Tenant %(s_tenant)s tried to access " "%(s_name)s, owned by %(s_owner)s.") exc_fmt = _("Tenant %(s_tenant)s tried to access " "%(s_name)s, owned by %(s_owner)s.") msg_content = { "s_tenant": context.project_id, "s_name": cls.__name__, "s_owner": model.tenant_id} LOG.error(log_fmt, msg_content) raise exception.ModelNotFoundError(exc_fmt % msg_content) return model @classmethod def find_by_filter(cls, **kwargs): return db_query.find_by_filter(cls, **cls._process_conditions(kwargs)) @classmethod def get_by(cls, **kwargs): return get_db_api().find_by(cls, **cls._process_conditions(kwargs)) @classmethod def find_all(cls, **kwargs): return db_query.find_all(cls, **cls._process_conditions(kwargs)) @classmethod def _process_conditions(cls, raw_conditions): """Override in inheritors 
to format/modify any conditions.""" return raw_conditions @classmethod def find_by_pagination(cls, collection_type, collection_query, paginated_url, **kwargs): elements, next_marker = collection_query.paginated_collection(**kwargs) return pagination.PaginatedDataView(collection_type, elements, paginated_url, next_marker) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7481103 trove-12.1.0.dev92/trove/db/sqlalchemy/0000755000175000017500000000000000000000000020035 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/__init__.py0000644000175000017500000000000000000000000022134 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/api.py0000644000175000017500000000723700000000000021171 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy.exc from trove.common import exception from trove.db.sqlalchemy import migration from trove.db.sqlalchemy import session def list(query_func, *args, **kwargs): return query_func(*args, **kwargs).all() def count(query, *args, **kwargs): return query(*args, **kwargs).count() def first(query, *args, **kwargs): return query(*args, **kwargs).first() def join(query, model, *args): return query(model).join(*args) def find_all(model, **conditions): return _query_by(model, **conditions) def find_all_by_limit(query_func, model, conditions, limit, marker=None, marker_column=None): return _limits(query_func, model, conditions, limit, marker, marker_column).all() def find_by(model, **kwargs): return _query_by(model, **kwargs).first() def find_by_filter(model, **kwargs): filters = kwargs.pop('filters', []) return _query_by_filter(model, *filters, **kwargs) def save(model): try: db_session = session.get_session() model = db_session.merge(model) db_session.flush() return model except sqlalchemy.exc.IntegrityError as error: raise exception.DBConstraintError(model_name=model.__class__.__name__, error=str(error.orig)) def delete(model): db_session = session.get_session() model = db_session.merge(model) db_session.delete(model) db_session.flush() def delete_all(query_func, model, **conditions): query_func(model, **conditions).delete() def update(model, **values): for k, v in values.items(): model[k] = v def update_all(query_func, model, conditions, values): query_func(model, **conditions).update(values) def configure_db(options, *plugins): session.configure_db(options) configure_db_for_plugins(options, *plugins) def configure_db_for_plugins(options, *plugins): for plugin in plugins: session.configure_db(options, models_mapper=plugin.mapper) def drop_db(options): session.drop_db(options) def clean_db(): session.clean_db() def db_sync(options, version=None, repo_path=None): 
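# Illustrative sketch (hypothetical values): the contract DatabaseModelBase
# in trove/db/models.py establishes. create() fills in 'id'/'created',
# save() validates and stamps 'updated', and delete() soft-deletes only
# when the model defines 'deleted'/'deleted_at' columns.
#
#     record = DBDatastore.create(name='mysql', manager='mysql')
#     found = DBDatastore.find_by(name='mysql')  # ModelNotFoundError if absent
#     found.update(default_version_id=version_id)
#     found.delete()  # hard delete here; DBDatastore has no 'deleted' column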
migration.db_sync(options, version, repo_path) def db_upgrade(options, version=None, repo_path=None): migration.upgrade(options, version, repo_path) def db_reset(options, *plugins): drop_db(options) db_sync(options) configure_db(options) def _base_query(cls): return session.get_session().query(cls) def _query_by(cls, **conditions): query = _base_query(cls) if conditions: query = query.filter_by(**conditions) return query def _query_by_filter(cls, *filters, **conditions): query = _query_by(cls, **conditions) if filters: query = query.filter(*filters) return query def _limits(query_func, model, conditions, limit, marker, marker_column=None): query = query_func(model, **conditions) marker_column = marker_column or model.id if marker: query = query.filter(marker_column > marker) return query.order_by(marker_column).limit(limit) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/mappers.py0000644000175000017500000000722100000000000022060 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import orm from sqlalchemy.orm import exc as orm_exc from sqlalchemy import Table def map(engine, models): meta = MetaData() meta.bind = engine if mapping_exists(models['instances']): return orm.mapper(models['instances'], Table('instances', meta, autoload=True)) orm.mapper(models['instance_faults'], Table('instance_faults', meta, autoload=True)) orm.mapper(models['root_enabled_history'], Table('root_enabled_history', meta, autoload=True)) orm.mapper(models['datastores'], Table('datastores', meta, autoload=True)) orm.mapper(models['datastore_versions'], Table('datastore_versions', meta, autoload=True)) orm.mapper(models['datastore_version_metadata'], Table('datastore_version_metadata', meta, autoload=True)) orm.mapper(models['capabilities'], Table('capabilities', meta, autoload=True)) orm.mapper(models['capability_overrides'], Table('capability_overrides', meta, autoload=True)) orm.mapper(models['service_statuses'], Table('service_statuses', meta, autoload=True)) orm.mapper(models['dns_records'], Table('dns_records', meta, autoload=True)) orm.mapper(models['agent_heartbeats'], Table('agent_heartbeats', meta, autoload=True)) orm.mapper(models['quotas'], Table('quotas', meta, autoload=True)) orm.mapper(models['quota_usages'], Table('quota_usages', meta, autoload=True)) orm.mapper(models['reservations'], Table('reservations', meta, autoload=True)) orm.mapper(models['backups'], Table('backups', meta, autoload=True)) orm.mapper(models['security_groups'], Table('security_groups', meta, autoload=True)) orm.mapper(models['security_group_rules'], Table('security_group_rules', meta, autoload=True)) orm.mapper(models['security_group_instance_associations'], Table('security_group_instance_associations', meta, autoload=True)) orm.mapper(models['configurations'], Table('configurations', meta, autoload=True)) 
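# Illustrative note: map() wires each model class to its table with a
# classical mapping; a new table follows the same one-line pattern (the
# 'my_new_table' name below is hypothetical).
#
#     orm.mapper(models['my_new_table'],
#                Table('my_new_table', meta, autoload=True))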
orm.mapper(models['configuration_parameters'], Table('configuration_parameters', meta, autoload=True)) orm.mapper(models['conductor_lastseen'], Table('conductor_lastseen', meta, autoload=True)) orm.mapper(models['clusters'], Table('clusters', meta, autoload=True)) orm.mapper(models['datastore_configuration_parameters'], Table('datastore_configuration_parameters', meta, autoload=True)) orm.mapper(models['modules'], Table('modules', meta, autoload=True)) orm.mapper(models['instance_modules'], Table('instance_modules', meta, autoload=True)) def mapping_exists(model): try: orm.class_mapper(model) return True except orm_exc.UnmappedClassError: return False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7481103 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/0000755000175000017500000000000000000000000022512 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/README0000644000175000017500000000015300000000000023371 0ustar00coreycorey00000000000000This is a database migration repository. More information at http://code.google.com/p/sqlalchemy-migrate/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/__init__.py0000644000175000017500000000000000000000000024611 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/manage.py0000644000175000017500000000140100000000000024310 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.versioning.shell import main if __name__ == "__main__": main(debug='False', repository='.') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/migrate.cfg0000644000175000017500000000177400000000000024634 0ustar00coreycorey00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=Trove Migrations # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. 
List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=['mysql','postgres','sqlite'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/schema.py0000644000175000017500000000515000000000000024325 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Various conveniences used for migration scripts.""" from oslo_log import log as logging import sqlalchemy.types logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema') class String(sqlalchemy.types.String): def __init__(self, length, *args, **kwargs): super(String, self).__init__(*args, length=length, **kwargs) class Text(sqlalchemy.types.Text): def __init__(self, length=None, *args, **kwargs): super(Text, self).__init__(*args, **kwargs) self.with_variant(sqlalchemy.types.Text(length=length), 'mysql') class Boolean(sqlalchemy.types.Boolean): def __init__(self, create_constraint=True, name=None, *args, **kwargs): super(Boolean, self).__init__(*args, create_constraint=create_constraint, name=name, **kwargs) class DateTime(sqlalchemy.types.DateTime): def __init__(self, timezone=False, *args, **kwargs): super(DateTime, self).__init__(*args, timezone=timezone, **kwargs) class Integer(sqlalchemy.types.Integer): def __init__(self, *args, **kwargs): super(Integer, self).__init__(*args, **kwargs) class BigInteger(sqlalchemy.types.BigInteger): def __init__(self, *args, **kwargs): super(BigInteger, self).__init__(*args, **kwargs) class Float(sqlalchemy.types.Float): def __init__(self, *args, **kwargs): super(Float, self).__init__(*args, **kwargs) def create_tables(tables): for table in tables: logger.info("creating table %(table)s", {'table': table}) table.create() def drop_tables(tables): for table in tables: logger.info("dropping table %(table)s", {'table': table}) table.drop() def Table(name, metadata, *args, **kwargs): return sqlalchemy.schema.Table(name, metadata, *args, mysql_engine='INNODB', **kwargs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7521102 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/0000755000175000017500000000000000000000000024362 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/001_base_schema.py0000644000175000017500000000306200000000000027547 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
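# Illustrative sketch (hypothetical revision): the pattern the versioned
# scripts below follow, built from the schema.py helpers above -- Table()
# forces mysql_engine='INNODB' and create_tables() logs each creation.
#
#     meta = MetaData()
#     my_table = Table(
#         'my_table', meta,
#         Column('id', String(36), primary_key=True, nullable=False),
#         Column('created', DateTime()))
#
#     def upgrade(migrate_engine):
#         meta.bind = migrate_engine
#         create_tables([my_table])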
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() instances = Table( 'instances', meta, Column('id', String(36), primary_key=True, nullable=False), Column('created', DateTime()), Column('updated', DateTime()), Column('name', String(255)), Column('hostname', String(255)), Column('compute_instance_id', String(36)), Column('task_id', Integer()), Column('task_description', String(32)), Column('task_start_time', DateTime()), Column('volume_id', String(36))) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([instances]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/002_service_images.py0000644000175000017500000000227500000000000030310 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() service_images = Table( 'service_images', meta, Column('id', String(36), primary_key=True, nullable=False), Column('service_name', String(255)), Column('image_id', String(255))) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([service_images]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/003_service_statuses.py0000644000175000017500000000267500000000000030723 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() service_statuses = Table( 'service_statuses', meta, Column('id', String(36), primary_key=True, nullable=False), Column('instance_id', String(36), nullable=False), Column('status_id', Integer(), nullable=False), Column('status_description', String(64), nullable=False), Column('updated_at', DateTime())) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([service_statuses]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/004_root_enabled.py0000644000175000017500000000241300000000000027754 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() root_enabled_history = Table( 'root_enabled_history', meta, Column('id', String(36), primary_key=True, nullable=False), Column('user', String(length=255)), Column('created', DateTime()), ) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([root_enabled_history]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/005_heartbeat.py0000644000175000017500000000241700000000000027263 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() agent_heartbeats = Table( 'agent_heartbeats', meta, Column('id', String(36), primary_key=True, nullable=False), Column('instance_id', String(36), nullable=False), Column('updated_at', DateTime())) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([agent_heartbeats]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/006_dns_records.py0000644000175000017500000000221100000000000027622 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() dns_records = Table( 'dns_records', meta, Column('name', String(length=255), primary_key=True), Column('record_id', String(length=64))) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([dns_records]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/007_add_volume_flavor.py0000644000175000017500000000226600000000000031020 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # add column: instances = Table('instances', meta, autoload=True) volume_size = Column('volume_size', Integer()) flavor_id = Column('flavor_id', String(36)) instances.create_column(flavor_id) instances.create_column(volume_size) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/008_add_instance_fields.py0000644000175000017500000000212300000000000031263 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # add column: instances = Table('instances', meta, autoload=True) instances.create_column(Column('tenant_id', String(36), nullable=True)) instances.create_column(Column('server_status', String(64))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/009_add_deleted_flag_to_instances.py0000644000175000017500000000217400000000000033310 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # add column: instances = Table('instances', meta, autoload=True) instances.create_column(Column('deleted', Boolean())) instances.create_column(Column('deleted_at', DateTime())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/010_add_usage.py0000644000175000017500000000303400000000000027230 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() usage_events = Table( 'usage_events', meta, Column('id', String(36), primary_key=True, nullable=False), Column('instance_name', String(36)), Column('tenant_id', String(36)), Column('nova_instance_id', String(36)), Column('instance_size', Integer()), Column('nova_volume_id', String(36)), Column('volume_size', Integer()), Column('end_time', DateTime()), Column('updated', DateTime())) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([usage_events]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/011_quota.py0000644000175000017500000000510000000000000026442 0ustar00coreycorey00000000000000# Copyright [2013] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from sqlalchemy.schema import UniqueConstraint from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() quotas = Table('quotas', meta, Column('id', String(36), primary_key=True, nullable=False), Column('created', DateTime()), Column('updated', DateTime()), Column('tenant_id', String(36)), Column('resource', String(length=255), nullable=False), Column('hard_limit', Integer()), UniqueConstraint('tenant_id', 'resource')) quota_usages = Table('quota_usages', meta, Column('id', String(36), primary_key=True, nullable=False), Column('created', DateTime()), Column('updated', DateTime()), Column('tenant_id', String(36)), Column('in_use', Integer(), default=0), Column('reserved', Integer(), default=0), Column('resource', String(length=255), nullable=False), UniqueConstraint('tenant_id', 'resource')) reservations = Table('reservations', meta, Column('created', DateTime()), Column('updated', DateTime()), Column('id', String(36), primary_key=True, nullable=False), Column('usage_id', String(36)), Column('delta', Integer(), nullable=False), Column('status', String(length=36))) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([quotas, quota_usages, reservations]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/012_backup.py0000644000175000017500000000366600000000000026576 0ustar00coreycorey00000000000000# Copyright [2013] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import Float from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() backups = Table('backups', meta, Column('id', String(36), primary_key=True, nullable=False), Column('name', String(255), nullable=False), Column('description', String(512)), Column('location', String(1024)), Column('backup_type', String(32)), Column('size', Float()), Column('tenant_id', String(36)), Column('state', String(32), nullable=False), Column('instance_id', String(36)), Column('checksum', String(32)), Column('backup_timestamp', DateTime()), Column('deleted', Boolean()), Column('created', DateTime()), Column('updated', DateTime()), Column('deleted_at', DateTime())) def upgrade(migrate_engine): meta.bind = migrate_engine create_tables([backups, ]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/013_add_security_group_artifacts.py0000644000175000017500000000623600000000000033261 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy import ForeignKey from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Boolean from trove.db.sqlalchemy.migrate_repo.schema import create_tables from trove.db.sqlalchemy.migrate_repo.schema import DateTime from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy.migrate_repo.schema import Table meta = MetaData() security_groups = Table( 'security_groups', meta, Column('id', String(length=36), primary_key=True, nullable=False), Column('name', String(length=255)), Column('description', String(length=255)), Column('user', String(length=255)), Column('tenant_id', String(length=255)), Column('created', DateTime()), Column('updated', DateTime()), Column('deleted', Boolean(), default=0), Column('deleted_at', DateTime()), ) security_group_instance_associations = Table( 'security_group_instance_associations', meta, Column('id', String(length=36), primary_key=True, nullable=False), Column('security_group_id', String(length=36), ForeignKey('security_groups.id', ondelete="CASCADE", onupdate="CASCADE")), Column('instance_id', String(length=36), ForeignKey('instances.id', ondelete="CASCADE", onupdate="CASCADE")), Column('created', DateTime()), Column('updated', DateTime()), Column('deleted', Boolean(), default=0), Column('deleted_at', DateTime()), ) security_group_rules = Table( 'security_group_rules', meta, Column('id', String(length=36), primary_key=True, nullable=False), Column('group_id', String(length=36), ForeignKey('security_groups.id', ondelete="CASCADE", onupdate="CASCADE")), Column('parent_group_id', String(length=36), ForeignKey('security_groups.id', ondelete="CASCADE", onupdate="CASCADE")), Column('protocol', String(length=255)), Column('from_port', Integer()), Column('to_port', Integer()), Column('cidr', String(length=255)), Column('created', DateTime()), Column('updated', DateTime()), Column('deleted', Boolean(), default=0), Column('deleted_at', DateTime()), ) def upgrade(migrate_engine): meta.bind = migrate_engine Table( 'instances', meta, autoload=True, ) create_tables([security_groups, security_group_rules, security_group_instance_associations]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/014_update_instance_flavor_id.py0000644000175000017500000000265400000000000032522 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import MetaData from trove.db.sqlalchemy.migrate_repo.schema import Integer from trove.db.sqlalchemy.migrate_repo.schema import Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # pgsql <= 8.3 was lax about char->other casting but this was tightened up # in 8.4+. We now have to specify the USING clause for the cast to succeed. 
    # NB: The generated sqlalchemy query doesn't support this, so this
    # override is needed.
    if migrate_engine.name == 'postgresql':
        migrate_engine.execute('ALTER TABLE instances ALTER COLUMN flavor_id '
                               'TYPE INTEGER USING flavor_id::integer')
    else:
        instances = Table('instances', meta, autoload=True)
        # modify column
        instances.c.flavor_id.alter(type=Integer())


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/015_add_service_type.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    service_type = Column('service_type', String(36))
    instances.create_column(service_type)
    instances.update().values({'service_type': 'mysql'}).execute()


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/016_add_datastore_type.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
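
# NB: illustrative sketch, not part of the original migration. With the two
# tables below in place, a datastore and one of its versions could be
# registered roughly like this (all ids and values are placeholders):
#
#     datastores.insert().values(
#         id=ds_id, name='mysql', manager='mysql').execute()
#     datastore_versions.insert().values(
#         id=version_id, datastore_id=ds_id, name='5.7',
#         image_id=image_id, packages='mysql-server-5.7',
#         active=True).execute()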
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import UniqueConstraint

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


meta = MetaData()

datastores = Table(
    'datastores', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('name', String(255), unique=True),
    Column('manager', String(255), nullable=False),
    Column('default_version_id', String(36)),
)

datastore_versions = Table(
    'datastore_versions', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('datastore_id', String(36), ForeignKey('datastores.id')),
    Column('name', String(255), unique=True),
    Column('image_id', String(36), nullable=False),
    Column('packages', String(511)),
    Column('active', Boolean(), nullable=False),
    UniqueConstraint('datastore_id', 'name', name='ds_versions')
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    create_tables([datastores, datastore_versions])

    instances = Table('instances', meta, autoload=True)
    datastore_version_id = Column('datastore_version_id', String(36),
                                  ForeignKey('datastore_versions.id'))
    instances.create_column(datastore_version_id)
    instances.drop_column('service_type')

    # Table 'service_images' is deprecated as of this version.
    # Leave it in place for a few releases.
    # drop_tables([service_images])


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/017_update_datastores.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
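
# NB: illustrative note, not part of the original migration. The row-by-row
# copy performed by migrate_datastore_manager() below is roughly equivalent
# to a single statement on backends that support UPDATE ... FROM, e.g. on
# PostgreSQL:
#
#     UPDATE datastore_versions dv SET manager = ds.manager
#     FROM datastores ds WHERE dv.datastore_id = ds.id;
#
# The per-row loop presumably keeps the migration portable across all
# supported engines.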
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.sql.expression import select

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


def migrate_datastore_manager(datastores, datastore_versions):
    versions = select([datastore_versions]).execute()
    for ds_v in versions:
        ds = select([datastores]).\
            where(datastores.c.id == ds_v.datastore_id).\
            execute().fetchone()
        datastore_versions.update().\
            where(datastore_versions.c.id == ds_v.id).\
            values(manager=ds.manager).\
            execute()


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    datastores = Table('datastores', meta, autoload=True)
    datastore_versions = Table('datastore_versions', meta, autoload=True)

    # add column to datastore_versions
    manager = Column('manager', String(255))
    datastore_versions.create_column(manager)
    migrate_datastore_manager(datastores, datastore_versions)

    # drop column from datastores
    datastores.drop_column('manager')


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/018_datastore_versions_fix.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Table


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    datastore_versions = Table('datastore_versions', meta, autoload=True)

    # modify column
    datastore_versions.c.name.alter(unique=False)


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/019_datastore_fix.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
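
# NB: illustrative note, not part of the original migration. The backfill
# loop in upgrade() below is roughly equivalent to:
#
#     UPDATE instances
#     SET datastore_version_id = '20000000-0000-0000-0000-000000000002'
#     WHERE datastore_version_id IS NULL;
#
# after which the column can be made NOT NULL. Because some backends
# (notably MySQL) refuse to alter a column that participates in a foreign
# key, the migration first drops the FK constraint(s) via the db_utils
# helpers, alters the column, then recreates them.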
from sqlalchemy.schema import MetaData
from sqlalchemy.sql.expression import insert
from sqlalchemy.sql.expression import select
from sqlalchemy.sql.expression import update
from sqlalchemy import text

from trove.common import cfg
from trove.db.sqlalchemy.migrate_repo.schema import Table
from trove.db.sqlalchemy import utils as db_utils

CONF = cfg.CONF
LEGACY_IMAGE_ID = "00000000-0000-0000-0000-000000000000"
LEGACY_DATASTORE_ID = "10000000-0000-0000-0000-000000000001"
LEGACY_VERSION_ID = "20000000-0000-0000-0000-000000000002"

meta = MetaData()


def create_legacy_version(datastores_table, datastore_versions_table,
                          image_id):
    insert(
        table=datastores_table,
        values=dict(id=LEGACY_DATASTORE_ID, name="Legacy MySQL")
    ).execute()

    insert(
        table=datastore_versions_table,
        values=dict(id=LEGACY_VERSION_ID,
                    datastore_id=LEGACY_DATASTORE_ID,
                    name="Unknown Legacy Version",
                    image_id=image_id,
                    packages="",
                    active=False,
                    manager="mysql")
    ).execute()

    return LEGACY_VERSION_ID


def find_image(service_name):
    image_table = Table('service_images', meta, autoload=True)
    image = select(
        columns=[text("id"), text("image_id"), text("service_name")],
        from_obj=image_table,
        whereclause=text("service_name='%s'" % service_name),
        limit=1
    ).execute().fetchone()

    if image:
        return image.id
    return LEGACY_IMAGE_ID


def has_instances_wo_datastore_version(instances_table):
    instance = select(
        columns=[text("id")],
        from_obj=instances_table,
        whereclause=text("datastore_version_id is NULL"),
        limit=1
    ).execute().fetchone()

    return instance is not None


def find_all_instances_wo_datastore_version(instances_table):
    instances = select(
        columns=[text("id")],
        from_obj=instances_table,
        whereclause=text("datastore_version_id is NULL")
    ).execute()

    return instances


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    instance_table = Table('instances', meta, autoload=True)
    datastore_versions_table = Table('datastore_versions', meta,
                                     autoload=True)

    if has_instances_wo_datastore_version(instance_table):
        instances = find_all_instances_wo_datastore_version(instance_table)
        image_id = find_image("mysql")
        datastores_table = Table('datastores', meta, autoload=True)
        version_id = create_legacy_version(datastores_table,
                                           datastore_versions_table,
                                           image_id)
        for instance in instances:
            update(
                table=instance_table,
                whereclause=text("id='%s'" % instance.id),
                values=dict(datastore_version_id=version_id)
            ).execute()

    constraint_names = db_utils.get_foreign_key_constraint_names(
        engine=migrate_engine,
        table='instances',
        columns=[text('datastore_version_id')],
        ref_table='datastore_versions',
        ref_columns=[text('id')])
    db_utils.drop_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[instance_table.c.datastore_version_id],
        ref_columns=[datastore_versions_table.c.id])

    instance_table.c.datastore_version_id.alter(nullable=False)

    db_utils.create_foreign_key_constraints(
        constraint_names=constraint_names,
        columns=[instance_table.c.datastore_version_id],
        ref_columns=[datastore_versions_table.c.id])


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/020_configurations.py

# Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


meta = MetaData()

configurations = Table(
    'configurations', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('name', String(64), nullable=False),
    Column('description', String(256)),
    Column('tenant_id', String(36), nullable=False),
    Column('datastore_version_id', String(36), nullable=False),
    Column('deleted', Boolean(), nullable=False, default=False),
    Column('deleted_at', DateTime()),
)

configuration_parameters = Table(
    'configuration_parameters', meta,
    Column('configuration_id', String(36), ForeignKey("configurations.id"),
           nullable=False, primary_key=True),
    Column('configuration_key', String(128), nullable=False,
           primary_key=True),
    Column('configuration_value', String(128)),
    Column('deleted', Boolean(), nullable=False, default=False),
    Column('deleted_at', DateTime()),
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    create_tables([configurations])
    create_tables([configuration_parameters])

    instances = Table('instances', meta, autoload=True)
    instances.create_column(Column('configuration_id', String(36),
                                   ForeignKey("configurations.id")))


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/021_conductor_last_seen.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
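
# NB: illustrative sketch, not part of the original migration. The
# composite primary key on (instance_id, method_name) below allows at most
# one row per instance/method pair; a caller refreshing a timestamp would
# conceptually do (all names are placeholders):
#
#     conductor_lastseen.update().\
#         where(conductor_lastseen.c.instance_id == inst_id).\
#         where(conductor_lastseen.c.method_name == method).\
#         values(sent=timestamp).execute()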
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import Float
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


meta = MetaData()

conductor_lastseen = Table(
    'conductor_lastseen', meta,
    Column('instance_id', String(36), primary_key=True, nullable=False),
    Column('method_name', String(36), primary_key=True, nullable=False),
    Column('sent', Float(precision=32)))


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    create_tables([conductor_lastseen])


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/022_add_backup_parent_id.py

# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # add column:
    backups = Table('backups', meta, autoload=True)
    backups.create_column(Column('parent_id', String(36), nullable=True))


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/023_add_instance_indexes.py

# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
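
# NB: illustrative note, not part of the original migration. The index
# creation below is made effectively idempotent: if the backend reports
# that an index of the same name already exists, the resulting
# OperationalError is logged and the migration continues, e.g.:
#
#     try:
#         Index("instances_tenant_id", instances.c.tenant_id).create()
#     except OperationalError as e:
#         logger.info(e)
#
# The same pattern is reused by the backup and service-status index
# migrations that follow.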
from oslo_log import log as logging
from sqlalchemy.exc import OperationalError
from sqlalchemy.schema import Index
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Table

logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)

    tenant_id_idx = Index("instances_tenant_id", instances.c.tenant_id)
    try:
        tenant_id_idx.create()
    except OperationalError as e:
        logger.info(e)

    deleted_idx = Index("instances_deleted", instances.c.deleted)
    try:
        deleted_idx.create()
    except OperationalError as e:
        logger.info(e)


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/024_add_backup_indexes.py

# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from sqlalchemy.exc import OperationalError
from sqlalchemy.schema import Index
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Table

logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    backups = Table('backups', meta, autoload=True)

    backups_instance_id_idx = Index("backups_instance_id",
                                    backups.c.instance_id)
    backups_deleted_idx = Index("backups_deleted", backups.c.deleted)

    try:
        backups_instance_id_idx.create()
    except OperationalError as e:
        logger.info(e)

    try:
        backups_deleted_idx.create()
    except OperationalError as e:
        logger.info(e)


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/025_add_service_statuses_indexes.py

# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy.exc import OperationalError
from sqlalchemy.schema import Index
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Table

logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    service_statuses = Table('service_statuses', meta, autoload=True)

    idx = Index("service_statuses_instance_id",
                service_statuses.c.instance_id)
    try:
        idx.create()
    except OperationalError as e:
        logger.info(e)


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/026_datastore_versions_unique_fix.py

# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from migrate.changeset import UniqueConstraint
from oslo_log import log as logging
from sqlalchemy.exc import InternalError
from sqlalchemy.exc import OperationalError
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Table

logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    datastore_versions = Table('datastore_versions', meta, autoload=True)

    # drop the unique index on the name column - unless we are
    # using sqlite - it doesn't support dropping unique constraints
    uc = None
    if migrate_engine.name == "mysql":
        uc = UniqueConstraint('name', table=datastore_versions, name='name')
    elif migrate_engine.name == "postgresql":
        uc = UniqueConstraint('name', table=datastore_versions,
                              name='datastore_versions_name_key')
    if uc:
        try:
            uc.drop()
        except (OperationalError, InternalError) as e:
            logger.info(e)


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/027_add_datastore_capabilities.py

# Copyright (c) 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import UniqueConstraint

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


meta = MetaData()

capabilities = Table(
    'capabilities', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('name', String(255), unique=True),
    Column('description', String(255), nullable=False),
    Column('enabled', Boolean())
)

capability_overrides = Table(
    'capability_overrides', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('datastore_version_id', String(36),
           ForeignKey('datastore_versions.id')),
    Column('capability_id', String(36), ForeignKey('capabilities.id')),
    Column('enabled', Boolean()),
    UniqueConstraint('datastore_version_id', 'capability_id',
                     name='idx_datastore_capabilities_enabled')
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    Table('datastores', meta, autoload=True)
    Table('datastore_versions', meta, autoload=True)
    create_tables([capabilities, capability_overrides])


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/028_recreate_agent_heartbeat.py

# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
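
# NB: illustrative note, not part of the original migration. Unlike the
# other migrations in this repository, the one below re-creates
# 'agent_heartbeats' from scratch rather than altering it in place, i.e.
# conceptually:
#
#     DROP TABLE agent_heartbeats;
#     CREATE TABLE agent_heartbeats (...);  -- new columns, indexes and
#                                           -- unique constraint
#
# so any rows in the previous table are discarded.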
from oslo_log import log as logging
from sqlalchemy.exc import OperationalError
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import drop_tables
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table

logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # new table with desired columns, indexes, and constraints
    new_agent_heartbeats = Table(
        'agent_heartbeats', meta,
        Column('id', String(36), primary_key=True, nullable=False),
        Column('instance_id', String(36),
               nullable=False, unique=True, index=True),
        Column('guest_agent_version', String(255), index=True),
        Column('deleted', Boolean(), index=True),
        Column('deleted_at', DateTime()),
        Column('updated_at', DateTime(), nullable=False))

    # original table from migration 005_heartbeat.py
    previous_agent_heartbeats = Table('agent_heartbeats', meta,
                                      autoload=True)

    try:
        drop_tables([previous_agent_heartbeats])
    except OperationalError as e:
        logger.warning("This table may have been dropped by some other "
                       "means.")
        logger.warning(e)

    create_tables([new_agent_heartbeats])


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/029_add_backup_datastore.py

# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    backups = Table('backups', meta, autoload=True)
    Table('datastore_versions', meta, autoload=True)
    datastore_version_id = Column('datastore_version_id', String(36),
                                  ForeignKey('datastore_versions.id'))
    backups.create_column(datastore_version_id)


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/030_add_master_slave.py

# Copyright Tesora, Inc. 2014
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy.schema import Column
from sqlalchemy.schema import ForeignKey
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table

COLUMN_NAME = 'slave_of_id'


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)

    instances.create_column(
        Column(COLUMN_NAME, String(36), ForeignKey('instances.id')),
        nullable=True)


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/031_add_timestamps_to_configurations.py

# Copyright 2014 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import Table


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    configurations = Table('configurations', meta, autoload=True)
    created = Column('created', DateTime())
    updated = Column('updated', DateTime())
    configurations.create_column(created)
    configurations.create_column(updated)


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/032_clusters.py

# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
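
# NB: illustrative note, not part of the original migration. The Index()
# entries declared inline in the clusters Table() below are created
# together with the table by create_tables(); only 'instances_cluster_id'
# has to be created explicitly in upgrade(), because the 'instances' table
# already exists and merely gains new columns.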
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import Index
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import Integer
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


meta = MetaData()

clusters = Table(
    'clusters', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('created', DateTime(), nullable=False),
    Column('updated', DateTime(), nullable=False),
    Column('name', String(255), nullable=False),
    Column('task_id', Integer(), nullable=False),
    Column('tenant_id', String(36), nullable=False),
    Column("datastore_version_id", String(36),
           ForeignKey('datastore_versions.id'), nullable=False),
    Column('deleted', Boolean()),
    Column('deleted_at', DateTime()),
    Index("clusters_tenant_id", "tenant_id"),
    Index("clusters_deleted", "deleted"),)


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    Table('datastores', meta, autoload=True)
    Table('datastore_versions', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    create_tables([clusters])

    instances.create_column(Column('cluster_id', String(36),
                                   ForeignKey("clusters.id")))
    instances.create_column(Column('shard_id', String(36)))
    instances.create_column(Column('type', String(64)))

    cluster_id_idx = Index("instances_cluster_id", instances.c.cluster_id)
    cluster_id_idx.create()


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/033_datastore_parameters.py

# Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
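
# NB: illustrative note, not part of the original migration. The table
# below combines a composite primary key (id, name, datastore_version_id)
# with a named UNIQUE constraint on (datastore_version_id, name), so a
# configuration parameter name can be defined at most once per datastore
# version.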
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import UniqueConstraint

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


meta = MetaData()

datastore_configuration_parameters = Table(
    'datastore_configuration_parameters', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('name', String(128), primary_key=True, nullable=False),
    Column('datastore_version_id', String(36),
           ForeignKey("datastore_versions.id"),
           primary_key=True, nullable=False),
    Column('restart_required', Boolean(), nullable=False, default=False),
    Column('max_size', String(40)),
    Column('min_size', String(40)),
    Column('data_type', String(128), nullable=False),
    Column('deleted', Boolean()),
    Column('deleted_at', DateTime()),
    UniqueConstraint(
        'datastore_version_id', 'name',
        name='UQ_datastore_configuration_parameters_datastore_version_id_name')
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    Table('datastore_versions', meta, autoload=True)
    create_tables([datastore_configuration_parameters])


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/034_change_task_description.py

# Copyright 2014 AWCloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instances.c.task_description.alter(type=String(255))


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/035_flavor_id_int_to_string.py

#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table

meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instances.c.flavor_id.alter(String(255))


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/036_add_datastore_version_metadata.py

# Copyright 2015 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import UniqueConstraint

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


meta = MetaData()

datastore_version_metadata = Table(
    'datastore_version_metadata', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column(
        'datastore_version_id', String(36),
        ForeignKey('datastore_versions.id', ondelete='CASCADE'),
    ),
    Column('key', String(128), nullable=False),
    Column('value', String(128)),
    Column('created', DateTime(), nullable=False),
    Column('deleted', Boolean(), nullable=False, default=False),
    Column('deleted_at', DateTime()),
    Column('updated_at', DateTime()),
    UniqueConstraint(
        'datastore_version_id', 'key', 'value',
        name='UQ_datastore_version_metadata_datastore_version_id_key_value')
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    # Load the datastore_versions table into the metadata so the foreign
    # key can be resolved, then create the datastore_version_metadata table.
    Table('datastore_versions', meta, autoload=True)
    create_tables([datastore_version_metadata])


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/037_modules.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
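
# NB: illustrative note, not part of the original migration. On MySQL,
# Text(length=16777215) for the 'contents' column below should be rendered
# as MEDIUMTEXT (up to ~16 MB of module payload); migration
# 040_module_priority.py later widens it to Text(length=4294967295),
# i.e. LONGTEXT.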
#

from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import UniqueConstraint

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
from trove.db.sqlalchemy.migrate_repo.schema import Text


meta = MetaData()

modules = Table(
    'modules', meta,
    Column('id', String(length=64), primary_key=True, nullable=False),
    Column('name', String(length=255), nullable=False),
    Column('type', String(length=255), nullable=False),
    Column('contents', Text(length=16777215), nullable=False),
    Column('description', String(length=255)),
    Column('tenant_id', String(length=64), nullable=True),
    Column('datastore_id', String(length=64), nullable=True),
    Column('datastore_version_id', String(length=64), nullable=True),
    Column('auto_apply', Boolean(), default=0, nullable=False),
    Column('visible', Boolean(), default=1, nullable=False),
    Column('live_update', Boolean(), default=0, nullable=False),
    Column('md5', String(length=32), nullable=False),
    Column('created', DateTime(), nullable=False),
    Column('updated', DateTime(), nullable=False),
    Column('deleted', Boolean(), default=0, nullable=False),
    Column('deleted_at', DateTime()),
    UniqueConstraint(
        'type', 'tenant_id', 'datastore_id', 'datastore_version_id',
        'name', 'deleted_at',
        name='UQ_type_tenant_datastore_datastore_version_name'),
)

instance_modules = Table(
    'instance_modules', meta,
    Column('id', String(length=64), primary_key=True, nullable=False),
    Column('instance_id', String(length=64),
           ForeignKey('instances.id', ondelete="CASCADE",
                      onupdate="CASCADE"),
           nullable=False),
    Column('module_id', String(length=64),
           ForeignKey('modules.id', ondelete="CASCADE",
                      onupdate="CASCADE"),
           nullable=False),
    Column('md5', String(length=32), nullable=False),
    Column('created', DateTime(), nullable=False),
    Column('updated', DateTime(), nullable=False),
    Column('deleted', Boolean(), default=0, nullable=False),
    Column('deleted_at', DateTime()),
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    Table('instances', meta, autoload=True)
    create_tables([modules, instance_modules])


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/038_instance_faults.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
from trove.db.sqlalchemy.migrate_repo.schema import Text


meta = MetaData()

instance_faults = Table(
    'instance_faults', meta,
    Column('id', String(length=64), primary_key=True, nullable=False),
    Column('instance_id', String(length=64),
           ForeignKey('instances.id', ondelete="CASCADE",
                      onupdate="CASCADE"),
           nullable=False),
    Column('message', String(length=255), nullable=False),
    Column('details', Text(length=65535), nullable=False),
    Column('created', DateTime(), nullable=False),
    Column('updated', DateTime(), nullable=False),
    Column('deleted', Boolean(), default=0, nullable=False),
    Column('deleted_at', DateTime()),
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    Table('instances', meta, autoload=True)
    create_tables([instance_faults])


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/039_region.py

# Copyright 2016 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.common import cfg
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table

CONF = cfg.CONF
logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')

meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instances.create_column(Column('region_id', String(255)))
    instances.update().values(
        region_id=CONF.service_credentials.region_name).execute()


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
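
# NB: illustrative note, not part of the original migration. Besides adding
# the three new columns, the migration below back-fills 'is_admin' for
# existing rows; the textual whereclause
#
#     visible=0 or auto_apply=1 or tenant_id is null
#
# marks every module that is hidden, auto-applied, or not scoped to a
# single tenant as admin-only.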
#

from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.sql.expression import update
from sqlalchemy import text

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import Integer
from trove.db.sqlalchemy.migrate_repo.schema import Table
from trove.db.sqlalchemy.migrate_repo.schema import Text

COLUMN_NAME_1 = 'priority_apply'
COLUMN_NAME_2 = 'apply_order'
COLUMN_NAME_3 = 'is_admin'


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    modules = Table('modules', meta, autoload=True)
    is_nullable = True if migrate_engine.name == "sqlite" else False
    column = Column(COLUMN_NAME_1, Boolean(), nullable=is_nullable,
                    default=0)
    modules.create_column(column)
    column = Column(COLUMN_NAME_2, Integer(), nullable=is_nullable,
                    default=5)
    modules.create_column(column)
    column = Column(COLUMN_NAME_3, Boolean(), nullable=is_nullable,
                    default=0)
    modules.create_column(column)
    modules.c.contents.alter(Text(length=4294967295))
    # mark all non-visible, auto-apply and all-tenant modules as is_admin
    update(table=modules,
           values=dict(is_admin=1),
           whereclause=text("visible=0 or auto_apply=1 or tenant_id is null")
           ).execute()


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table

meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instances.create_column(Column('encrypted_key', String(255)))


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/042_add_cluster_configuration_id.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
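
# NB: illustrative note, not part of the original migration. The
# 'configurations' table is autoloaded in upgrade() below only so that the
# ForeignKey("configurations.id") on the new column can be resolved against
# the shared MetaData before 'clusters' is altered.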
from oslo_log import log as logging
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.common import cfg
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table

CONF = cfg.CONF
logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')

meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    # Load 'configurations' table to MetaData.
    Table('configurations', meta, autoload=True,
          autoload_with=migrate_engine)
    clusters = Table('clusters', meta, autoload=True)
    clusters.create_column(Column('configuration_id', String(36),
                                  ForeignKey("configurations.id")))


trove-12.1.0.dev92/trove/db/sqlalchemy/migrate_repo/versions/__init__.py

trove-12.1.0.dev92/trove/db/sqlalchemy/migration.py

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from migrate.versioning import api as versioning_api
# See LP bug #719834. sqlalchemy-migrate changed location of
# exceptions.py after 0.6.0.
try:
    from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
    from migrate import exceptions as versioning_exceptions
from oslo_log import log as logging

from trove.common import exception

logger = logging.getLogger('trove.db.sqlalchemy.migration')


def db_version(options, repo_path=None):
    """Return the database's current migration number.

    :param options: options dict
    :retval version number
    """
    repo_path = get_migrate_repo_path(repo_path)
    sql_connection = options['database']['connection']
    try:
        return versioning_api.db_version(sql_connection, repo_path)
    except versioning_exceptions.DatabaseNotControlledError:
        msg = ("database '%(sql_connection)s' is not under migration control"
               % {'sql_connection': sql_connection})
        raise exception.DatabaseMigrationError(msg)


def upgrade(options, version=None, repo_path=None):
    """Upgrade the database's current migration level.

    :param options: options dict
    :param version: version to upgrade (defaults to latest)
    :retval version number
    """
    db_version(options, repo_path)  # Ensure db is under migration control
    repo_path = get_migrate_repo_path(repo_path)
    sql_connection = options['database']['connection']
    version_str = version or 'latest'
    logger.info("Upgrading %(sql_connection)s to version %(version_str)s",
                {'sql_connection': sql_connection,
                 'version_str': version_str})
    return versioning_api.upgrade(sql_connection, repo_path, version)


def version_control(options, repo_path=None):
    """Place a database under migration control.
    :param options: options dict
    """
    sql_connection = options['database']['connection']
    try:
        _version_control(options, repo_path)
    except versioning_exceptions.DatabaseAlreadyControlledError:
        msg = ("database '%(sql_connection)s' is already under migration "
               "control" % {'sql_connection': sql_connection})
        raise exception.DatabaseMigrationError(msg)


def _version_control(options, repo_path):
    """Place a database under migration control.

    :param options: options dict
    """
    repo_path = get_migrate_repo_path(repo_path)
    sql_connection = options['database']['connection']
    return versioning_api.version_control(sql_connection, repo_path)


def db_sync(options, version=None, repo_path=None):
    """Place a database under migration control and perform an upgrade.

    :param options: options dict
    :param repo_path: used for plugin db migrations, defaults to main repo
    :retval version number
    """
    try:
        _version_control(options, repo_path)
    except versioning_exceptions.DatabaseAlreadyControlledError:
        pass

    upgrade(options, version=version, repo_path=repo_path)


def get_migrate_repo_path(repo_path=None):
    """Get the path for the migrate repository."""
    default_path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'migrate_repo')
    repo_path = repo_path or default_path
    assert os.path.exists(repo_path)
    return repo_path


trove-12.1.0.dev92/trove/db/sqlalchemy/session.py

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
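
# NB: illustrative sketch, not part of the original module. Typical usage
# is to call configure_db() once at service start-up and then obtain
# sessions on demand, e.g. (assuming CONF carries a [database] section with
# a 'connection' URL):
#
#     from trove.db.sqlalchemy import session
#     session.configure_db(CONF)
#     db_session = session.get_session()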
import contextlib
import threading

from oslo_db.sqlalchemy import session
from oslo_log import log as logging
from sqlalchemy import MetaData

from trove.common import cfg
from trove.common.i18n import _
from trove.db.sqlalchemy import mappers

_FACADE = None
_LOCK = threading.Lock()

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


def configure_db(options, models_mapper=None):
    facade = _create_facade(options)
    if models_mapper:
        models_mapper.map(facade)
    else:
        from trove.backup import models as backup_models
        from trove.cluster import models as cluster_models
        from trove.conductor import models as conductor_models
        from trove.configuration import models as configurations_models
        from trove.datastore import models as datastores_models
        from trove.dns import models as dns_models
        from trove.extensions.mysql import models as mysql_models
        from trove.extensions.security_group import models as secgrp_models
        from trove.guestagent import models as agent_models
        from trove.instance import models as base_models
        from trove.module import models as module_models
        from trove.quota import models as quota_models

        model_modules = [
            base_models,
            datastores_models,
            dns_models,
            mysql_models,
            agent_models,
            quota_models,
            backup_models,
            secgrp_models,
            configurations_models,
            conductor_models,
            cluster_models,
            module_models
        ]

        models = {}
        for module in model_modules:
            models.update(module.persisted_models())
        mappers.map(get_engine(), models)


def _create_facade(options):
    global _LOCK, _FACADE
    # TODO(mvandijk): Refactor this once oslo.db spec is implemented:
    # https://specs.openstack.org/openstack/oslo-specs/specs/kilo/
    # make-enginefacade-a-facade.html
    if _FACADE is None:
        with _LOCK:
            if _FACADE is None:
                conf = CONF.database
                # pop the deprecated config option 'query_log'
                if conf.query_log:
                    if conf.connection_debug < 50:
                        conf['connection_debug'] = 50
                    LOG.warning(('Configuration option "query_log" has been '
                                 'deprecated. Use "connection_debug" '
                                 'instead. Setting connection_debug = '
                                 '%(debug_level)s instead.'),
                                {'debug_level': conf.get('connection_debug')})
                # TODO(mvandijk): once query_log is removed,
                # use enginefacade.from_config() instead
                database_opts = dict(CONF.database)
                database_opts.pop('query_log')
                _FACADE = session.EngineFacade(
                    options['database']['connection'],
                    **database_opts
                )
    return _FACADE


def _check_facade():
    if _FACADE is None:
        LOG.exception("***The Database has not been setup!!!***")
        raise RuntimeError(
            _("***The Database has not been setup!!!***"))


def get_facade():
    _check_facade()
    return _FACADE


def get_engine(use_slave=False):
    _check_facade()
    return _FACADE.get_engine(use_slave=use_slave)


def get_session(**kwargs):
    return get_facade().get_session(**kwargs)


def raw_query(model, **kwargs):
    return get_session(**kwargs).query(model)


def clean_db():
    engine = get_engine()
    meta = MetaData()
    meta.bind = engine
    meta.reflect()
    with contextlib.closing(engine.connect()) as con:
        trans = con.begin()
        # pylint: disable=E1101
        for table in reversed(meta.sorted_tables):
            if table.name != "migrate_version":
                con.execute(table.delete())
        # pylint: disable=E1101
        trans.commit()


def drop_db(options):
    if options:
        _create_facade(options)
    engine = get_engine()
    meta = MetaData()
    meta.bind = engine
    meta.reflect()
    meta.drop_all()


trove-12.1.0.dev92/trove/db/sqlalchemy/utils.py

# Copyright 2014 Tesora Inc.
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset.constraint import ForeignKeyConstraint from sqlalchemy.engine import reflection def get_foreign_key_constraint_names(engine, table, columns, ref_table, ref_columns): """Retrieve the names of foreign key constraints that match the given criteria. :param engine: The sqlalchemy engine to be used. :param table: Name of the child table. :param columns: List of the foreign key columns. :param ref_table: Name of the parent table. :param ref_columns: List of the referenced columns. :return: List of foreign key constraint names. """ constraint_names = [] inspector = reflection.Inspector.from_engine(engine) fks = inspector.get_foreign_keys(table) for fk in fks: if (fk['referred_table'] == ref_table and fk['constrained_columns'] == columns and fk['referred_columns'] == ref_columns): constraint_names.append(fk['name']) return constraint_names def drop_foreign_key_constraints(constraint_names, columns, ref_columns): """Drop the foreign key constraints that match the given criteria. :param constraint_names: List of foreign key constraint names :param columns: List of the foreign key columns. :param ref_columns: List of the referenced columns. """ for constraint_name in constraint_names: fkey_constraint = ForeignKeyConstraint(columns=columns, refcolumns=ref_columns, name=constraint_name) fkey_constraint.drop() def create_foreign_key_constraints(constraint_names, columns, ref_columns): """Create the foreign key constraints that match the given criteria. :param constraint_names: List of foreign key constraint names :param columns: List of the foreign key columns. :param ref_columns: List of the referenced columns. 
""" for constraint_name in constraint_names: fkey_constraint = ForeignKeyConstraint(columns=columns, refcolumns=ref_columns, name=constraint_name) fkey_constraint.create() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7521102 trove-12.1.0.dev92/trove/dns/0000755000175000017500000000000000000000000016072 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/dns/__init__.py0000644000175000017500000000000000000000000020171 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7521102 trove-12.1.0.dev92/trove/dns/designate/0000755000175000017500000000000000000000000020035 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/dns/designate/__init__.py0000644000175000017500000000000000000000000022134 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/dns/designate/driver.py0000644000175000017500000001215500000000000021706 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Dns Driver that uses Designate DNSaaS. 
""" import base64 import hashlib from designateclient import client from keystoneauth1 import loading from keystoneauth1 import session from oslo_log import log as logging from oslo_utils import encodeutils import six from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.dns import driver CONF = cfg.CONF DNS_TENANT_ID = CONF.dns_account_id DNS_AUTH_URL = CONF.dns_auth_url DNS_USERNAME = CONF.dns_username DNS_PASSKEY = CONF.dns_passkey DNS_TTL = CONF.dns_ttl DNS_DOMAIN_ID = CONF.dns_domain_id DNS_DOMAIN_NAME = CONF.dns_domain_name DNS_USER_DOMAIN_ID = CONF.dns_user_domain_id DNS_PROJECT_DOMAIN_ID = CONF.dns_project_domain_id LOG = logging.getLogger(__name__) def create_designate_client(api_version='2'): """Creates a Designate DNSaaS client.""" loader = loading.get_plugin_loader('password') auth = loader.load_from_options(auth_url=DNS_AUTH_URL, username=DNS_USERNAME, password=DNS_PASSKEY, project_id=DNS_TENANT_ID, user_domain_id=DNS_USER_DOMAIN_ID, project_domain_id=DNS_PROJECT_DOMAIN_ID) sesh = session.Session(auth=auth) return client.Client(api_version, session=sesh) class DesignateDriverV2(driver.DnsDriver): def __init__(self): self.dns_client = create_designate_client() self.default_dns_zone = DesignateDnsZone(id=DNS_DOMAIN_ID, name=DNS_DOMAIN_NAME) def create_entry(self, entry, content): """Creates the entry in the driver at the given dns zone.""" dns_zone = entry.dns_zone or self.default_dns_zone if not dns_zone.id: raise TypeError(_("The entry's dns_zone must have an ID " "specified.")) name = entry.name LOG.debug("Creating DNS entry %s.", name) client = self.dns_client # Record name has to end with a '.' by dns standard client.recordsets.create(DNS_DOMAIN_ID, entry.name + '.', entry.type, records=[content]) def delete_entry(self, name, type, dns_zone=None): """Deletes an entry with the given name and type from a dns zone.""" dns_zone = dns_zone or self.default_dns_zone records = self._get_records(dns_zone) matching_record = [rec for rec in records if rec['name'] == name + '.' and rec['type'] == type] if not matching_record: raise exception.DnsRecordNotFound(name) LOG.debug("Deleting DNS entry %s.", name) self.dns_client.recordsets.delete(dns_zone.id, matching_record[0]['id']) def _get_records(self, dns_zone): dns_zone = dns_zone or self.default_dns_zone if not dns_zone: raise TypeError(_('DNS domain is must be specified')) return self.dns_client.recordsets.list(dns_zone.id) class DesignateInstanceEntryFactory(driver.DnsInstanceEntryFactory): """Defines how instance DNS entries are created for instances.""" def create_entry(self, instance_id): zone = DesignateDnsZone(id=DNS_DOMAIN_ID, name=DNS_DOMAIN_NAME) # Constructing the hostname by hashing the instance ID. 
name = encodeutils.to_utf8(instance_id) name = hashlib.md5(name).digest() name = base64.b32encode(name)[:11].lower() if six.PY3: name = name.decode('ascii') hostname = ("%s.%s" % (name, zone.name)) # Remove the trailing dot if present if hostname.endswith('.'): hostname = hostname[:-1] return driver.DnsEntry(name=hostname, content=None, type="A", ttl=DNS_TTL, dns_zone=zone) class DesignateDnsZone(driver.DnsZone): def __init__(self, id, name): self._name = name self._id = id @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def id(self): return self._id @id.setter def id(self, value): self._id = value def __eq__(self, other): return (isinstance(other, DesignateDnsZone) and self.name == other.name and self.id == other.id) def __str__(self): return "%s:%s" % (self.id, self.name) trove-12.1.0.dev92/trove/dns/driver.py # Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Dns Driver base class that all DNS drivers should inherit from """ class DnsDriver(object): """The base class that all Dns drivers should inherit from.""" def __init__(self): pass def create_entry(self, entry): """Creates the entry in the driver at the given dns zone.""" pass def delete_entry(self, name, type, dns_zone=None): """Deletes an entry with the given name and type from a dns zone.""" pass def get_entries_by_content(self, content, dns_zone=None): """Retrieves all entries in a DNS zone with matching content field.""" pass def get_entries_by_name(self, name, dns_zone=None): """Retrieves all entries in a dns zone with the given name field.""" pass def get_dns_zones(self, name=None): """Returns all dns zones (optionally filtered by the name argument).""" pass def modify_content(self, name, content, dns_zone): # TODO(tim.simpson) I've found no use for this in RS impl of DNS w/ # instances. Check to see if it's really needed. pass def rename_entry(self, content, name, dns_zone): # TODO(tim.simpson) I've found no use for this in RS impl of DNS w/ # instances. Check to see if it's really needed. pass class DnsInstanceEntryFactory(object): """Defines how instance DNS entries are created for instances. By default, the DNS entry returns None meaning instances do not get entries associated with them. Override the create_entry method to change this behavior.
""" def create_entry(self, instance): return None class DnsSimpleInstanceEntryFactory(object): """Creates a CNAME with the name being the instance name.""" def create_entry(self, instance): return DnsEntry(name=instance.name, content=None, type="CNAME") class DnsEntry(object): """Simple representation of a DNS record.""" def __init__(self, name, content, type, ttl=None, priority=None, dns_zone=None): self.content = content self.name = name self.type = type self.priority = priority self.dns_zone = dns_zone self.ttl = ttl def __repr__(self): msg = ('DnsEntry(name="%s", content="%s", type="%s", ' 'ttl=%s, priority=%s, dns_zone=%s)') params = (self.name, self.content, self.type, self.ttl, self.priority, self.dns_zone) return msg % params def __str__(self): return "{ name:%s, content:%s, type:%s, zone:%s }" % \ (self.name, self.content, self.type, self.dns_zone) class DnsZone(object): """Represents a DNS Zone. For some APIs it is inefficient to simply represent a zone as a string because this would necessitate a look up on every call. So this opaque object can contain additional data needed by the DNS driver. The only constant is it must contain the domain name of the zone. """ @property def name(self): return "" def __str__(self): return self.name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/dns/manager.py0000644000175000017500000000523200000000000020060 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Dns manager. """ from oslo_log import log as logging from trove.common import cfg from trove.common import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF class DnsManager(object): """Handles associating DNS to and from IPs.""" def __init__(self, dns_driver=None, dns_instance_entry_factory=None, *args, **kwargs): if not dns_driver: dns_driver = CONF.dns_driver dns_driver = utils.import_class(dns_driver) self.driver = dns_driver() if not dns_instance_entry_factory: dns_instance_entry_factory = CONF.dns_instance_entry_factory entry_factory = utils.import_class(dns_instance_entry_factory) self.entry_factory = entry_factory() def create_instance_entry(self, instance_id, content): """Connects a new instance with a DNS entry. :param instance_id: The trove instance_id to associate. :param content: The IP content attached to the instance. """ entry = self.entry_factory.create_entry(instance_id) if entry: LOG.debug("Creating entry address %s.", str(entry)) self.driver.create_entry(entry, content) else: LOG.debug("Entry address not found for instance %s", instance_id) def delete_instance_entry(self, instance_id, content=None): """Removes a DNS entry associated to an instance. :param instance_id: The trove instance id to associate. :param content: The IP content attached to the instance. 
""" entry = self.entry_factory.create_entry(instance_id) LOG.debug("Deleting instance entry with %s", str(entry)) if entry: self.driver.delete_entry(entry.name, entry.type) def determine_hostname(self, instance_id): """ Create the hostname field based on the instance id. Use instance by default. """ entry = self.entry_factory.create_entry(instance_id) if entry: return entry.name else: return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/dns/models.py0000644000175000017500000000461300000000000017733 0ustar00coreycorey00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Model classes that map instance Ip to dns record. """ from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.common.models import ModelBase from trove.db import get_db_api LOG = logging.getLogger(__name__) def persisted_models(): return { 'dns_records': DnsRecord, } class DnsRecord(ModelBase): _data_fields = ['name', 'record_id'] _table_name = 'dns_records' def __init__(self, name, record_id): self.name = name self.record_id = record_id @classmethod def create(cls, **values): record = cls(**values).save() if not record.is_valid(): raise exception.InvalidModelError(errors=record.errors) return record def save(self): if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) LOG.debug("Saving %(name)s: %(dict)s", {'name': self.__class__.__name__, 'dict': self.__dict__}) return get_db_api().save(self) def delete(self): LOG.debug("Deleting %(name)s: %(dict)s", {'name': self.__class__.__name__, 'dict': self.__dict__}) return get_db_api().delete(self) @classmethod def find_by(cls, **conditions): model = cls.get_by(**conditions) if model is None: raise exception.ModelNotFoundError(_("%s Not Found") % cls.__name__) return model @classmethod def get_by(cls, **kwargs): return get_db_api().find_by(cls, **cls._process_conditions(kwargs)) @classmethod def _process_conditions(cls, raw_conditions): """Override in inheritors to format/modify any conditions.""" return raw_conditions ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7521102 trove-12.1.0.dev92/trove/extensions/0000755000175000017500000000000000000000000017505 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/__init__.py0000644000175000017500000000000000000000000021604 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7521102 trove-12.1.0.dev92/trove/extensions/common/0000755000175000017500000000000000000000000020775 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 
trove-12.1.0.dev92/trove/extensions/common/__init__.py0000644000175000017500000000000000000000000023074 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/common/models.py0000644000175000017500000001054300000000000022635 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common.clients import create_guest_client from trove.common.db import models as guest_models from trove.common import exception from trove.common import timeutils from trove.db import get_db_api from trove.instance import models as base_models LOG = logging.getLogger(__name__) def load_and_verify(context, instance_id): # Load InstanceServiceStatus to verify if its running instance = base_models.Instance.load(context, instance_id) if not instance.is_datastore_running: raise exception.UnprocessableEntity( "Instance %s is not ready, status: %s." % (instance.id, instance.datastore_status.status) ) else: return instance class Root(object): @classmethod def load(cls, context, instance_id): load_and_verify(context, instance_id) # TODO(pdmars): remove the is_root_enabled call from the guest agent, # just check the database for this information. # If the root history returns null or raises an exception, the root # user hasn't been enabled. 
try: root_history = RootHistory.load(context, instance_id) except exception.NotFound: return False if not root_history: return False return True @classmethod def create(cls, context, instance_id, root_password, cluster_instances_list=None): load_and_verify(context, instance_id) if root_password: root = create_guest_client(context, instance_id).enable_root_with_password( root_password) else: root = create_guest_client(context, instance_id).enable_root() root_user = guest_models.DatastoreUser.deserialize(root, verify=False) root_user.make_root() # If cluster_instances_list is None, root create was called for a # single instance; add a RootHistory entry for the instance_id. if cluster_instances_list is None: RootHistory.create(context, instance_id) return root_user @classmethod def delete(cls, context, instance_id): load_and_verify(context, instance_id) create_guest_client(context, instance_id).disable_root() class ClusterRoot(Root): @classmethod def create(cls, context, instance_id, root_password, cluster_instances_list=None): root_user = super(ClusterRoot, cls).create(context, instance_id, root_password, cluster_instances_list=None) if cluster_instances_list: for instance in cluster_instances_list: RootHistory.create(context, instance) return root_user class RootHistory(object): _auto_generated_attrs = ['id'] _data_fields = ['instance_id', 'user', 'created'] _table_name = 'root_enabled_history' def __init__(self, instance_id, user): self.id = instance_id self.user = user self.created = timeutils.utcnow() def save(self): LOG.debug("Saving %(name)s: %(dict)s", {'name': self.__class__.__name__, 'dict': self.__dict__}) return get_db_api().save(self) @classmethod def load(cls, context, instance_id): history = get_db_api().find_by(cls, id=instance_id) return history @classmethod def create(cls, context, instance_id): history = cls.load(context, instance_id) if history is not None: return history history = RootHistory(instance_id, context.user) return history.save() trove-12.1.0.dev92/trove/extensions/common/service.py # Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
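# Illustrative sketch of the "create only once" idiom RootHistory.create()
# above relies on: look the row up first and return it unchanged if present,
# so enabling root repeatedly never duplicates or refreshes the audit entry.
# The in-memory dict below is a hypothetical stand-in for get_db_api().
_example_history_store = {}

def _example_record_root_enabled(instance_id, user):
    existing = _example_history_store.get(instance_id)
    if existing is not None:
        # Keep the original user/timestamp on a repeated enable_root call.
        return existing
    record = {'id': instance_id, 'user': user}
    _example_history_store[instance_id] = record
    return record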
import abc from oslo_config.cfg import NoSuchOptError from oslo_log import log as logging from oslo_utils import importutils import six from trove.cluster import models as cluster_models from trove.cluster.models import DBCluster from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import policy from trove.common import wsgi from trove.datastore import models as datastore_models from trove.extensions.common import models from trove.extensions.common import views from trove.instance import models as instance_models from trove.instance.models import DBInstance LOG = logging.getLogger(__name__) import_class = importutils.import_class CONF = cfg.CONF class ExtensionController(wsgi.Controller): @classmethod def authorize_target_action(cls, context, target_rule_name, target_id, is_cluster=False): target = None if is_cluster: target = cluster_models.Cluster.load(context, target_id) else: target = instance_models.Instance.load(context, target_id) if not target: if is_cluster: raise exception.ClusterNotFound(cluster=target_id) raise exception.InstanceNotFound(instance=target_id) target_type = 'cluster' if is_cluster else 'instance' policy.authorize_on_target( context, '%s:extension:%s' % (target_type, target_rule_name), {'tenant': target.tenant_id}) @six.add_metaclass(abc.ABCMeta) class BaseDatastoreRootController(ExtensionController): """Base class that defines the contract for root controllers.""" @abc.abstractmethod def root_index(self, req, tenant_id, instance_id, is_cluster): pass @abc.abstractmethod def root_create(self, req, body, tenant_id, instance_id, is_cluster): pass @abc.abstractmethod def root_delete(self, req, tenant_id, instance_id, is_cluster): pass @staticmethod def _get_password_from_body(body=None): if body: return body['password'] if 'password' in body else None return None class DefaultRootController(BaseDatastoreRootController): def root_index(self, req, tenant_id, instance_id, is_cluster): """Returns True if root is enabled; False otherwise.""" if is_cluster: raise exception.ClusterOperationNotSupported( operation='show_root') LOG.info("Getting root enabled for instance '%s'.", instance_id) LOG.info("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] is_root_enabled = models.Root.load(context, instance_id) return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200) def root_create(self, req, body, tenant_id, instance_id, is_cluster): if is_cluster: raise exception.ClusterOperationNotSupported( operation='enable_root') LOG.info("Enabling root for instance '%s'.", instance_id) LOG.info("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] password = DefaultRootController._get_password_from_body(body) root = models.Root.create(context, instance_id, password) return wsgi.Result(views.RootCreatedView(root).data(), 200) def root_delete(self, req, tenant_id, instance_id, is_cluster): if is_cluster: raise exception.ClusterOperationNotSupported( operation='disable_root') LOG.info("Disabling root for instance '%s'.", instance_id) LOG.info("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] is_root_enabled = models.Root.load(context, instance_id) if not is_root_enabled: raise exception.RootHistoryNotFound() models.Root.delete(context, instance_id) return wsgi.Result(None, 204) class ClusterRootController(DefaultRootController): def root_index(self, req, tenant_id, instance_id, is_cluster): """Returns True if root is enabled; False otherwise.""" if is_cluster: return 
self.cluster_root_index(req, tenant_id, instance_id) else: return self.instance_root_index(req, tenant_id, instance_id) def instance_root_index(self, req, tenant_id, instance_id): LOG.info("Getting root enabled for instance '%s'.", instance_id) LOG.info("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] try: is_root_enabled = models.ClusterRoot.load(context, instance_id) except exception.UnprocessableEntity: raise exception.UnprocessableEntity( _("Cluster %s is not ready.") % instance_id) return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200) def cluster_root_index(self, req, tenant_id, cluster_id): LOG.info("Getting root enabled for cluster '%s'.", cluster_id) single_instance_id, cluster_instances = self._get_cluster_instance_id( tenant_id, cluster_id) return self.instance_root_index(req, tenant_id, single_instance_id) def _block_cluster_instance_actions(self): return False def check_cluster_instance_actions(self, instance_id): # Check if instance is in a cluster and if actions are allowed instance = DBInstance.find_by(id=instance_id) if instance.cluster_id and self._block_cluster_instance_actions(): raise exception.ClusterInstanceOperationNotSupported() def root_create(self, req, body, tenant_id, instance_id, is_cluster): if is_cluster: return self.cluster_root_create(req, body, tenant_id, instance_id) else: self.check_cluster_instance_actions(instance_id) return self.instance_root_create(req, body, instance_id) def instance_root_create(self, req, body, instance_id, cluster_instances=None): LOG.info("Enabling root for instance '%s'.", instance_id) LOG.info("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] password = ClusterRootController._get_password_from_body(body) root = models.ClusterRoot.create(context, instance_id, password, cluster_instances) return wsgi.Result(views.RootCreatedView(root).data(), 200) def cluster_root_create(self, req, body, tenant_id, cluster_id): LOG.info("Enabling root for cluster '%s'.", cluster_id) single_instance_id, cluster_instances = self._get_cluster_instance_id( tenant_id, cluster_id) return self.instance_root_create(req, body, single_instance_id, cluster_instances) def _find_cluster_node_ids(self, tenant_id, cluster_id): args = {'tenant_id': tenant_id, 'cluster_id': cluster_id, 'deleted': False} cluster_instances = DBInstance.find_all(**args).all() return [db_instance.id for db_instance in cluster_instances] def _get_cluster_instance_id(self, tenant_id, cluster_id): instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id) single_instance_id = instance_ids[0] return single_instance_id, instance_ids class RootController(ExtensionController): """Controller for instance functionality.""" def index(self, req, tenant_id, instance_id): """Returns True if root is enabled; False otherwise.""" datastore_manager, is_cluster = self._get_datastore(tenant_id, instance_id) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action(context, 'root:index', instance_id, is_cluster=is_cluster) root_controller = self.load_root_controller(datastore_manager) return root_controller.root_index(req, tenant_id, instance_id, is_cluster) def create(self, req, tenant_id, instance_id, body=None): """Enable the root user for the db instance.""" datastore_manager, is_cluster = self._get_datastore(tenant_id, instance_id) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action(context, 'root:create', instance_id, is_cluster=is_cluster) root_controller = self.load_root_controller(datastore_manager) if 
root_controller is not None: return root_controller.root_create(req, body, tenant_id, instance_id, is_cluster) else: opt = 'root_controller' raise NoSuchOptError(opt, group='datastore_manager') def delete(self, req, tenant_id, instance_id): datastore_manager, is_cluster = self._get_datastore(tenant_id, instance_id) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action(context, 'root:delete', instance_id, is_cluster=is_cluster) root_controller = self.load_root_controller(datastore_manager) if root_controller is not None: return root_controller.root_delete(req, tenant_id, instance_id, is_cluster) else: opt = 'root_controller' raise NoSuchOptError(opt, group='datastore_manager') def _get_datastore(self, tenant_id, instance_or_cluster_id): """ Returns datastore manager and a boolean showing if instance_or_cluster_id is a cluster id """ args = {'id': instance_or_cluster_id, 'tenant_id': tenant_id} is_cluster = False try: db_info = DBInstance.find_by(**args) except exception.ModelNotFoundError: is_cluster = True db_info = DBCluster.find_by(**args) ds_version = (datastore_models.DatastoreVersion. load_by_uuid(db_info.datastore_version_id)) ds_manager = ds_version.manager return (ds_manager, is_cluster) def load_root_controller(self, manager): try: clazz = CONF.get(manager).get('root_controller') LOG.debug("Loading Root Controller class %s.", clazz) root_controller = import_class(clazz) return root_controller() except NoSuchOptError: return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/common/views.py0000644000175000017500000000247700000000000022516 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
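# Illustrative sketch of the config-driven dispatch used by
# load_root_controller() above: each datastore manager maps to a dotted class
# path that is imported and instantiated on demand. The mapping dict below is
# hypothetical; Trove actually reads CONF.<manager>.root_controller.
import importlib

_example_root_controllers = {
    'mysql': 'trove.extensions.common.service.DefaultRootController',
}

def _example_load_root_controller(manager):
    path = _example_root_controllers.get(manager)
    if path is None:
        return None  # mirrors the NoSuchOptError fallback above
    module_name, _, class_name = path.rpartition('.')
    clazz = getattr(importlib.import_module(module_name), class_name)
    return clazz()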
class UserView(object): def __init__(self, user): self.user = user def data(self): user_dict = { "name": self.user.name, "host": self.user.host, "databases": self.user.databases } return {"user": user_dict} class RootCreatedView(UserView): def data(self): user_dict = { "name": self.user.name, "password": self.user.password } return {"user": user_dict} class RootEnabledView(object): def __init__(self, is_root_enabled): self.is_root_enabled = is_root_enabled def data(self): return {'rootEnabled': self.is_root_enabled} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7521102 trove-12.1.0.dev92/trove/extensions/mgmt/0000755000175000017500000000000000000000000020451 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/__init__.py0000644000175000017500000000000000000000000022550 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7521102 trove-12.1.0.dev92/trove/extensions/mgmt/clusters/0000755000175000017500000000000000000000000022315 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/clusters/__init__.py0000644000175000017500000000000000000000000024414 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/clusters/models.py0000644000175000017500000000336300000000000024157 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
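# Illustrative sketch of the view contract above: a view wraps a model object
# and exposes a data() dict that the WSGI layer serializes to JSON. The
# SimpleNamespace is a hypothetical stand-in for a real root-user model.
from types import SimpleNamespace

def _example_root_created_payload():
    user = SimpleNamespace(name='root', password='s3cr3t')
    # Shape matches RootCreatedView.data() above: only name and password.
    return {'user': {'name': user.name, 'password': user.password}}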
from trove.cluster import models as cluster_models from trove.instance import models as instance_models class MgmtCluster(cluster_models.Cluster): def __init__(self, context, db_info, datastore=None, datastore_version=None): super(MgmtCluster, self).__init__(context, db_info, datastore, datastore_version) @classmethod def load(cls, context, id): db_cluster = cluster_models.DBCluster.find_by(id=id) return cls(context, db_cluster) @classmethod def load_all(cls, context, deleted=None): args = {} if deleted is not None: args['deleted'] = deleted db_infos = cluster_models.DBCluster.find_all(**args) clusters = [cls(context, db_info) for db_info in db_infos] return clusters @property def instances(self): db_instances = instance_models.DBInstance.find_all( cluster_id=self.db_info.id, deleted=False) instances = [instance_models.load_any_instance( self.context, db_inst.id) for db_inst in db_instances] return instances ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/clusters/service.py0000644000175000017500000000643600000000000024340 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
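# Illustrative sketch of the filter-building idiom in MgmtCluster.load_all()
# above: optional criteria are added to the kwargs dict only when supplied,
# so "no filter" (None) and "filter on False" remain distinct. find_all is
# the model query API the dict would be splatted into.
def _example_build_filters(deleted=None, tenant_id=None):
    args = {}
    if deleted is not None:
        args['deleted'] = deleted
    if tenant_id is not None:
        args['tenant_id'] = tenant_id
    return args  # e.g. DBCluster.find_all(**args)

# _example_build_filters() -> {} (all rows);
# _example_build_filters(deleted=False) -> {'deleted': False}.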
from oslo_log import log as logging from trove.cluster.service import ClusterController import trove.common.apischema as apischema from trove.common.auth import admin_context from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.mgmt.clusters import models from trove.extensions.mgmt.clusters import views LOG = logging.getLogger(__name__) class MgmtClusterController(ClusterController): """Controller for cluster functionality.""" schemas = apischema.mgmt_cluster @classmethod def get_action_schema(cls, body, action_schema): action_type = list(body.keys())[0] return action_schema.get(action_type, {}) @admin_context def index(self, req, tenant_id): """Return a list of clusters.""" LOG.debug("Showing a list of clusters for tenant '%s'.", tenant_id) LOG.info("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] deleted = None deleted_q = req.GET.get('deleted', '').lower() if deleted_q in ['true']: deleted = True elif deleted_q in ['false']: deleted = False clusters = models.MgmtCluster.load_all(context, deleted=deleted) view_cls = views.MgmtClustersView return wsgi.Result(view_cls(clusters, req=req).data(), 200) @admin_context def show(self, req, tenant_id, id): """Return a single cluster.""" LOG.info("Showing cluster for tenant '%(tenant_id)s'.\n" "req : '%(req)s'\n" "id : '%(id)s'", { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.MgmtCluster.load(context, id) return wsgi.Result( views.load_mgmt_view(cluster, req=req).data(), 200) @admin_context def action(self, req, body, tenant_id, id): LOG.debug("Committing an action against cluster %(cluster)s for " "tenant '%(tenant)s'.", {'cluster': id, 'tenant': tenant_id}) LOG.info("req : '%s'\n\n", req) if not body: raise exception.BadRequest(_("Invalid request body.")) context = req.environ[wsgi.CONTEXT_KEY] cluster = models.MgmtCluster.load(context=context, id=id) if 'reset-task' in body: return self._action_reset_task(context, cluster, body) else: msg = _("Invalid cluster action requested.") raise exception.BadRequest(msg) def _action_reset_task(self, context, cluster, body): cluster.reset_task() return wsgi.Result(None, 202) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/clusters/views.py0000644000175000017500000000375100000000000024032 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
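# Illustrative sketch of the tri-state query-parameter parsing in
# MgmtClusterController.index() above: '?deleted=true' filters to deleted
# clusters, '?deleted=false' to live ones, and anything else (including the
# parameter being absent) means "no filter" (None).
def _example_parse_deleted(query_value):
    value = (query_value or '').lower()
    if value == 'true':
        return True
    if value == 'false':
        return False
    return None

# _example_parse_deleted('TRUE') -> True; _example_parse_deleted('') -> None.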
from trove.cluster.views import ClusterView from trove.common.strategies.cluster import strategy class MgmtClusterView(ClusterView): def __init__(self, cluster, req=None, load_servers=True): super(MgmtClusterView, self).__init__(cluster, req, load_servers) def data(self): result = super(MgmtClusterView, self).data() result['cluster']['tenant_id'] = self.cluster.tenant_id result['cluster']['deleted'] = bool(self.cluster.deleted) if self.cluster.deleted_at: result['cluster']['deleted_at'] = self.cluster.deleted_at return result def build_instances(self): raise NotImplementedError() class MgmtClustersView(object): """Shows a list of MgmtCluster objects.""" def __init__(self, clusters, req=None): self.clusters = clusters self.req = req def data(self): data = [] for cluster in self.clusters: data.append(self.data_for_cluster(cluster)) return {'clusters': data} def data_for_cluster(self, cluster): view = load_mgmt_view(cluster, req=self.req, load_servers=False) return view.data()['cluster'] def load_mgmt_view(cluster, req, load_servers=True): manager = cluster.datastore_version.manager return strategy.load_api_strategy(manager).mgmt_cluster_view_class( cluster, req, load_servers) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7521102 trove-12.1.0.dev92/trove/extensions/mgmt/configuration/0000755000175000017500000000000000000000000023320 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/configuration/__init__.py0000644000175000017500000000000000000000000025417 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/configuration/service.py0000644000175000017500000001320200000000000025330 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
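# Illustrative sketch of the strategy lookup behind load_mgmt_view() above:
# the datastore manager name selects a per-datastore API strategy, which in
# turn names the view class used to render the cluster. The registry dict is
# a hypothetical stand-in for strategy.load_api_strategy().
class _ExampleClusterView(object):
    def __init__(self, cluster, req=None, load_servers=True):
        self.cluster = cluster
        self.req = req
        self.load_servers = load_servers

_example_view_registry = {'mongodb': _ExampleClusterView}

def _example_load_mgmt_view(manager, cluster, req=None, load_servers=True):
    view_class = _example_view_registry[manager]
    return view_class(cluster, req, load_servers)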
from oslo_log import log as logging import trove.common.apischema as apischema from trove.common.auth import admin_context from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi from trove.configuration import models as config_models from trove.datastore import models as ds_models from trove.extensions.mgmt.configuration import views LOG = logging.getLogger(__name__) class ConfigurationsParameterController(wsgi.Controller): """Controller for configuration parameters functionality.""" schemas = apischema.mgmt_configuration @admin_context def index(self, req, tenant_id, version_id): """List all configuration parameters.""" ds_version = ds_models.DatastoreVersion.load_by_uuid(version_id) config_params = config_models.DatastoreConfigurationParameters rules = config_params.load_parameters( ds_version.id, show_deleted=True) return wsgi.Result(views.MgmtConfigurationParametersView(rules).data(), 200) @admin_context def show(self, req, tenant_id, version_id, id): """Show a configuration parameter.""" ds_models.DatastoreVersion.load_by_uuid(version_id) config_params = config_models.DatastoreConfigurationParameters rule = config_params.load_parameter_by_name( version_id, id, show_deleted=True) return wsgi.Result(views.MgmtConfigurationParameterView(rule).data(), 200) def _validate_data_type(self, parameter): min_size = None max_size = None data_type = parameter['data_type'] if data_type == "integer": if 'max_size' not in parameter: raise exception.BadRequest(_("max_size is required for " "integer data type.")) if 'min_size' not in parameter: raise exception.BadRequest(_("min_size is required for " "integer data type.")) max_size = int(parameter['max_size']) min_size = int(parameter['min_size']) if max_size < min_size: raise exception.BadRequest( _("max_size must be greater than or equal to min_size.")) return data_type, min_size, max_size @admin_context def create(self, req, body, tenant_id, version_id): """Create configuration parameter for datastore version.""" LOG.info("Creating configuration parameter for datastore") LOG.debug("req : '%s'\n\n", req) LOG.debug("body : '%s'\n\n", body) if not body: raise exception.BadRequest(_("Invalid request body.")) parameter = body['configuration-parameter'] name = parameter['name'] restart_required = bool(parameter['restart_required']) data_type, min_size, max_size = self._validate_data_type(parameter) datastore_version = ds_models.DatastoreVersion.load_by_uuid(version_id) rule = config_models.DatastoreConfigurationParameters.create( name=name, datastore_version_id=datastore_version.id, restart_required=restart_required, data_type=data_type, max_size=max_size, min_size=min_size ) return wsgi.Result( views.MgmtConfigurationParameterView(rule).data(), 200) @admin_context def update(self, req, body, tenant_id, version_id, id): """Updating configuration parameter for datastore version.""" LOG.info("Updating configuration parameter for datastore") LOG.debug("req : '%s'\n\n", req) LOG.debug("body : '%s'\n\n", body) if not body: raise exception.BadRequest(_("Invalid request body.")) parameter = body['configuration-parameter'] restart_required = bool(parameter['restart_required']) data_type, min_size, max_size = self._validate_data_type(parameter) ds_models.DatastoreVersion.load_by_uuid(version_id) ds_config_params = config_models.DatastoreConfigurationParameters param = ds_config_params.load_parameter_by_name( version_id, id) param.restart_required = restart_required param.data_type = data_type param.max_size = max_size 
param.min_size = min_size param.save() return wsgi.Result( views.MgmtConfigurationParameterView(param).data(), 200) @admin_context def delete(self, req, tenant_id, version_id, id): """Delete configuration parameter for datastore version.""" LOG.info("Deleting configuration parameter for datastore") LOG.debug("req : '%s'\n\n", req) ds_config_params = config_models.DatastoreConfigurationParameters try: ds_config_params.delete(version_id, id) except exception.NotFound: raise exception.BadRequest(_("Parameter %s does not exist in the " "database.") % id) return wsgi.Result(None, 204) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/configuration/views.py0000644000175000017500000000351400000000000025032 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging LOG = logging.getLogger(__name__) class MgmtConfigurationParameterView(object): def __init__(self, config): self.config = config def data(self): # v1 api is to be a 'true' or 'false' json boolean instead of 1/0 restart_required = True if self.config.restart_required else False ret = { "name": self.config.name, "datastore_version_id": self.config.datastore_version_id, "restart_required": restart_required, "type": self.config.data_type, "deleted": self.config.deleted, "deleted_at": self.config.deleted_at, } if self.config.max_size: ret["max_size"] = int(self.config.max_size) if self.config.min_size: ret["min_size"] = int(self.config.min_size) return ret class MgmtConfigurationParametersView(object): def __init__(self, configs): self.configs = configs def data(self): params = [] LOG.debug(self.configs.__dict__) for p in self.configs: param = MgmtConfigurationParameterView(p) params.append(param.data()) return {"configuration-parameters": params} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7521102 trove-12.1.0.dev92/trove/extensions/mgmt/datastores/0000755000175000017500000000000000000000000022622 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/datastores/__init__.py0000644000175000017500000000000000000000000024721 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/datastores/service.py0000644000175000017500000001433600000000000024643 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from glanceclient import exc as glance_exceptions from oslo_log import log as logging from trove.common import apischema from trove.common.auth import admin_context from trove.common import clients from trove.common import exception from trove.common import utils from trove.common import wsgi from trove.datastore import models from trove.extensions.mgmt.datastores import views LOG = logging.getLogger(__name__) class DatastoreVersionController(wsgi.Controller): """Controller for datastore version registration functionality.""" schemas = apischema.mgmt_datastore_version @admin_context def create(self, req, body, tenant_id): """Adds a new datastore version.""" context = req.environ[wsgi.CONTEXT_KEY] datastore_name = body['version']['datastore_name'] version_name = body['version']['name'] manager = body['version']['datastore_manager'] image_id = body['version']['image'] packages = body['version']['packages'] if type(packages) is list: packages = ','.join(packages) active = body['version']['active'] default = body['version']['default'] LOG.info("Tenant: '%(tenant)s' is adding the datastore " "version: '%(version)s' to datastore: '%(datastore)s'", {'tenant': tenant_id, 'version': version_name, 'datastore': datastore_name}) client = clients.create_glance_client(context) try: client.images.get(image_id) except glance_exceptions.HTTPNotFound: raise exception.ImageNotFound(uuid=image_id) try: datastore = models.Datastore.load(datastore_name) except exception.DatastoreNotFound: # Create the datastore if datastore_name does not exist.
LOG.info("Creating datastore %s", datastore_name) datastore = models.DBDatastore() datastore.id = utils.generate_uuid() datastore.name = datastore_name datastore.save() try: models.DatastoreVersion.load(datastore, version_name) raise exception.DatastoreVersionAlreadyExists(name=version_name) except exception.DatastoreVersionNotFound: models.update_datastore_version(datastore.name, version_name, manager, image_id, packages, active) if default: models.update_datastore(datastore.name, version_name) return wsgi.Result(None, 202) @admin_context def index(self, req, tenant_id): """Lists all datastore-versions for given datastore.""" db_ds_versions = models.DatastoreVersions.load_all(only_active=False) datastore_versions = [models.DatastoreVersion.load_by_uuid( ds_version.id) for ds_version in db_ds_versions] return wsgi.Result( views.DatastoreVersionsView(datastore_versions).data(), 200) @admin_context def show(self, req, tenant_id, id): """Lists details of a datastore-version for given datastore.""" datastore_version = models.DatastoreVersion.load_by_uuid(id) return wsgi.Result( views.DatastoreVersionView(datastore_version).data(), 200) @admin_context def edit(self, req, body, tenant_id, id): """Updates the attributes of a datastore version.""" context = req.environ[wsgi.CONTEXT_KEY] datastore_version = models.DatastoreVersion.load_by_uuid(id) LOG.info("Tenant: '%(tenant)s' is updating the datastore " "version: '%(version)s' for datastore: '%(datastore)s'", {'tenant': tenant_id, 'version': datastore_version.name, 'datastore': datastore_version.datastore_name}) manager = body.get('datastore_manager', datastore_version.manager) image_id = body.get('image', datastore_version.image_id) active = body.get('active', datastore_version.active) default = body.get('default', None) packages = body.get('packages', datastore_version.packages) if type(packages) is list: packages = ','.join(packages) client = clients.create_glance_client(context) try: client.images.get(image_id) except glance_exceptions.HTTPNotFound: raise exception.ImageNotFound(uuid=image_id) models.update_datastore_version(datastore_version.datastore_name, datastore_version.name, manager, image_id, packages, active) if default: models.update_datastore(datastore_version.datastore_name, datastore_version.name) elif (default is False and datastore_version.default is True): models.update_datastore(datastore_version.datastore_name, None) return wsgi.Result(None, 202) @admin_context def delete(self, req, tenant_id, id): """Remove an existing datastore version.""" datastore_version = models.DatastoreVersion.load_by_uuid(id) datastore = models.Datastore.load(datastore_version.datastore_id) LOG.info("Tenant: '%(tenant)s' is removing the datastore " "version: '%(version)s' for datastore: '%(datastore)s'", {'tenant': tenant_id, 'version': datastore_version.name, 'datastore': datastore.name}) if datastore.default_version_id == datastore_version.id: models.update_datastore(datastore.name, None) datastore_version.delete() return wsgi.Result(None, 202) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/datastores/views.py0000644000175000017500000000341000000000000024327 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class DatastoreVersionView(object): def __init__(self, datastore_version): self.datastore_version = datastore_version def data(self): datastore_version_dict = { "id": self.datastore_version.id, "name": self.datastore_version.name, "datastore_id": self.datastore_version.datastore_id, "datastore_name": self.datastore_version.datastore_name, "datastore_manager": self.datastore_version.manager, "image": self.datastore_version.image_id, "packages": (self.datastore_version.packages.split( ',') if self.datastore_version.packages else ['']), "active": self.datastore_version.active, "default": self.datastore_version.default} return {'version': datastore_version_dict} class DatastoreVersionsView(object): def __init__(self, datastore_versions): self.datastore_versions = datastore_versions def data(self): data = [] for datastore_version in self.datastore_versions: data.append( DatastoreVersionView(datastore_version).data()['version']) return {'versions': data} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/mgmt/instances/0000755000175000017500000000000000000000000022440 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/instances/__init__.py0000644000175000017500000000000000000000000024537 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/extensions/mgmt/instances/models.py0000644000175000017500000002513100000000000024277 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
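# Illustrative sketch of the packages round-trip above: the controller
# flattens a JSON list into a comma-separated string for storage, and
# DatastoreVersionView splits it back into a list (with [''] when empty).
# Helper names are hypothetical.
def _example_pack_packages(packages):
    return ','.join(packages) if isinstance(packages, list) else packages

def _example_unpack_packages(packages):
    return packages.split(',') if packages else ['']

# _example_unpack_packages(_example_pack_packages(['mysql-server-5.7']))
# -> ['mysql-server-5.7']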
import datetime from oslo_log import log as logging from trove.common import cfg from trove.common import clients from trove.common import exception from trove.common.i18n import _ from trove.common import timeutils from trove.extensions.mysql import models as mysql_models from trove.instance import models as instance_models from trove import rpc LOG = logging.getLogger(__name__) CONF = cfg.CONF def load_mgmt_instances(context, deleted=None, client=None, include_clustered=None): if not client: client = clients.create_nova_client( context, CONF.service_credentials.region_name ) mgmt_servers = client.servers.list(search_opts={'all_tenants': 1}) LOG.info("Found %d servers in Nova", len(mgmt_servers if mgmt_servers else [])) args = {} if deleted is not None: args['deleted'] = deleted if not include_clustered: args['cluster_id'] = None db_infos = instance_models.DBInstance.find_all(**args) instances = MgmtInstances.load_status_from_existing(context, db_infos, mgmt_servers) return instances def load_mgmt_instance(cls, context, id, include_deleted): try: instance = instance_models.load_instance( cls, context, id, needs_server=True, include_deleted=include_deleted) client = clients.create_nova_client( context, CONF.service_credentials.region_name ) try: server = client.rdservers.get(instance.server_id) except AttributeError: server = client.servers.get(instance.server_id) if hasattr(server, 'host'): instance.server.host = server.host elif hasattr(server, 'hostId'): instance.server.host = server.hostId if hasattr(server, 'deleted'): instance.server.deleted = server.deleted if hasattr(server, 'deleted_at'): instance.server.deleted_at = server.deleted_at if hasattr(server, 'local_id'): instance.server.local_id = server.local_id assert instance.server is not None except Exception as e: LOG.error(e) instance = instance_models.load_instance( cls, context, id, needs_server=False, include_deleted=include_deleted) return instance class SimpleMgmtInstance(instance_models.BaseInstance): def __init__(self, context, db_info, server, datastore_status): super(SimpleMgmtInstance, self).__init__(context, db_info, server, datastore_status) @property def status(self): if self.deleted: return instance_models.InstanceStatus.SHUTDOWN return super(SimpleMgmtInstance, self).status @property def deleted(self): return self.db_info.deleted @property def deleted_at(self): return self.db_info.deleted_at @classmethod def load(cls, context, id, include_deleted=False): return load_mgmt_instance(cls, context, id, include_deleted) @property def task_description(self): return self.db_info.task_description class DetailedMgmtInstance(SimpleMgmtInstance): def __init__(self, *args, **kwargs): super(DetailedMgmtInstance, self).__init__(*args, **kwargs) self.volume = None self.volume_used = None self.volume_total = None self.root_history = None @classmethod def load(cls, context, id, include_deleted=False): instance = load_mgmt_instance(cls, context, id, include_deleted) client = clients.create_cinder_client(context) try: instance.volume = client.volumes.get(instance.volume_id) except Exception: instance.volume = None # Populate the volume_used attribute from the guest agent. 
instance_models.load_guest_info(instance, context, id) instance.root_history = mysql_models.RootHistory.load(context=context, instance_id=id) return instance class MgmtInstance(instance_models.Instance): def get_diagnostics(self): return self.get_guest().get_diagnostics() def stop_db(self): return self.get_guest().stop_db() def get_hwinfo(self): return self.get_guest().get_hwinfo() def rpc_ping(self): return self.get_guest().rpc_ping() class MgmtInstances(instance_models.Instances): @staticmethod def load_status_from_existing(context, db_infos, servers): def load_instance(context, db, status, server=None): return SimpleMgmtInstance(context, db, server, status) if context is None: raise TypeError(_("Argument context not defined.")) find_server = instance_models.create_server_list_matcher(servers) instances = instance_models.Instances._load_servers_status( load_instance, context, db_infos, find_server) _load_servers(instances, find_server) return instances def _load_servers(instances, find_server): for instance in instances: db = instance.db_info instance.server = None try: server = find_server(db.id, db.compute_instance_id) instance.server = server except Exception as ex: LOG.exception(ex) return instances def publish_exist_events(transformer, admin_context): notifier = rpc.get_notifier("taskmanager") notifications = transformer() # clear out admin_context.auth_token so it does not get logged admin_context.auth_token = None for notification in notifications: notifier.info(admin_context, "trove.instance.exists", notification) class NotificationTransformer(object): def __init__(self, **kwargs): pass @staticmethod def _get_audit_period(): now = timeutils.utcnow() start_time = now - datetime.timedelta( seconds=CONF.exists_notification_interval) audit_start = timeutils.isotime(start_time) audit_end = timeutils.isotime(now) return audit_start, audit_end def _get_service_id(self, datastore_manager, id_map): if datastore_manager in id_map: datastore_manager_id = id_map[datastore_manager] else: datastore_manager_id = cfg.UNKNOWN_SERVICE_ID LOG.error("Datastore ID for Manager (%s) is not configured", datastore_manager) return datastore_manager_id def transform_instance(self, instance, audit_start, audit_end): payload = { 'audit_period_beginning': audit_start, 'audit_period_ending': audit_end, 'created_at': instance.created, 'display_name': instance.name, 'instance_id': instance.id, 'instance_name': instance.name, 'instance_type_id': instance.flavor_id, 'launched_at': instance.created, 'nova_instance_id': instance.server_id, 'region': CONF.region, 'state_description': instance.status.lower(), 'state': instance.status.lower(), 'tenant_id': instance.tenant_id } payload['service_id'] = self._get_service_id( instance.datastore_version.manager, CONF.notification_service_id) return payload def __call__(self): audit_start, audit_end = NotificationTransformer._get_audit_period() messages = [] db_infos = instance_models.DBInstance.find_all(deleted=False) for db_info in db_infos: try: service_status = instance_models.InstanceServiceStatus.find_by( instance_id=db_info.id) except exception.ModelNotFoundError: # There is a small window during which the db resource for an # instance exists, but no InstanceServiceStatus for it has yet # been created. We skip sending the notification message for # all such instances. These instances are too new and will get # picked up in the next round of notifications. LOG.debug("InstanceServiceStatus not found for %s. 
" "Will wait to send notification.", db_info.id) continue instance = SimpleMgmtInstance(None, db_info, None, service_status) message = self.transform_instance(instance, audit_start, audit_end) messages.append(message) return messages class NovaNotificationTransformer(NotificationTransformer): def __init__(self, **kwargs): super(NovaNotificationTransformer, self).__init__(**kwargs) self.context = kwargs['context'] self.nova_client = clients.create_admin_nova_client(self.context) self._flavor_cache = {} def _lookup_flavor(self, flavor_id): if flavor_id in self._flavor_cache: LOG.debug("Flavor cache hit for %s", flavor_id) return self._flavor_cache[flavor_id] # fetch flavor resource from nova LOG.info("Flavor cache miss for %s", flavor_id) flavor = self.nova_client.flavors.get(flavor_id) self._flavor_cache[flavor_id] = flavor.name if flavor else 'unknown' return self._flavor_cache[flavor_id] def __call__(self): audit_start, audit_end = NotificationTransformer._get_audit_period() instances = load_mgmt_instances(self.context, deleted=False, client=self.nova_client) messages = [] for instance in filter( lambda inst: inst.status != 'SHUTDOWN' and inst.server, instances): message = { 'instance_type': self._lookup_flavor(instance.flavor_id), 'user_id': instance.server.user_id } message.update(self.transform_instance(instance, audit_start, audit_end)) messages.append(message) return messages ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/instances/service.py0000644000175000017500000002164200000000000024457 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.backup.models import Backup import trove.common.apischema as apischema from trove.common.auth import admin_context from trove.common import exception from trove.common.i18n import _ from trove.common import notification from trove.common.notification import StartNotification from trove.common import wsgi from trove.extensions.mgmt.instances import models from trove.extensions.mgmt.instances import views from trove.extensions.mgmt.instances.views import DiagnosticsView from trove.extensions.mgmt.instances.views import HwInfoView from trove.extensions.mysql import models as mysql_models from trove.instance import models as instance_models from trove.instance.service import InstanceController LOG = logging.getLogger(__name__) class MgmtInstanceController(InstanceController): """Controller for instance functionality.""" schemas = apischema.mgmt_instance @classmethod def get_action_schema(cls, body, action_schema): action_type = list(body.keys())[0] return action_schema.get(action_type, {}) @admin_context def index(self, req, tenant_id, detailed=False): """Return all instances.""" LOG.info("Indexing a database instance for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n", { "tenant_id": tenant_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] deleted = None deleted_q = req.GET.get('deleted', '').lower() if deleted_q in ['true']: deleted = True elif deleted_q in ['false']: deleted = False clustered_q = req.GET.get('include_clustered', '').lower() include_clustered = clustered_q == 'true' try: instances = models.load_mgmt_instances( context, deleted=deleted, include_clustered=include_clustered) except nova_exceptions.ClientException as e: LOG.exception(e) return wsgi.Result(str(e), 403) view_cls = views.MgmtInstancesView return wsgi.Result(view_cls(instances, req=req).data(), 200) @admin_context def show(self, req, tenant_id, id): """Return a single instance.""" LOG.info("Showing a database instance %(id)s for tenant " "'%(tenant_id)s'\n" "req : '%(req)s'\n\n", { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] deleted_q = req.GET.get('deleted', '').lower() include_deleted = deleted_q == 'true' server = models.DetailedMgmtInstance.load(context, id, include_deleted) root_history = mysql_models.RootHistory.load(context=context, instance_id=id) return wsgi.Result( views.MgmtInstanceDetailView( server, req=req, root_history=root_history).data(), 200) @admin_context def action(self, req, body, tenant_id, id): LOG.info("Committing an ACTION against a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n", { "tenant_id": tenant_id, "req": req, "id": id}) if not body: raise exception.BadRequest(_("Invalid request body.")) context = req.environ[wsgi.CONTEXT_KEY] instance = models.MgmtInstance.load(context=context, id=id) _actions = { 'stop': self._action_stop, 'reboot': self._action_reboot, 'migrate': self._action_migrate, 'reset-task-status': self._action_reset_task_status } selected_action = None for key in body: if key in _actions: if selected_action is not None: msg = _("Only one action can be specified per request.") raise exception.BadRequest(msg) selected_action = _actions[key] else: msg = _("Invalid instance action: %s") % key raise exception.BadRequest(msg) if selected_action: return selected_action(context, instance, req, body) else: raise exception.BadRequest(_("Invalid request body.")) def _action_stop(self, context, instance, 
req, body): LOG.debug("Stopping MySQL on instance %s.", instance.id) instance.stop_db() return wsgi.Result(None, 202) def _action_reboot(self, context, instance, req, body): LOG.debug("Rebooting instance %s.", instance.id) context.notification = notification.DBaaSInstanceReboot( context, request=req ) with StartNotification(context, instance_id=instance.id): instance.reboot() return wsgi.Result(None, 202) def _action_migrate(self, context, instance, req, body): LOG.debug("Migrating instance %s.", instance.id) LOG.debug("body['migrate']= %s", body['migrate']) host = body['migrate'].get('host', None) context.notification = notification.DBaaSInstanceMigrate(context, request=req) with StartNotification(context, host=host): instance.migrate(host) return wsgi.Result(None, 202) def _action_reset_task_status(self, context, instance, req, body): LOG.debug("Setting Task-Status to NONE on instance %s.", instance.id) instance.reset_task_status() LOG.debug("Failing backups for instance %s.", instance.id) Backup.fail_for_instance(instance.id) return wsgi.Result(None, 202) @admin_context def root(self, req, tenant_id, id): """Return the date and time root was enabled on an instance, if ever. """ LOG.info("Showing root history for a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n", { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] try: instance_models.Instance.load(context=context, id=id) except exception.TroveError as e: LOG.exception(e) return wsgi.Result(str(e), 404) rhv = views.RootHistoryView(id) reh = mysql_models.RootHistory.load(context=context, instance_id=id) if reh: rhv = views.RootHistoryView(reh.id, enabled=reh.created, user_id=reh.user) return wsgi.Result(rhv.data(), 200) @admin_context def hwinfo(self, req, tenant_id, id): """Return a single instance hardware info.""" LOG.info("Showing hardware info for a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n", { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] instance = models.MgmtInstance.load(context=context, id=id) hwinfo = instance.get_hwinfo() return wsgi.Result(HwInfoView(id, hwinfo).data(), 200) @admin_context def diagnostics(self, req, tenant_id, id): """Return instance diagnostics for a single instance.""" LOG.info("Showing diagnostic info for a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n", { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] instance = models.MgmtInstance.load(context=context, id=id) diagnostics = instance.get_diagnostics() return wsgi.Result(DiagnosticsView(id, diagnostics).data(), 200) @admin_context def rpc_ping(self, req, tenant_id, id): """Checks if instance is reachable via rpc.""" LOG.info("Sending RPC PING for a database " "instance %(id)s for tenant '%(tenant_id)s'\n" "req : '%(req)s'\n\n", { "tenant_id": tenant_id, "req": req, "id": id}) context = req.environ[wsgi.CONTEXT_KEY] instance = models.MgmtInstance.load(context=context, id=id) instance.rpc_ping() return wsgi.Result(None, 204) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/instances/views.py0000644000175000017500000001361500000000000024155 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.instance.views import InstanceDetailView class MgmtInstanceView(InstanceDetailView): def __init__(self, instance, req=None): super(MgmtInstanceView, self).__init__(instance, req) def data(self): result = super(MgmtInstanceView, self).data() if self.instance.server is None: result['instance']['server'] = None else: server = self.instance.server result['instance']['server'] = { 'id': server.id, 'name': server.name, 'status': server.status, 'tenant_id': server.tenant_id, } if hasattr(server, 'host'): result['instance']['server']['host'] = server.host else: result['instance']['server']['host'] = server.hostId if hasattr(server, 'deleted'): result['instance']['server']['deleted'] = server.deleted if hasattr(server, 'deleted_at'): result['instance']['server']['deleted_at'] = server.deleted_at if hasattr(server, 'local_id'): result['instance']['server']['local_id'] = server.local_id try: service_status = self.instance.datastore_status.status.api_status except AttributeError: service_status = None result['instance']['service_status'] = service_status result['instance']['tenant_id'] = self.instance.tenant_id result['instance']['deleted'] = bool(self.instance.deleted) result['instance']['deleted_at'] = self.instance.deleted_at result['instance']['task_description'] = self.instance.task_description return result class MgmtInstanceDetailView(MgmtInstanceView): """Works with a full-blown instance.""" def __init__(self, instance, req, root_history=None): super(MgmtInstanceDetailView, self).__init__(instance, req=req) self.root_history = root_history def data(self): result = super(MgmtInstanceDetailView, self).data() if self.instance.server is not None: server = self.instance.server result['instance']['server'].update( {'addresses': server.addresses}) elif self.instance.server_id: result['instance']['server'] = {"id": self.instance.server_id} if self.root_history: result['instance']['root_enabled'] = self.root_history.created result['instance']['root_enabled_by'] = self.root_history.user if self.instance.volume: volume = self.instance.volume result['instance']['volume'] = { "attachments": volume.attachments, "availability_zone": volume.availability_zone, "created_at": volume.created_at, "id": volume.id, "size": volume.size, "status": volume.status, "used": self.instance.volume_used or None, "total": self.instance.volume_total or None, } elif self.instance.volume_id: result['instance']['volume'] = {"id": self.instance.volume_id} else: result['instance']['volume'] = None description = self.instance.datastore_status.status.description result['instance']['guest_status'] = {"state_description": description} return result class MgmtInstancesView(object): """Shows a list of MgmtInstance objects.""" def __init__(self, instances, req=None): self.instances = instances self.req = req def data(self): data = [] # These are model instances for instance in self.instances: data.append(self.data_for_instance(instance)) return {'instances': data} def data_for_instance(self, 
instance): view = MgmtInstanceView(instance, req=self.req) return view.data()['instance'] class RootHistoryView(object): def __init__(self, instance_id, enabled='Never', user_id='Nobody'): self.instance_id = instance_id self.enabled = enabled self.user = user_id def data(self): return { 'root_history': { 'id': self.instance_id, 'enabled': self.enabled, 'user': self.user, } } class HwInfoView(object): def __init__(self, instance_id, hwinfo): self.instance_id = instance_id self.hwinfo = hwinfo def data(self): return { 'hwinfo': { 'mem_total': self.hwinfo['mem_total'], 'num_cpus': self.hwinfo['num_cpus'], } } class DiagnosticsView(object): def __init__(self, instance_id, diagnostics): self.instance_id = instance_id self.diagnostics = diagnostics def data(self): return { 'diagnostics': { 'version': self.diagnostics['version'], 'threads': self.diagnostics['threads'], 'fdSize': self.diagnostics['fd_size'], 'vmSize': self.diagnostics['vm_size'], 'vmPeak': self.diagnostics['vm_peak'], 'vmRss': self.diagnostics['vm_rss'], 'vmHwm': self.diagnostics['vm_hwm'], } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/mgmt/quota/0000755000175000017500000000000000000000000021602 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/quota/__init__.py0000644000175000017500000000000000000000000023701 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/quota/service.py0000644000175000017500000000553700000000000023626 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
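# NOTE: Editorial sketch, not part of the original module: the plain-dict
# payloads rendered by the management views defined in views.py above. The
# instance id and sample values are hypothetical.
def _example_mgmt_view_payloads():
    from trove.extensions.mgmt.instances import views
    root = views.RootHistoryView('inst-1', enabled='2020-01-01T00:00:00',
                                 user_id='admin')
    hwinfo = views.HwInfoView('inst-1', {'mem_total': 2048, 'num_cpus': 2})
    # Each data() call returns a dict ready for wsgi.Result serialization.
    return root.data(), hwinfo.data()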
from oslo_log import log as logging from trove.common.auth import admin_context from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.mgmt.quota import views from trove.quota.models import Quota from trove.quota.quota import QUOTAS as quota_engine LOG = logging.getLogger(__name__) class QuotaController(wsgi.Controller): """Controller for quota functionality.""" @admin_context def show(self, req, tenant_id, id): """Return all quotas for this tenant.""" LOG.info("Indexing quota info for tenant '%(id)s'\n" "req : '%(req)s'\n\n", {"id": id, "req": req}) usages = quota_engine.get_all_quota_usages_by_tenant(id) limits = quota_engine.get_all_quotas_by_tenant(id) for key in usages.keys(): setattr(usages[key], "limit", limits[key].hard_limit) return wsgi.Result(views.QuotaUsageView(usages).data(), 200) @admin_context def update(self, req, body, tenant_id, id): LOG.info("Updating quota limits for tenant '%(id)s'\n" "req : '%(req)s'\n\n", {"id": id, "req": req}) if not body: raise exception.BadRequest(_("Invalid request body.")) quotas = {} quota = None registered_resources = quota_engine.resources for resource, limit in body['quotas'].items(): if limit is None: continue elif limit < -1: raise exception.QuotaLimitTooSmall(limit=limit, resource=resource) if resource == "xmlns": continue if resource not in registered_resources: raise exception.QuotaResourceUnknown(unknown=resource) try: quota = Quota.find_by(tenant_id=id, resource=resource) quota.hard_limit = limit quota.save() except exception.ModelNotFoundError: quota = Quota.create(tenant_id=id, resource=resource, hard_limit=limit) quotas[resource] = quota return wsgi.Result(views.QuotaView(quotas).data(), 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/quota/views.py0000644000175000017500000000243200000000000023312 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
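# NOTE: Editorial example, not part of the original module: a request body
# accepted by QuotaController.update() above. Resource names must be
# registered with the quota engine; the limits shown are made up. A value of
# None is skipped, -1 conventionally means "unlimited", and anything below
# -1 is rejected with QuotaLimitTooSmall.
EXAMPLE_QUOTA_UPDATE_BODY = {
    'quotas': {
        'instances': 10,
        'backups': -1,
        'volumes': None,
    }
}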
class QuotaView(object): def __init__(self, quotas): self.quotas = quotas def data(self): rtn = {} for resource_name, quota in self.quotas.items(): rtn[resource_name] = quota.hard_limit return {'quotas': rtn} class QuotaUsageView(object): def __init__(self, usages): self.usages = usages def data(self): return {'quotas': [{'resource': resource, 'in_use': usage['in_use'], 'reserved': usage['reserved'], 'limit': usage['limit'] } for resource, usage in self.usages.items()]} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/mgmt/upgrade/0000755000175000017500000000000000000000000022100 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/upgrade/__init__.py0000644000175000017500000000000000000000000024177 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/upgrade/models.py0000644000175000017500000000316600000000000023743 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common.clients import guest_client class UpgradeMessageSender(object): """ This class handles the business logic for sending an rpc message to the guest """ @staticmethod def create(context, instance_id, instance_version, location, metadata=None): instance_id = UpgradeMessageSender._validate(instance_id, 36) if instance_version: instance_version = UpgradeMessageSender._validate( instance_version, 255) if location: location = UpgradeMessageSender._validate(location, 255) def _create_resources(): guest_client(context, instance_id).upgrade( instance_version, location, metadata) return _create_resources @staticmethod def _validate(s, max_length): if s is None: raise ValueError() s = s.strip() length = len(s) if length < 1 or length > max_length: raise ValueError() return s ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mgmt/upgrade/service.py0000644000175000017500000000322200000000000024111 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
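# NOTE: Editorial sketch, not part of the original module: the deferred
# execution pattern used by UpgradeMessageSender in models.py above.
# create() only validates its arguments and returns a closure; the RPC
# message is sent when the closure is called. All argument values here are
# hypothetical.
def _example_send_upgrade(context):
    from trove.extensions.mgmt.upgrade.models import UpgradeMessageSender
    send = UpgradeMessageSender.create(
        context, '11111111-2222-3333-4444-555555555555',
        instance_version='12.0.0',
        location='http://example.com/trove-guest.tar.gz')
    send()  # the guest_client(...).upgrade(...) call happens here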
from oslo_log import log as logging import trove.common.apischema as apischema from trove.common.auth import admin_context from trove.common import wsgi from trove.extensions.mgmt.upgrade.models import UpgradeMessageSender LOG = logging.getLogger(__name__) class UpgradeController(wsgi.Controller): """ Controller for guest agent upgrade """ schemas = apischema.upgrade @admin_context def create(self, req, body, tenant_id, instance_id): LOG.info("Sending upgrade notifications\nreq : '%(req)s'\n" "Admin tenant_id: %(tenant_id)s", {"tenant_id": tenant_id, "req": req}) context = req.environ.get(wsgi.CONTEXT_KEY) upgrade = body['upgrade'] instance_version = upgrade.get('instance_version') location = upgrade.get('location') metadata = upgrade.get('metadata') send = UpgradeMessageSender.create( context, instance_id, instance_version, location, metadata) send() return wsgi.Result(None, 202) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/mongodb/0000755000175000017500000000000000000000000021132 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mongodb/__init__.py0000644000175000017500000000000000000000000023231 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mongodb/service.py0000644000175000017500000000345100000000000023147 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
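# NOTE: Editorial example, not part of the original module: the request body
# consumed by UpgradeController.create() above. Field values are
# illustrative; the controller reads each field with .get(), so individual
# fields may be omitted.
EXAMPLE_UPGRADE_BODY = {
    'upgrade': {
        'instance_version': '12.0.0',
        'location': 'http://example.com/trove-guest.tar.gz',
        'metadata': {'some_key': 'some_value'},
    }
}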
from trove.common import cfg from trove.common import exception from trove.extensions.common.service import ClusterRootController from trove.instance.models import DBInstance CONF = cfg.CONF MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mongodb' class MongoDBRootController(ClusterRootController): def delete(self, req, tenant_id, instance_id): raise exception.DatastoreOperationNotSupported( operation='disable_root', datastore=MANAGER) def _block_cluster_instance_actions(self): return True def _find_query_router_ids(self, tenant_id, cluster_id): args = {'tenant_id': tenant_id, 'cluster_id': cluster_id, 'deleted': False, 'type': 'query_router'} query_router_instances = DBInstance.find_all(**args).all() return [db_instance.id for db_instance in query_router_instances] def _get_cluster_instance_id(self, tenant_id, cluster_id): instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id) single_instance_id = self._find_query_router_ids(tenant_id, cluster_id)[0] return single_instance_id, instance_ids ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/mysql/0000755000175000017500000000000000000000000020652 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mysql/__init__.py0000644000175000017500000000000000000000000022751 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mysql/common.py0000644000175000017500000000630100000000000022514 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves.urllib.parse import unquote from trove.common.db.mysql import models as guest_models from trove.common import exception def populate_validated_databases(dbs): """ Create a serializable request with user provided data for creating new databases. """ try: databases = [] unique_identities = set() for database in dbs: mydb = guest_models.MySQLSchema(name=database.get('name', '')) mydb.check_reserved() if mydb.name in unique_identities: raise exception.DatabaseInitialDatabaseDuplicateError() unique_identities.add(mydb.name) mydb.character_set = database.get('character_set', '') mydb.collate = database.get('collate', '') databases.append(mydb.serialize()) return databases except ValueError as ve: # str(ve) contains user input and may include '%' which can cause a # format str vulnerability. Escape the '%' to avoid this. This is # okay to do since we're not using dict args here in any case. 
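        # For example, a ValueError message like "invalid name: 50% off"
        # would otherwise be treated as a format specifier downstream.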
safe_string = str(ve).replace('%', '%%') raise exception.BadRequest(safe_string) def populate_users(users, initial_databases=None): """Create a serializable request containing users.""" users_data = [] unique_identities = set() for user in users: u = guest_models.MySQLUser(name=user.get('name', ''), host=user.get('host', '%')) u.check_reserved() user_identity = (u.name, u.host) if user_identity in unique_identities: raise exception.DatabaseInitialUserDuplicateError() unique_identities.add(user_identity) u.password = user.get('password', '') user_dbs = user.get('databases', '') # user_db_names guaranteed unique and non-empty by apischema user_db_names = [user_db.get('name', '') for user_db in user_dbs] for user_db_name in user_db_names: if (initial_databases is not None and user_db_name not in initial_databases): raise exception.DatabaseForUserNotInDatabaseListError( user=u.name, database=user_db_name) u.databases = user_db_name users_data.append(u.serialize()) return users_data def unquote_user_host(user_hostname): unquoted = unquote(user_hostname) if '@' not in unquoted: return unquoted, '%' if unquoted.endswith('@'): return unquoted, '%' splitup = unquoted.split('@') host = splitup[-1] user = '@'.join(splitup[:-1]) return user, host ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mysql/models.py0000644000175000017500000002402100000000000022506 0ustar00coreycorey00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Model classes that extend the instances functionality for MySQL instances. 
""" from trove.common import cfg from trove.common.clients import create_guest_client from trove.common.db.mysql import models as guest_models from trove.common import exception from trove.common.notification import StartNotification from trove.common import utils from trove.extensions.common.models import load_and_verify from trove.extensions.common.models import RootHistory CONF = cfg.CONF def persisted_models(): return {'root_enabled_history': RootHistory} class User(object): _data_fields = ['name', 'host', 'password', 'databases'] def __init__(self, name, host, password, databases): self.name = name self.host = host self.password = password self.databases = databases @classmethod def load(cls, context, instance_id, username, hostname, root_user=False): load_and_verify(context, instance_id) validate = guest_models.MySQLUser(name=username, host=hostname) if root_user: validate.make_root() validate.check_reserved() client = create_guest_client(context, instance_id) found_user = client.get_user(username=username, hostname=hostname) if not found_user: return None database_names = [{'name': db['_name']} for db in found_user['_databases']] return cls(found_user['_name'], found_user['_host'], found_user['_password'], database_names) @classmethod def create(cls, context, instance_id, users): # Load InstanceServiceStatus to verify if it's running load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) for user in users: user_name = user['_name'] host_name = user['_host'] userhost = "%s@%s" % (user_name, host_name) existing_users, _nadda = Users.load_with_client( client, limit=1, marker=userhost, include_marker=True) if (len(existing_users) > 0 and str(existing_users[0].name) == str(user_name) and str(existing_users[0].host) == str(host_name)): raise exception.UserAlreadyExists(name=user_name, host=host_name) return client.create_user(users) @classmethod def delete(cls, context, instance_id, user): load_and_verify(context, instance_id) with StartNotification(context, instance_id=instance_id, username=user): create_guest_client(context, instance_id).delete_user(user) @classmethod def access(cls, context, instance_id, username, hostname): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) databases = client.list_access(username, hostname) dbs = [] for db in databases: dbs.append(Schema(name=db['_name'], collate=db['_collate'], character_set=db['_character_set'])) return UserAccess(dbs) @classmethod def grant(cls, context, instance_id, username, hostname, databases): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) client.grant_access(username, hostname, databases) @classmethod def revoke(cls, context, instance_id, username, hostname, database): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) client.revoke_access(username, hostname, database) @classmethod def change_password(cls, context, instance_id, users): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) change_users = [] for user in users: change_user = {'name': user.name, 'host': user.host, 'password': user.password, } change_users.append(change_user) client.change_passwords(change_users) @classmethod def update_attributes(cls, context, instance_id, username, hostname, user_attrs): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) user_changed = user_attrs.get('name') host_changed = user_attrs.get('host') user = 
user_changed or username host = host_changed or hostname validate = guest_models.MySQLUser(name=user, host=host) validate.check_reserved() userhost = "%s@%s" % (user, host) if user_changed or host_changed: existing_users, _nadda = Users.load_with_client( client, limit=1, marker=userhost, include_marker=True) if (len(existing_users) > 0 and existing_users[0].name == user and existing_users[0].host == host): raise exception.UserAlreadyExists(name=user, host=host) client.update_attributes(username, hostname, user_attrs) class UserAccess(object): _data_fields = ['databases'] def __init__(self, databases): self.databases = databases def load_via_context(cls, context, instance_id): """Creates guest and fetches pagination arguments from the context.""" load_and_verify(context, instance_id) limit = utils.pagination_limit(context.limit, cls.DEFAULT_LIMIT) client = create_guest_client(context, instance_id) # The REST API standard dictates that we *NEVER* include the marker. return cls.load_with_client(client=client, limit=limit, marker=context.marker, include_marker=False) class Users(object): DEFAULT_LIMIT = CONF.users_page_size @classmethod def load(cls, context, instance_id): return load_via_context(cls, context, instance_id) @classmethod def load_with_client(cls, client, limit, marker, include_marker): user_list, next_marker = client.list_users( limit=limit, marker=marker, include_marker=include_marker) model_users = [] for user in user_list: mysql_user = guest_models.MySQLUser.deserialize(user, verify=False) if mysql_user.name in cfg.get_ignored_users(): continue # TODO(hub-cap): databases are not being returned in the # reference agent dbs = [] for db in mysql_user.databases: dbs.append({'name': db['_name']}) model_users.append(User(mysql_user.name, mysql_user.host, mysql_user.password, dbs)) return model_users, next_marker class Schema(object): _data_fields = ['name', 'collate', 'character_set'] def __init__(self, name, collate, character_set): self.name = name self.collate = collate self.character_set = character_set @classmethod def create(cls, context, instance_id, schemas): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) for schema in schemas: schema_name = schema['_name'] existing_schema, _nadda = Schemas.load_with_client( client, limit=1, marker=schema_name, include_marker=True) if (len(existing_schema) > 0 and str(existing_schema[0].name) == str(schema_name)): raise exception.DatabaseAlreadyExists(name=schema_name) return client.create_database(schemas) @classmethod def delete(cls, context, instance_id, schema): load_and_verify(context, instance_id) create_guest_client(context, instance_id).delete_database(schema) class Schemas(object): DEFAULT_LIMIT = CONF.databases_page_size @classmethod def load(cls, context, instance_id): return load_via_context(cls, context, instance_id) @classmethod def load_with_client(cls, client, limit, marker, include_marker): schemas, next_marker = client.list_databases( limit=limit, marker=marker, include_marker=include_marker) model_schemas = [] for schema in schemas: mysql_schema = guest_models.MySQLSchema.deserialize(schema, verify=False) if mysql_schema.name in cfg.get_ignored_dbs(): continue model_schemas.append(Schema(mysql_schema.name, mysql_schema.collate, mysql_schema.character_set)) return model_schemas, next_marker @classmethod def find(cls, context, instance_id, schema_id): load_and_verify(context, instance_id) client = create_guest_client(context, instance_id) model_schemas, _ = 
cls.load_with_client(client, 1, schema_id, True) if model_schemas and model_schemas[0].name == schema_id: return model_schemas[0] return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mysql/service.py0000644000175000017500000004166600000000000022675 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import strutils import webob.exc import trove.common.apischema as apischema from trove.common import cfg from trove.common.db.mysql import models as guest_models from trove.common import exception from trove.common.i18n import _ from trove.common import notification from trove.common.notification import StartNotification from trove.common import pagination from trove.common.utils import correct_id_with_req from trove.common import wsgi from trove.extensions.common.service import ExtensionController from trove.extensions.mysql.common import populate_users from trove.extensions.mysql.common import populate_validated_databases from trove.extensions.mysql.common import unquote_user_host from trove.extensions.mysql import models from trove.extensions.mysql import views LOG = logging.getLogger(__name__) import_class = importutils.import_class CONF = cfg.CONF class UserController(ExtensionController): """Controller for database user functionality.""" schemas = apischema.user @classmethod def get_schema(cls, action, body): action_schema = super(UserController, cls).get_schema(action, body) if 'update_all' == action: update_type = list(body.keys())[0] action_schema = action_schema.get(update_type, {}) return action_schema def index(self, req, tenant_id, instance_id): """Return all users.""" LOG.info("Listing users for instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action(context, 'user:index', instance_id) users, next_marker = models.Users.load(context, instance_id) view = views.UsersView(users) paged = pagination.SimplePaginatedDataView(req.url, 'users', view, next_marker) return wsgi.Result(paged.data(), 200) def create(self, req, body, tenant_id, instance_id): """Creates a set of users.""" LOG.info("Creating users for instance '%(id)s'\n" "req : '%(req)s'\n\n" "body: '%(body)s'\n\n", {"id": instance_id, "req": strutils.mask_password(req), "body": strutils.mask_password(body)}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action(context, 'user:create', instance_id) context.notification = notification.DBaaSUserCreate(context, request=req) users = body['users'] with StartNotification(context, instance_id=instance_id, username=",".join([user['name'] for user in users])): try: model_users = populate_users(users) models.User.create(context, instance_id, model_users) except (ValueError, AttributeError) as e: raise exception.BadRequest(_("User 
create error: %(e)s") % {'e': e}) return wsgi.Result(None, 202) def delete(self, req, tenant_id, instance_id, id): LOG.info("Delete instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action(context, 'user:delete', instance_id) id = correct_id_with_req(id, req) username, host = unquote_user_host(id) user = None context.notification = notification.DBaaSUserDelete(context, request=req) with StartNotification(context, instance_id=instance_id, username=username): try: user = guest_models.MySQLUser(name=username, host=host) found_user = models.User.load(context, instance_id, username, host) if not found_user: user = None except (ValueError, AttributeError) as e: raise exception.BadRequest(_("User delete error: %(e)s") % {'e': e}) if not user: raise exception.UserNotFound(uuid=id) models.User.delete(context, instance_id, user.serialize()) return wsgi.Result(None, 202) def show(self, req, tenant_id, instance_id, id): """Return a single user.""" LOG.info("Showing a user for instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action(context, 'user:show', instance_id) id = correct_id_with_req(id, req) username, host = unquote_user_host(id) user = None try: user = models.User.load(context, instance_id, username, host) except (ValueError, AttributeError) as e: raise exception.BadRequest(_("User show error: %(e)s") % {'e': e}) if not user: raise exception.UserNotFound(uuid=id) view = views.UserView(user) return wsgi.Result(view.data(), 200) def update(self, req, body, tenant_id, instance_id, id): """Change attributes for one user.""" LOG.info("Updating user attributes for instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": strutils.mask_password(req)}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action(context, 'user:update', instance_id) id = correct_id_with_req(id, req) username, hostname = unquote_user_host(id) user = None user_attrs = body['user'] context.notification = notification.DBaaSUserUpdateAttributes( context, request=req) with StartNotification(context, instance_id=instance_id, username=username): try: user = models.User.load(context, instance_id, username, hostname) except (ValueError, AttributeError) as e: raise exception.BadRequest(_("Error loading user: %(e)s") % {'e': e}) if not user: raise exception.UserNotFound(uuid=id) try: models.User.update_attributes(context, instance_id, username, hostname, user_attrs) except (ValueError, AttributeError) as e: raise exception.BadRequest(_("User update error: %(e)s") % {'e': e}) return wsgi.Result(None, 202) def update_all(self, req, body, tenant_id, instance_id): """Change the password of one or more users.""" LOG.info("Updating user password for instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": strutils.mask_password(req)}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action(context, 'user:update_all', instance_id) context.notification = notification.DBaaSUserChangePassword( context, request=req) users = body['users'] model_users = [] with StartNotification(context, instance_id=instance_id, username=",".join([user['name'] for user in users])): for user in users: try: mu = guest_models.MySQLUser(name=user['name'], host=user.get('host'), password=user['password']) found_user = models.User.load(context, instance_id, mu.name, mu.host) if not found_user: user_and_host = mu.name if mu.host: user_and_host 
+= '@' + mu.host raise exception.UserNotFound(uuid=user_and_host) model_users.append(mu) except (ValueError, AttributeError) as e: raise exception.BadRequest(_("Error loading user: %(e)s") % {'e': e}) try: models.User.change_password(context, instance_id, model_users) except (ValueError, AttributeError) as e: raise exception.BadRequest(_("User password update error: " "%(e)s") % {'e': e}) return wsgi.Result(None, 202) class UserAccessController(ExtensionController): """Controller for adding and removing database access for a user.""" schemas = apischema.user @classmethod def get_schema(cls, action, body): schema = {} if 'update_all' == action: schema = cls.schemas.get(action).get('databases') return schema def _get_user(self, context, instance_id, user_id): username, hostname = unquote_user_host(user_id) try: user = models.User.load(context, instance_id, username, hostname) except (ValueError, AttributeError) as e: raise exception.BadRequest(_("Error loading user: %(e)s") % {'e': e}) if not user: raise exception.UserNotFound(uuid=user_id) return user def index(self, req, tenant_id, instance_id, user_id): """Show permissions for the given user.""" LOG.info("Showing user access for instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action( context, 'user_access:index', instance_id) # Make sure this user exists. user_id = correct_id_with_req(user_id, req) user = self._get_user(context, instance_id, user_id) if not user: LOG.error("No such user: %(user)s ", {'user': user}) raise exception.UserNotFound(uuid=user) username, hostname = unquote_user_host(user_id) access = models.User.access(context, instance_id, username, hostname) view = views.UserAccessView(access.databases) return wsgi.Result(view.data(), 200) def update(self, req, body, tenant_id, instance_id, user_id): """Grant access for a user to one or more databases.""" LOG.info("Granting user access for instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action( context, 'user_access:update', instance_id) context.notification = notification.DBaaSUserGrant( context, request=req) user_id = correct_id_with_req(user_id, req) user = self._get_user(context, instance_id, user_id) if not user: LOG.error("No such user: %(user)s ", {'user': user}) raise exception.UserNotFound(uuid=user) username, hostname = unquote_user_host(user_id) databases = [db['name'] for db in body['databases']] with StartNotification(context, instance_id=instance_id, username=username, database=databases): models.User.grant(context, instance_id, username, hostname, databases) return wsgi.Result(None, 202) def delete(self, req, tenant_id, instance_id, user_id, id): """Revoke access for a user.""" LOG.info("Revoking user access for instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action( context, 'user_access:delete', instance_id) context.notification = notification.DBaaSUserRevoke( context, request=req) user_id = correct_id_with_req(user_id, req) user = self._get_user(context, instance_id, user_id) if not user: LOG.error("No such user: %(user)s ", {'user': user}) raise exception.UserNotFound(uuid=user) username, hostname = unquote_user_host(user_id) access = models.User.access(context, instance_id, username, hostname) databases = [db.name for db in access.databases] with StartNotification(context, 
instance_id=instance_id, username=username, database=databases): if id not in databases: raise exception.DatabaseNotFound(uuid=id) models.User.revoke(context, instance_id, username, hostname, id) return wsgi.Result(None, 202) class SchemaController(ExtensionController): """Controller for database schema functionality.""" schemas = apischema.dbschema def index(self, req, tenant_id, instance_id): """Return all schemas.""" LOG.info("Listing schemas for instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action( context, 'database:index', instance_id) schemas, next_marker = models.Schemas.load(context, instance_id) view = views.SchemasView(schemas) paged = pagination.SimplePaginatedDataView(req.url, 'databases', view, next_marker) return wsgi.Result(paged.data(), 200) def create(self, req, body, tenant_id, instance_id): """Creates a set of schemas.""" LOG.info("Creating schema for instance '%(id)s'\n" "req : '%(req)s'\n\n" "body: '%(body)s'\n\n", {"id": instance_id, "req": req, "body": body}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action( context, 'database:create', instance_id) schemas = body['databases'] context.notification = notification.DBaaSDatabaseCreate(context, request=req) with StartNotification(context, instance_id=instance_id, dbname=".".join([db['name'] for db in schemas])): try: model_schemas = populate_validated_databases(schemas) models.Schema.create(context, instance_id, model_schemas) except (ValueError, AttributeError) as e: raise exception.BadRequest(_("Database create error: %(e)s") % {'e': e}) return wsgi.Result(None, 202) def delete(self, req, tenant_id, instance_id, id): LOG.info("Deleting schema for instance '%(id)s'\n" "req : '%(req)s'\n\n", {"id": instance_id, "req": req}) context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action( context, 'database:delete', instance_id) context.notification = notification.DBaaSDatabaseDelete( context, request=req) with StartNotification(context, instance_id=instance_id, dbname=id): try: schema = guest_models.MySQLSchema(name=id) schema.check_delete() if not models.Schemas.find(context, instance_id, id): raise exception.DatabaseNotFound(uuid=id) models.Schema.delete(context, instance_id, schema.serialize()) except (ValueError, AttributeError) as e: raise exception.BadRequest(_("Database delete error: %(e)s") % {'e': e}) return wsgi.Result(None, 202) def show(self, req, tenant_id, instance_id, id): context = req.environ[wsgi.CONTEXT_KEY] self.authorize_target_action( context, 'database:show', instance_id) raise webob.exc.HTTPNotImplemented() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/mysql/views.py0000644000175000017500000000355400000000000022370 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
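# NOTE: Editorial sketch, not part of the original module: the user-id
# parsing convention the controllers in service.py above rely on (see
# unquote_user_host() in common.py). The id in the URL is "name" or
# "name@host", URL-quoted, and the host defaults to '%'. The sample ids
# below are hypothetical.
def _example_unquote_user_host():
    from trove.extensions.mysql.common import unquote_user_host
    assert unquote_user_host('jsmith') == ('jsmith', '%')
    assert unquote_user_host('jsmith@10.0.0.1') == ('jsmith', '10.0.0.1')
    # '@' inside the user name is kept; only the last '@' splits the host.
    assert unquote_user_host('j@smith@10.0.0.1') == ('j@smith', '10.0.0.1')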
class UserView(object): def __init__(self, user): self.user = user def data(self): user_dict = { "name": self.user.name, "host": self.user.host, "databases": self.user.databases } return {"user": user_dict} class UsersView(object): def __init__(self, users): self.users = users def data(self): userlist = [{"name": user.name, "host": user.host, "databases": user.databases} for user in self.users] return {"users": userlist} class UserAccessView(object): def __init__(self, databases): self.databases = databases def data(self): dbs = [{"name": db.name} for db in self.databases] return {"databases": dbs} class SchemaView(object): def __init__(self, schema): self.schema = schema def data(self): return {"name": self.schema.name} class SchemasView(object): def __init__(self, schemas): self.schemas = schemas def data(self): data = [] # These are model instances for schema in self.schemas: data.append(SchemaView(schema).data()) return {"databases": data} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/pxc/0000755000175000017500000000000000000000000020277 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/pxc/__init__.py0000644000175000017500000000000000000000000022376 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/pxc/service.py0000644000175000017500000000212400000000000022310 0ustar00coreycorey00000000000000# Copyright [2016] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import cfg from trove.common import exception from trove.extensions.common.service import ClusterRootController CONF = cfg.CONF MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'pxc' class PxcRootController(ClusterRootController): def root_delete(self, req, tenant_id, instance_id, is_cluster): raise exception.DatastoreOperationNotSupported( operation='disable_root', datastore=MANAGER) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/redis/0000755000175000017500000000000000000000000020613 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/redis/__init__.py0000644000175000017500000000000000000000000022712 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/redis/models.py0000644000175000017500000000206300000000000022451 0ustar00coreycorey00000000000000# Copyright 2017 Eayun, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.common.clients import create_guest_client from trove.extensions.common.models import load_and_verify from trove.extensions.common.models import Root class RedisRoot(Root): @classmethod def get_auth_password(cls, context, instance_id): load_and_verify(context, instance_id) password = create_guest_client(context, instance_id).get_root_password() return password ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/redis/service.py0000644000175000017500000001661200000000000022633 0ustar00coreycorey00000000000000# Copyright 2017 Eayun, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import wsgi from trove.extensions.common.service import DefaultRootController from trove.extensions.redis.models import RedisRoot from trove.extensions.redis.views import RedisRootCreatedView from trove.instance.models import DBInstance LOG = logging.getLogger(__name__) CONF = cfg.CONF MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'redis' class RedisRootController(DefaultRootController): def root_create(self, req, body, tenant_id, instance_id, is_cluster): """Enable authentication for a redis instance and its replicas if any """ self._validate_can_perform_action(tenant_id, instance_id, is_cluster, "enable_root") password = DefaultRootController._get_password_from_body(body) slave_instances = self._get_slaves(tenant_id, instance_id) return self._instance_root_create(req, instance_id, password, slave_instances) def root_delete(self, req, tenant_id, instance_id, is_cluster): """Disable authentication for a redis instance and its replicas if any """ self._validate_can_perform_action(tenant_id, instance_id, is_cluster, "disable_root") slave_instances = self._get_slaves(tenant_id, instance_id) return self._instance_root_delete(req, instance_id, slave_instances) def _instance_root_create(self, req, instance_id, password, slave_instances=None): LOG.info("Enabling authentication for instance '%s'.", instance_id) LOG.info("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] original_auth_password = self._get_original_auth_password( context, instance_id) # Do root-enable and roll back once if operation fails. 
try: root = RedisRoot.create(context, instance_id, password) if not password: password = root.password except exception.TroveError: self._rollback_once(req, instance_id, original_auth_password) raise exception.TroveError( _("Failed to do root-enable for instance " "'%(instance_id)s'.") % {'instance_id': instance_id} ) failed_slaves = [] for slave_id in slave_instances: try: LOG.info("Enabling authentication for slave instance " "'%s'.", slave_id) RedisRoot.create(context, slave_id, password) except exception.TroveError: failed_slaves.append(slave_id) return wsgi.Result( RedisRootCreatedView(root, failed_slaves).data(), 200) def _instance_root_delete(self, req, instance_id, slave_instances=None): LOG.info("Disabling authentication for instance '%s'.", instance_id) LOG.info("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] is_root_enabled = RedisRoot.load(context, instance_id) if not is_root_enabled: raise exception.RootHistoryNotFound() original_auth_password = self._get_original_auth_password( context, instance_id) # Do root-disable and roll back once if operation fails. try: RedisRoot.delete(context, instance_id) except exception.TroveError: self._rollback_once(req, instance_id, original_auth_password) raise exception.TroveError( _("Failed to do root-disable for instance " "'%(instance_id)s'.") % {'instance_id': instance_id} ) failed_slaves = [] for slave_id in slave_instances: try: LOG.info("Disabling authentication for slave instance " "'%s'.", slave_id) RedisRoot.delete(context, slave_id) except exception.TroveError: failed_slaves.append(slave_id) if len(failed_slaves) > 0: result = { 'failed_slaves': failed_slaves } return wsgi.Result(result, 200) return wsgi.Result(None, 204) @staticmethod def _rollback_once(req, instance_id, original_auth_password): LOG.info("Rolling back enable/disable authentication " "for instance '%s'.", instance_id) context = req.environ[wsgi.CONTEXT_KEY] try: if not original_auth_password: # Instance never did root-enable before. RedisRoot.delete(context, instance_id) else: # Instance has done root-enable successfully before. # So roll back with original password. RedisRoot.create(context, instance_id, original_auth_password) except exception.TroveError: LOG.exception("Rolling back failed for instance '%s'", instance_id) @staticmethod def _is_slave(tenant_id, instance_id): args = {'id': instance_id, 'tenant_id': tenant_id} instance_info = DBInstance.find_by(**args) return instance_info.slave_of_id @staticmethod def _get_slaves(tenant_id, instance_or_cluster_id, deleted=False): LOG.info("Getting non-deleted slaves of instance '%s', " "if any.", instance_or_cluster_id) args = {'slave_of_id': instance_or_cluster_id, 'tenant_id': tenant_id, 'deleted': deleted} db_infos = DBInstance.find_all(**args) slaves = [] for db_info in db_infos: slaves.append(db_info.id) return slaves @staticmethod def _get_original_auth_password(context, instance_id): # Check if instance did root-enable before and get original password. 
password = None if RedisRoot.load(context, instance_id): try: password = RedisRoot.get_auth_password(context, instance_id) except exception.TroveError: raise exception.TroveError( _("Failed to get original auth password of instance " "'%(instance_id)s'.") % {'instance_id': instance_id} ) return password def _validate_can_perform_action(self, tenant_id, instance_id, is_cluster, operation): if is_cluster: raise exception.ClusterOperationNotSupported( operation=operation) is_slave = self._is_slave(tenant_id, instance_id) if is_slave: raise exception.SlaveOperationNotSupported( operation=operation) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/redis/views.py0000644000175000017500000000205500000000000022324 0ustar00coreycorey00000000000000# Copyright 2017 Eayun, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.extensions.common.views import UserView class RedisRootCreatedView(UserView): def __init__(self, user, failed_slaves): self.failed_slaves = failed_slaves super(RedisRootCreatedView, self).__init__(user) def data(self): user_dict = { "name": self.user.name, "password": self.user.password } return {"user": user_dict, "failed_slaves": self.failed_slaves} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/routes/0000755000175000017500000000000000000000000021026 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/routes/__init__.py0000644000175000017500000000000000000000000023125 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/routes/mgmt.py0000644000175000017500000000600500000000000022345 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
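# RedisRootController above applies an auth change to the primary first
# (rolling back via _rollback_once if that fails) and then fans it out to
# each replica, collecting replicas that failed into a "failed_slaves" list
# instead of failing the whole request. A minimal sketch of that
# fan-out-and-report shape (names and apply_fn are illustrative):
def _fan_out(primary_id, replica_ids, apply_fn):
    apply_fn(primary_id)  # must succeed; the real code rolls back on error
    failed = []
    for replica_id in replica_ids:
        try:
            apply_fn(replica_id)  # replicas are best-effort
        except Exception:
            failed.append(replica_id)
    return {'failed_slaves': failed}


assert _fan_out('m', ['r1', 'r2'], lambda _id: None) == {'failed_slaves': []}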
from trove.common import extensions from trove.extensions.mgmt.clusters.service import MgmtClusterController from trove.extensions.mgmt.configuration import service as conf_service from trove.extensions.mgmt.datastores.service import DatastoreVersionController from trove.extensions.mgmt.instances.service import MgmtInstanceController from trove.extensions.mgmt.quota.service import QuotaController from trove.extensions.mgmt.upgrade.service import UpgradeController class Mgmt(extensions.ExtensionDescriptor): def get_name(self): return "Mgmt" def get_description(self): return "MGMT services such as details diagnostics" def get_alias(self): return "Mgmt" def get_namespace(self): return "http://TBD" def get_updated(self): return "2011-01-22T13:25:27-06:00" def get_resources(self): resources = [] instances = extensions.ResourceExtension( '{tenant_id}/mgmt/instances', MgmtInstanceController(), member_actions={'root': 'GET', 'diagnostics': 'GET', 'hwinfo': 'GET', 'rpc_ping': 'GET', 'action': 'POST'}) resources.append(instances) clusters = extensions.ResourceExtension( '{tenant_id}/mgmt/clusters', MgmtClusterController(), member_actions={'action': 'POST'}) resources.append(clusters) quota = extensions.ResourceExtension( '{tenant_id}/mgmt/quotas', QuotaController(), member_actions={}) resources.append(quota) upgrade = extensions.ResourceExtension( '{tenant_id}/mgmt/instances/{instance_id}/upgrade', UpgradeController(), member_actions={}) resources.append(upgrade) datastore_configuration_parameters = extensions.ResourceExtension( '{tenant_id}/mgmt/datastores/versions/{version_id}/parameters', conf_service.ConfigurationsParameterController(), member_actions={}) resources.append(datastore_configuration_parameters) datastore_version = extensions.ResourceExtension( '{tenant_id}/mgmt/datastore-versions', DatastoreVersionController(), member_actions={}) resources.append(datastore_version) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/routes/mysql.py0000644000175000017500000000542100000000000022547 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
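# Mgmt.get_resources() above registers each management endpoint as a
# ResourceExtension: a collection URL template plus any extra per-member
# actions routed to controller methods. Summarized as data (paths and
# action names copied from the code above):
_MGMT_ROUTES = [
    ('{tenant_id}/mgmt/instances',
     ['root', 'diagnostics', 'hwinfo', 'rpc_ping', 'action']),
    ('{tenant_id}/mgmt/clusters', ['action']),
    ('{tenant_id}/mgmt/quotas', []),
    ('{tenant_id}/mgmt/instances/{instance_id}/upgrade', []),
    ('{tenant_id}/mgmt/datastores/versions/{version_id}/parameters', []),
    ('{tenant_id}/mgmt/datastore-versions', []),
]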
from trove.common import extensions from trove.extensions.common import service as common_service from trove.extensions.mysql import service as mysql_service class Mysql(extensions.ExtensionDescriptor): def get_name(self): return "Mysql" def get_description(self): return "Non essential MySQL services such as users and schemas" def get_alias(self): return "MYSQL" def get_namespace(self): return "http://TBD" def get_updated(self): return "2011-01-22T13:25:27-06:00" def get_resources(self): resources = [] resource = extensions.ResourceExtension( 'databases', mysql_service.SchemaController(), parent={'member_name': 'instance', 'collection_name': '{tenant_id}/instances'}) resources.append(resource) resource = extensions.ResourceExtension( 'users', mysql_service.UserController(), parent={'member_name': 'instance', 'collection_name': '{tenant_id}/instances'}, member_actions={'update': 'PUT'}, collection_actions={'update_all': 'PUT'}) resources.append(resource) collection_url = '{tenant_id}/instances/:instance_id/users' resource = extensions.ResourceExtension( 'databases', mysql_service.UserAccessController(), parent={'member_name': 'user', 'collection_name': collection_url}, collection_actions={'update': 'PUT'}) resources.append(resource) resource = extensions.ResourceExtension( 'root', common_service.RootController(), parent={'member_name': 'instance', 'collection_name': '{tenant_id}/instances'}, collection_actions={'delete': 'DELETE'}) resources.append(resource) resource = extensions.ResourceExtension( 'root', common_service.RootController(), parent={'member_name': 'instance', 'collection_name': '{tenant_id}/clusters'}) resources.append(resource) return resources ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/security_group/0000755000175000017500000000000000000000000022570 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/security_group/__init__.py0000644000175000017500000000000000000000000024667 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/security_group/models.py0000644000175000017500000001413400000000000024430 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Model classes for Security Groups and Security Group Rules on instances. 
""" from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.models import NetworkRemoteModelBase from trove.db.models import DatabaseModelBase CONF = cfg.CONF LOG = logging.getLogger(__name__) def persisted_models(): return { 'security_groups': SecurityGroup, 'security_group_rules': SecurityGroupRule, 'security_group_instance_associations': SecurityGroupInstanceAssociation, } class SecurityGroup(DatabaseModelBase): _data_fields = ['name', 'description', 'user', 'tenant_id', 'created', 'updated', 'deleted', 'deleted_at'] _table_name = 'security_groups' @property def instance_id(self): return SecurityGroupInstanceAssociation.\ get_instance_id_by_security_group_id(self.id) @classmethod def get_security_group_by_id_or_instance_id(cls, id, tenant_id): try: return SecurityGroup.find_by(id=id, tenant_id=tenant_id, deleted=False) except exception.ModelNotFoundError: return SecurityGroupInstanceAssociation.\ get_security_group_by_instance_id(id) def get_rules(self): return SecurityGroupRule.find_all(group_id=self.id, deleted=False) def delete(self, context, region_name): try: sec_group_rules = self.get_rules() if sec_group_rules: for rule in sec_group_rules: rule.delete(context, region_name) RemoteSecurityGroup.delete(self.id, context, region_name) super(SecurityGroup, self).delete() except exception.TroveError: LOG.exception('Failed to delete security group.') raise exception.TroveError("Failed to delete Security Group") @classmethod def delete_for_instance(cls, instance_id, context, region_name): try: association = SecurityGroupInstanceAssociation.find_by( instance_id=instance_id, deleted=False) if association: sec_group = association.get_security_group() if sec_group: sec_group.delete(context, region_name) association.delete() except (exception.ModelNotFoundError, exception.TroveError): pass class SecurityGroupRule(DatabaseModelBase): _data_fields = ['group_id', 'parent_group_id', 'protocol', 'from_port', 'to_port', 'cidr', 'created', 'updated', 'deleted', 'deleted_at'] _table_name = 'security_group_rules' def get_security_group(self, tenant_id): return SecurityGroup.find_by(id=self.group_id, tenant_id=tenant_id, deleted=False) def delete(self, context, region_name): try: # Delete Remote Security Group Rule RemoteSecurityGroup.delete_rule(self.id, context, region_name) super(SecurityGroupRule, self).delete() except exception.TroveError: LOG.exception('Failed to delete remote security group rule.') raise exception.SecurityGroupRuleDeletionError( "Failed to delete Remote Security Group Rule") class SecurityGroupInstanceAssociation(DatabaseModelBase): _data_fields = ['security_group_id', 'instance_id', 'created', 'updated', 'deleted', 'deleted_at'] _table_name = 'security_group_instance_associations' def get_security_group(self): return SecurityGroup.find_by(id=self.security_group_id, deleted=False) @classmethod def get_security_group_by_instance_id(cls, id): association = SecurityGroupInstanceAssociation.find_by( instance_id=id, deleted=False) return association.get_security_group() @classmethod def get_instance_id_by_security_group_id(cls, secgroup_id): association = SecurityGroupInstanceAssociation.find_by( security_group_id=secgroup_id, deleted=False) return association.instance_id class RemoteSecurityGroup(NetworkRemoteModelBase): _data_fields = ['id', 'name', 'description', 'rules'] def __init__(self, security_group=None, id=None, context=None, region_name=None): if id is None and security_group is 
None: msg = _("Security Group does not have id defined!") raise exception.InvalidModelError(msg) elif security_group is None: region = region_name or CONF.service_credentials.region_name driver = self.get_driver(context, region) self._data_object = driver.get_sec_group_by_id(group_id=id) else: self._data_object = security_group @classmethod def delete(cls, sec_group_id, context, region_name): """Deletes a Security Group.""" driver = cls.get_driver(context, region_name) driver.delete_security_group(sec_group_id) @classmethod def delete_rule(cls, sec_group_rule_id, context, region_name): """Deletes a rule from an existing security group.""" driver = cls.get_driver(context, region_name) driver.delete_security_group_rule(sec_group_rule_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/extensions/vertica/0000755000175000017500000000000000000000000021142 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/vertica/__init__.py0000644000175000017500000000000000000000000023241 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/extensions/vertica/service.py0000644000175000017500000000277300000000000023165 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
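# SecurityGroup.delete() above tears down in a fixed order -- every rule
# first (remote rule, then its local row via SecurityGroupRule.delete),
# then the remote group, then the group's own database row -- and
# delete_for_instance() reaches the group through its instance association.
# Condensed sketch of that ordering (the fake classes are illustrative
# stand-ins, not the Trove models):
class _FakeRule(object):
    def delete(self, context, region):
        pass  # stand-in: remote rule delete + local row delete


class _FakeGroup(object):
    def get_rules(self):
        return [_FakeRule()]

    def delete_remote(self, context, region):
        pass  # stand-in for RemoteSecurityGroup.delete(...)

    def delete_row(self):
        pass  # stand-in for the DatabaseModelBase delete


def _delete_group(group, context=None, region=None):
    for rule in group.get_rules():        # 1. dependent rules
        rule.delete(context, region)
    group.delete_remote(context, region)  # 2. remote (network) group
    group.delete_row()                    # 3. local bookkeeping row


_delete_group(_FakeGroup())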
from trove.common import cfg from trove.common import exception from trove.extensions.common.service import ClusterRootController from trove.instance.models import DBInstance CONF = cfg.CONF MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'vertica' class VerticaRootController(ClusterRootController): def delete(self, req, tenant_id, instance_id): raise exception.DatastoreOperationNotSupported( operation='disable_root', datastore=MANAGER) def _get_cluster_instance_id(self, tenant_id, cluster_id): instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id) args = {'tenant_id': tenant_id, 'cluster_id': cluster_id, 'type': 'master'} master_instance = DBInstance.find_by(**args) master_instance_id = master_instance.id return master_instance_id, instance_ids ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7561104 trove-12.1.0.dev92/trove/flavor/0000755000175000017500000000000000000000000016577 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/flavor/__init__.py0000644000175000017500000000000000000000000020676 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/flavor/models.py0000644000175000017500000000464700000000000020447 0ustar00coreycorey00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
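# VerticaRootController above resolves cluster root operations to the
# cluster's master node: _get_cluster_instance_id() returns the master's
# instance id plus the ids of every node, so the operation can run on the
# master while covering the whole cluster. A toy illustration of that
# return shape (the node dicts are made up for the example):
def _pick_master(nodes):
    master = next(n['id'] for n in nodes if n['type'] == 'master')
    return master, [n['id'] for n in nodes]


assert _pick_master([{'id': 'a', 'type': 'member'},
                     {'id': 'b', 'type': 'master'}]) == ('b', ['a', 'b'])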
"""Model classes that form the core of instance flavor functionality.""" from novaclient import exceptions as nova_exceptions from trove.common.clients import create_nova_client from trove.common import exception from trove.common.models import NovaRemoteModelBase class Flavor(object): _data_fields = ['id', 'links', 'name', 'ram', 'vcpus', 'disk', 'ephemeral'] def __init__(self, flavor=None, context=None, flavor_id=None): if flavor: self.flavor = flavor return if flavor_id and context: try: client = create_nova_client(context) self.flavor = client.flavors.get(flavor_id) except nova_exceptions.NotFound: raise exception.NotFound(uuid=flavor_id) except nova_exceptions.ClientException as e: raise exception.TroveError(str(e)) return msg = ("Flavor is not defined, and" " context and flavor_id were not specified.") raise exception.InvalidModelError(errors=msg) @property def id(self): return self.flavor.id @property def name(self): return self.flavor.name @property def ram(self): return self.flavor.ram @property def vcpus(self): return self.flavor.vcpus @property def links(self): return self.flavor.links @property def disk(self): return self.flavor.disk @property def ephemeral(self): return self.flavor.ephemeral class Flavors(NovaRemoteModelBase): def __init__(self, context): nova_flavors = create_nova_client(context).flavors.list() self.flavors = [Flavor(flavor=item) for item in nova_flavors] def __iter__(self): for item in self.flavors: yield item ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/flavor/service.py0000644000175000017500000000371000000000000020612 0ustar00coreycorey00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from trove.common import exception from trove.common import policy from trove.common import wsgi from trove.flavor import models from trove.flavor import views class FlavorController(wsgi.Controller): """Controller for flavor functionality.""" def show(self, req, tenant_id, id): """Return a single flavor.""" context = req.environ[wsgi.CONTEXT_KEY] self._validate_flavor_id(id) flavor = models.Flavor(context=context, flavor_id=id) # Flavors do not bind to a particular tenant. # Only authorize the current tenant. policy.authorize_on_tenant(context, 'flavor:show') # Pass in the request to build accurate links. 
return wsgi.Result(views.FlavorView(flavor, req).data(), 200) def index(self, req, tenant_id): """Return all flavors.""" context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'flavor:index') flavors = models.Flavors(context=context) return wsgi.Result(views.FlavorsView(flavors, req).data(), 200) def _validate_flavor_id(self, id): if isinstance(id, six.string_types): return try: if int(id) != float(id): raise exception.NotFound(uuid=id) except ValueError: raise exception.NotFound(uuid=id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/flavor/views.py0000644000175000017500000000413200000000000020306 0ustar00coreycorey00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import cfg from trove.common.views import create_links CONF = cfg.CONF class FlavorView(object): def __init__(self, flavor, req=None): self.flavor = flavor self.req = req def data(self): # If the flavor id is not an int, we simply return # no id and rely on str_id instead. if isinstance(self.flavor.id, int) or ( self.flavor.id.isdigit() and not self.flavor.id.startswith('0')): f_id = int(self.flavor.id) else: f_id = None flavor = { 'id': f_id, 'links': self._build_links(), 'name': self.flavor.name, 'ram': self.flavor.ram, 'vcpus': self.flavor.vcpus, 'disk': self.flavor.disk, 'ephemeral': self.flavor.ephemeral, 'str_id': str(self.flavor.id), } if not CONF.trove_volume_support and CONF.device_path is not None: flavor['local_storage'] = self.flavor.ephemeral return {"flavor": flavor} def _build_links(self): return create_links("flavors", self.req, self.flavor.id) class FlavorsView(object): view = FlavorView def __init__(self, flavors, req=None): self.flavors = flavors self.req = req def data(self): data = [] for flavor in self.flavors: data.append(self.view(flavor, req=self.req).data()['flavor']) return {"flavors": data} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/0000755000175000017500000000000000000000000017454 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/__init__.py0000644000175000017500000000000000000000000021553 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/api.py0000644000175000017500000006370200000000000020607 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
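# FlavorView above exposes both 'id' and 'str_id': numeric-looking flavor
# ids without a leading zero are surfaced as integers, anything else gets
# id=None so clients fall back to 'str_id'. A compact restatement of that
# rule (helper name is illustrative):
def _flavor_ids(raw_id):
    if isinstance(raw_id, int) or (
            raw_id.isdigit() and not raw_id.startswith('0')):
        return int(raw_id), str(raw_id)
    return None, str(raw_id)


assert _flavor_ids(7) == (7, '7')
assert _flavor_ids('42') == (42, '42')
assert _flavor_ids('0123') == (None, '0123')      # leading zero -> str only
assert _flavor_ids('m1.small') == (None, 'm1.small')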
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all request to the Platform or Guest VM """ from eventlet import Timeout from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.rpc.client import RemoteError from trove.common import cfg from trove.common import exception from trove.common.notification import NotificationCastWrapper from trove import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class API(object): """API for interacting with the guest manager. API version history: * 1.0 - Initial version. When updating this API, also update API_LATEST_VERSION """ # API_LATEST_VERSION should bump the minor number each time # a method signature is added or changed API_LATEST_VERSION = '1.0' # API_BASE_VERSION should only change on major version upgrade API_BASE_VERSION = '1.0' VERSION_ALIASES = { 'icehouse': '1.0', 'juno': '1.0', 'kilo': '1.0', 'liberty': '1.0', 'mitaka': '1.0', 'newton': '1.0', 'latest': API_LATEST_VERSION } def __init__(self, context, id): self.context = context self.id = id super(API, self).__init__() self.agent_low_timeout = CONF.agent_call_low_timeout self.agent_high_timeout = CONF.agent_call_high_timeout self.agent_snapshot_timeout = CONF.agent_replication_snapshot_timeout version_cap = self.VERSION_ALIASES.get( CONF.upgrade_levels.guestagent, CONF.upgrade_levels.guestagent) self.target = messaging.Target(topic=self._get_routing_key(), version=version_cap) self.client = self.get_client(self.target, version_cap) def get_client(self, target, version_cap, serializer=None): from trove.instance.models import get_instance_encryption_key instance_key = get_instance_encryption_key(self.id) return rpc.get_client(target, key=instance_key, version_cap=version_cap, serializer=serializer) def _call(self, method_name, timeout_sec, version, **kwargs): LOG.debug("Calling %(name)s with timeout %(timeout)s", {'name': method_name, 'timeout': timeout_sec}) try: cctxt = self.client.prepare(version=version, timeout=timeout_sec) result = cctxt.call(self.context, method_name, **kwargs) LOG.debug("Result is %s.", result) return result except RemoteError as r: LOG.exception("Error calling %s", method_name) raise exception.GuestError(original_message=r.value) except Exception as e: LOG.exception("Error calling %s", method_name) raise exception.GuestError(original_message=str(e)) except Timeout: raise exception.GuestTimeout() def _cast(self, method_name, version, **kwargs): LOG.debug("Calling %s asynchronously", method_name) try: with NotificationCastWrapper(self.context, 'guest'): cctxt = self.client.prepare(version=version) cctxt.cast(self.context, method_name, **kwargs) except RemoteError as r: LOG.exception("Error calling %s", method_name) raise exception.GuestError(original_message=r.value) except Exception as e: LOG.exception("Error calling %s", method_name) raise exception.GuestError(original_message=str(e)) def _get_routing_key(self): """Create the routing key based on the container id.""" return "guestagent.%s" % self.id def change_passwords(self, users): """Make an asynchronous call to change the passwords of one or more users. 
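# Every public method on this API funnels through _call (synchronous,
# bounded by one of the low/high/snapshot timeouts configured in __init__)
# or _cast (fire-and-forget). A stripped-down sketch of the error
# translation _call performs; the exception classes here stand in for the
# trove.common.exception types, and `invoke` is an illustrative
# zero-argument callable doing the actual RPC:
class _GuestTimeout(Exception):
    pass


class _GuestError(Exception):
    pass


def _call_sketch(invoke):
    try:
        return invoke()
    except TimeoutError:
        # eventlet.Timeout in the real code; translated, never leaked.
        raise _GuestTimeout()
    except Exception as e:
        # RemoteError and any other failure surface as a guest error.
        raise _GuestError(str(e))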
""" LOG.debug("Changing passwords for users on instance %s.", self.id) version = self.API_BASE_VERSION self._cast("change_passwords", version=version, users=users) def update_attributes(self, username, hostname, user_attrs): """Update user attributes.""" LOG.debug("Changing user attributes on instance %s.", self.id) version = self.API_BASE_VERSION self._cast("update_attributes", version=version, username=username, hostname=hostname, user_attrs=user_attrs) def create_user(self, users): """Make an asynchronous call to create a new database user""" LOG.debug("Creating Users for instance %s.", self.id) version = self.API_BASE_VERSION self._cast("create_user", version=version, users=users) def get_user(self, username, hostname): """Make a synchronous call to get a single database user.""" LOG.debug("Getting a user %(username)s on instance %(id)s.", {'username': username, 'id': self.id}) version = self.API_BASE_VERSION return self._call("get_user", self.agent_low_timeout, version=version, username=username, hostname=hostname) def list_access(self, username, hostname): """Show all the databases to which a user has more than USAGE.""" LOG.debug("Showing user %(username)s grants on instance %(id)s.", {'username': username, 'id': self.id}) version = self.API_BASE_VERSION return self._call("list_access", self.agent_low_timeout, version=version, username=username, hostname=hostname) def grant_access(self, username, hostname, databases): """Grant a user permission to use a given database.""" LOG.debug("Granting access to databases %(databases)s for user " "%(username)s on instance %(id)s.", {'username': username, 'databases': databases, 'id': self.id}) version = self.API_BASE_VERSION return self._call("grant_access", self.agent_low_timeout, version=version, username=username, hostname=hostname, databases=databases) def revoke_access(self, username, hostname, database): """Remove a user's permission to use a given database.""" LOG.debug("Revoking access from database %(database)s for user " "%(username)s on instance %(id)s.", {'username': username, 'database': database, 'id': self.id}) version = self.API_BASE_VERSION return self._call("revoke_access", self.agent_low_timeout, version=version, username=username, hostname=hostname, database=database) def list_users(self, limit=None, marker=None, include_marker=False): """Make a synchronous call to list database users.""" LOG.debug("Listing Users for instance %s.", self.id) version = self.API_BASE_VERSION return self._call("list_users", self.agent_high_timeout, version=version, limit=limit, marker=marker, include_marker=include_marker) def delete_user(self, user): """Make an asynchronous call to delete an existing database user.""" LOG.debug("Deleting user %(user)s for instance %(instance_id)s.", {'user': user, 'instance_id': self.id}) version = self.API_BASE_VERSION self._cast("delete_user", version=version, user=user) def create_database(self, databases): """Make an asynchronous call to create a new database within the specified container """ LOG.debug("Creating databases for instance %s.", self.id) version = self.API_BASE_VERSION self._cast("create_database", version=version, databases=databases) def list_databases(self, limit=None, marker=None, include_marker=False): """Make a synchronous call to list databases.""" LOG.debug("Listing databases for instance %s.", self.id) version = self.API_BASE_VERSION return self._call("list_databases", self.agent_low_timeout, version=version, limit=limit, marker=marker, include_marker=include_marker) def 
delete_database(self, database): """Make an asynchronous call to delete an existing database within the specified container """ LOG.debug("Deleting database %(database)s for " "instance %(instance_id)s.", {'database': database, 'instance_id': self.id}) version = self.API_BASE_VERSION self._cast("delete_database", version=version, database=database) def get_root_password(self): """Make a synchronous call to get root password of instance. """ LOG.debug("Get root password of instance %s.", self.id) version = self.API_BASE_VERSION return self._call("get_root_password", self.agent_high_timeout, version=version) def enable_root(self): """Make a synchronous call to enable the root user for access from anywhere """ LOG.debug("Enable root user for instance %s.", self.id) version = self.API_BASE_VERSION return self._call("enable_root", self.agent_high_timeout, version=version) def enable_root_with_password(self, root_password=None): """Make a synchronous call to enable the root user for access from anywhere """ LOG.debug("Enable root user for instance %s.", self.id) version = self.API_BASE_VERSION return self._call("enable_root_with_password", self.agent_high_timeout, version=version, root_password=root_password) def disable_root(self): """Make a synchronous call to disable the root user for access from anywhere """ LOG.debug("Disable root user for instance %s.", self.id) version = self.API_BASE_VERSION return self._call("disable_root", self.agent_low_timeout, version=version) def is_root_enabled(self): """Make a synchronous call to check if root access is available for the container """ LOG.debug("Check root access for instance %s.", self.id) version = self.API_BASE_VERSION return self._call("is_root_enabled", self.agent_low_timeout, version=version) def get_hwinfo(self): """Make a synchronous call to get hardware info for the container""" LOG.debug("Check hwinfo on instance %s.", self.id) version = self.API_BASE_VERSION return self._call("get_hwinfo", self.agent_low_timeout, version=version) def get_diagnostics(self): """Make a synchronous call to get diagnostics for the container""" LOG.debug("Check diagnostics on instance %s.", self.id) version = self.API_BASE_VERSION return self._call("get_diagnostics", self.agent_low_timeout, version=version) def rpc_ping(self): """Make a synchronous RPC call to check if we can ping the instance.""" LOG.debug("Check RPC ping on instance %s.", self.id) version = self.API_BASE_VERSION return self._call("rpc_ping", self.agent_low_timeout, version=version) def prepare(self, memory_mb, packages, databases, users, device_path='/dev/vdb', mount_point='/mnt/volume', backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None, modules=None): """Make an asynchronous call to prepare the guest as a database container optionally includes a backup id for restores """ LOG.debug("Sending the call to prepare the Guest.") version = self.API_BASE_VERSION # Taskmanager is a publisher, guestagent is a consumer. Usually # consumer creates a queue, but in this case we have to make sure # "prepare" doesn't get lost if for some reason guest was delayed and # didn't create a queue on time. 
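# The helper invoked below implements that guarantee with a small trick:
# it builds an RPC server on the guest's own topic, starts it just long
# enough for the broker to declare the queue, then stops it again. The
# subsequent cast of "prepare" is therefore parked on an existing queue
# even if the guest agent has not booted yet. In outline (calls as used
# in _create_guest_queue below):
#     server = rpc.get_server(target, [], key=instance_key)
#     server.start()   # declares the guestagent.<id> queue on the broker
#     server.stop()    # queue survives; messages wait for the real agent
#     server.wait()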
self._create_guest_queue() packages = packages.split() self._cast( "prepare", version=version, packages=packages, databases=databases, memory_mb=memory_mb, users=users, device_path=device_path, mount_point=mount_point, backup_info=backup_info, config_contents=config_contents, root_password=root_password, overrides=overrides, cluster_config=cluster_config, snapshot=snapshot, modules=modules) def _create_guest_queue(self): """Call to construct, start and immediately stop rpc server in order to create a queue to communicate with the guestagent. This is method do nothing in case a queue is already created by the guest """ from trove.instance.models import DBInstance server = None target = messaging.Target(topic=self._get_routing_key(), server=self.id, version=self.API_BASE_VERSION) try: instance = DBInstance.get_by(id=self.id) instance_key = instance.key if instance else None server = rpc.get_server(target, [], key=instance_key) server.start() finally: if server is not None: server.stop() server.wait() def pre_upgrade(self): """Prepare the guest for upgrade.""" LOG.debug("Sending the call to prepare the guest for upgrade.") version = self.API_BASE_VERSION return self._call("pre_upgrade", self.agent_high_timeout, version=version) def post_upgrade(self, upgrade_info): """Recover the guest after upgrading the guest's image.""" LOG.debug("Recover the guest after upgrading the guest's image.") version = self.API_BASE_VERSION LOG.debug("Recycling the client ...") version_cap = self.VERSION_ALIASES.get( CONF.upgrade_levels.guestagent, CONF.upgrade_levels.guestagent) self.client = self.get_client(self.target, version_cap) self._call("post_upgrade", self.agent_high_timeout, version=version, upgrade_info=upgrade_info) def restart(self): """Restart the database server.""" LOG.debug("Sending the call to restart the database process " "on the Guest.") version = self.API_BASE_VERSION self._call("restart", self.agent_high_timeout, version=version) def start_db_with_conf_changes(self, config_contents): """Start the database server.""" LOG.debug("Sending the call to start the database process on " "the Guest with a timeout of %s.", self.agent_high_timeout) version = self.API_BASE_VERSION self._call("start_db_with_conf_changes", self.agent_high_timeout, version=version, config_contents=config_contents) def reset_configuration(self, configuration): """Ignore running state of the database server; just change the config file to a new flavor. 
""" LOG.debug("Sending the call to change the database conf file on the " "Guest with a timeout of %s.", self.agent_high_timeout) version = self.API_BASE_VERSION self._call("reset_configuration", self.agent_high_timeout, version=version, configuration=configuration) def stop_db(self, do_not_start_on_reboot=False): """Stop the database server.""" LOG.debug("Sending the call to stop the database process " "on the Guest.") version = self.API_BASE_VERSION self._call("stop_db", self.agent_high_timeout, version=version, do_not_start_on_reboot=do_not_start_on_reboot) def upgrade(self, instance_version, location, metadata=None): """Make an asynchronous call to self upgrade the guest agent.""" LOG.debug("Sending an upgrade call to nova-guest.") version = self.API_BASE_VERSION self._cast("upgrade", version=version, instance_version=instance_version, location=location, metadata=metadata) def get_volume_info(self): """Make a synchronous call to get volume info for the container.""" LOG.debug("Check Volume Info on instance %s.", self.id) version = self.API_BASE_VERSION return self._call("get_filesystem_stats", self.agent_low_timeout, version=version, fs_path=None) def update_guest(self): """Make a synchronous call to update the guest agent.""" LOG.debug("Updating guest agent on instance %s.", self.id) version = self.API_BASE_VERSION self._call("update_guest", self.agent_high_timeout, version=version) def create_backup(self, backup_info): """Make async call to create a full backup of this instance.""" LOG.debug("Create Backup %(backup_id)s " "for instance %(instance_id)s.", {'backup_id': backup_info['id'], 'instance_id': self.id}) version = self.API_BASE_VERSION self._cast("create_backup", version=version, backup_info=backup_info) def mount_volume(self, device_path=None, mount_point=None): """Mount the volume.""" LOG.debug("Mount volume %(mount)s on instance %(id)s.", { 'mount': mount_point, 'id': self.id}) version = self.API_BASE_VERSION self._call("mount_volume", self.agent_low_timeout, version=version, device_path=device_path, mount_point=mount_point) def unmount_volume(self, device_path=None, mount_point=None): """Unmount the volume.""" LOG.debug("Unmount volume %(device)s on instance %(id)s.", { 'device': device_path, 'id': self.id}) version = self.API_BASE_VERSION self._call("unmount_volume", self.agent_low_timeout, version=version, device_path=device_path, mount_point=mount_point) def resize_fs(self, device_path=None, mount_point=None): """Resize the filesystem.""" LOG.debug("Resize device %(device)s on instance %(id)s.", { 'device': device_path, 'id': self.id}) version = self.API_BASE_VERSION self._call("resize_fs", self.agent_high_timeout, version=version, device_path=device_path, mount_point=mount_point) def update_overrides(self, overrides, remove=False): """Update the overrides.""" LOG.debug("Updating overrides values %(overrides)s on instance " "%(id)s.", {'overrides': overrides, 'id': self.id}) version = self.API_BASE_VERSION self._call("update_overrides", self.agent_high_timeout, version=version, overrides=overrides, remove=remove) def apply_overrides(self, overrides): LOG.debug("Applying overrides values %(overrides)s on instance " "%(id)s.", {'overrides': overrides, 'id': self.id}) version = self.API_BASE_VERSION self._call("apply_overrides", self.agent_high_timeout, version=version, overrides=overrides) def backup_required_for_replication(self): LOG.debug("Checking backup requirement for replication") version = self.API_BASE_VERSION return self._call("backup_required_for_replication", 
self.agent_low_timeout, version=version) def get_replication_snapshot(self, snapshot_info=None, replica_source_config=None): LOG.debug("Retrieving replication snapshot from instance %s.", self.id) version = self.API_BASE_VERSION return self._call("get_replication_snapshot", self.agent_snapshot_timeout, version=version, snapshot_info=snapshot_info, replica_source_config=replica_source_config) def attach_replication_slave(self, snapshot, replica_config=None): LOG.debug("Configuring instance %s to replicate from %s.", self.id, snapshot.get('master').get('id')) version = self.API_BASE_VERSION self._cast("attach_replication_slave", version=version, snapshot=snapshot, slave_config=replica_config) def detach_replica(self, for_failover=False): LOG.debug("Detaching replica %s from its replication source.", self.id) version = self.API_BASE_VERSION return self._call("detach_replica", self.agent_high_timeout, version=version, for_failover=for_failover) def get_replica_context(self): LOG.debug("Getting replica context.") version = self.API_BASE_VERSION return self._call("get_replica_context", self.agent_high_timeout, version=version) def attach_replica(self, replica_info, slave_config): LOG.debug("Attaching replica %s.", replica_info) version = self.API_BASE_VERSION self._call("attach_replica", self.agent_high_timeout, version=version, replica_info=replica_info, slave_config=slave_config) def make_read_only(self, read_only): LOG.debug("Executing make_read_only(%s)", read_only) version = self.API_BASE_VERSION self._call("make_read_only", self.agent_high_timeout, version=version, read_only=read_only) def enable_as_master(self, replica_source_config): LOG.debug("Executing enable_as_master") version = self.API_BASE_VERSION self._call("enable_as_master", self.agent_high_timeout, version=version, replica_source_config=replica_source_config) # DEPRECATED: Maintain for API Compatibility def get_txn_count(self): LOG.debug("Executing get_txn_count.") version = self.API_BASE_VERSION return self._call("get_txn_count", self.agent_high_timeout, version=version) def get_last_txn(self): LOG.debug("Executing get_last_txn.") version = self.API_BASE_VERSION return self._call("get_last_txn", self.agent_high_timeout, version=version) def get_latest_txn_id(self): LOG.debug("Executing get_latest_txn_id.") version = self.API_BASE_VERSION return self._call("get_latest_txn_id", self.agent_high_timeout, version=version) def wait_for_txn(self, txn): LOG.debug("Executing wait_for_txn.") version = self.API_BASE_VERSION self._call("wait_for_txn", self.agent_high_timeout, version=version, txn=txn) def cleanup_source_on_replica_detach(self, replica_info): LOG.debug("Cleaning up master %s on detach of replica.", self.id) version = self.API_BASE_VERSION self._call("cleanup_source_on_replica_detach", self.agent_high_timeout, version=version, replica_info=replica_info) def demote_replication_master(self): LOG.debug("Demoting instance %s to non-master.", self.id) version = self.API_BASE_VERSION self._call("demote_replication_master", self.agent_high_timeout, version=version) def guest_log_list(self): LOG.debug("Retrieving guest log list for %s.", self.id) version = self.API_BASE_VERSION result = self._call("guest_log_list", self.agent_high_timeout, version=version) LOG.debug("guest_log_list returns %s", result) return result def guest_log_action(self, log_name, enable, disable, publish, discard): LOG.debug("Processing guest log '%s' for %s.", log_name, self.id) version = self.API_BASE_VERSION return self._call("guest_log_action", 
self.agent_high_timeout, version=version, log_name=log_name, enable=enable, disable=disable, publish=publish, discard=discard) def module_list(self, include_contents): LOG.debug("Querying modules on %s (contents: %s).", self.id, include_contents) version = self.API_BASE_VERSION result = self._call("module_list", self.agent_high_timeout, version=version, include_contents=include_contents) return result def module_apply(self, modules): LOG.debug("Applying modules to %s.", self.id) version = self.API_BASE_VERSION return self._call("module_apply", self.agent_high_timeout, version=version, modules=modules) def module_remove(self, module): LOG.debug("Removing modules from %s.", self.id) version = self.API_BASE_VERSION return self._call("module_remove", self.agent_high_timeout, version=version, module=module) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/backup/0000755000175000017500000000000000000000000020721 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/backup/__init__.py0000644000175000017500000000331400000000000023033 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.guestagent.backup.backupagent import BackupAgent AGENT = BackupAgent() def backup(context, backup_info): """ Main entry point for starting a backup based on the given backup id. This will create a backup for this DB instance and will then store the backup in a configured repository (e.g. Swift) :param context: the context token which contains the users details :param backup_id: the id of the persisted backup object """ return AGENT.execute_backup(context, backup_info) def restore(context, backup_info, restore_location): """ Main entry point for restoring a backup based on the given backup id. This will transfer backup data to this instance an will carry out the appropriate restore procedure (eg. mysqldump) :param context: the context token which contains the users details :param backup_id: the id of the persisted backup object """ return AGENT.execute_restore(context, backup_info, restore_location) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/backup/backupagent.py0000644000175000017500000001673400000000000023572 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
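# backup/__init__.py above deliberately exposes plain module-level
# backup()/restore() functions that delegate to one shared BackupAgent, so
# callers in the guest manager need no construction or state of their own.
# The facade reduced to its skeleton (stand-in agent, not the Trove class):
class _AgentSketch(object):
    def execute_backup(self, context, backup_info):
        return ('backup', backup_info['id'])

    def execute_restore(self, context, backup_info, restore_location):
        return ('restore', backup_info['id'], restore_location)


_AGENT_SKETCH = _AgentSketch()


def _backup_sketch(context, backup_info):
    return _AGENT_SKETCH.execute_backup(context, backup_info)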
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from oslo_utils import timeutils from trove.backup.state import BackupState from trove.common import cfg from trove.common.i18n import _ from trove.common.strategies.storage import get_storage_strategy from trove.conductor import api as conductor_api from trove.guestagent.dbaas import get_filesystem_volume_stats from trove.guestagent.strategies.backup.base import BackupError from trove.guestagent.strategies.backup.base import UnknownBackupType from trove.guestagent.strategies.backup import get_backup_strategy from trove.guestagent.strategies.restore import get_restore_strategy LOG = logging.getLogger(__name__) CONF = cfg.CONF CONFIG_MANAGER = CONF.get('mysql' if not CONF.datastore_manager else CONF.datastore_manager) STRATEGY = CONFIG_MANAGER.backup_strategy BACKUP_NAMESPACE = CONFIG_MANAGER.backup_namespace RESTORE_NAMESPACE = CONFIG_MANAGER.restore_namespace RUNNER = get_backup_strategy(STRATEGY, BACKUP_NAMESPACE) EXTRA_OPTS = CONF.backup_runner_options.get(STRATEGY, '') # Try to get the incremental strategy or return the default 'backup_strategy' INCREMENTAL = CONFIG_MANAGER.backup_incremental_strategy.get( STRATEGY, STRATEGY) INCREMENTAL_RUNNER = get_backup_strategy(INCREMENTAL, BACKUP_NAMESPACE) class BackupAgent(object): def _get_restore_runner(self, backup_type): """Returns the RestoreRunner associated with this backup type.""" try: runner = get_restore_strategy(backup_type, RESTORE_NAMESPACE) except ImportError: raise UnknownBackupType(_("Unknown Backup type: %(type)s in " "namespace %(ns)s") % {"type": backup_type, "ns": RESTORE_NAMESPACE}) return runner def stream_backup_to_storage(self, context, backup_info, runner, storage, parent_metadata={}, extra_opts=EXTRA_OPTS): backup_id = backup_info['id'] conductor = conductor_api.API(context) # Store the size of the filesystem before the backup. 
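# A note on the state reporting in stream_backup_to_storage below: it
# records filesystem usage *before* any data is streamed, and it reports
# state through trove-conductor rather than touching the Trove database
# directly -- the guest has no database credentials. Each transition
# (BUILDING first; COMPLETED or FAILED from the finally block) travels as
# an RPC stamped with a microsecond 'sent' timestamp so the conductor can
# ignore updates that arrive out of order.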
mount_point = CONFIG_MANAGER.mount_point stats = get_filesystem_volume_stats(mount_point) backup_state = { 'backup_id': backup_id, 'size': stats.get('used', 0.0), 'state': BackupState.BUILDING, } conductor.update_backup(CONF.guest_id, sent=timeutils.utcnow_ts(microsecond=True), **backup_state) LOG.debug("Updated state for %s to %s.", backup_id, backup_state) try: with runner(filename=backup_id, extra_opts=extra_opts, **parent_metadata) as bkup: LOG.debug("Starting backup %s.", backup_id) meta = {} meta['datastore'] = backup_info['datastore'] meta['datastore_version'] = backup_info['datastore_version'] success, note, checksum, location = storage.save( bkup.manifest, bkup, metadata=meta) backup_state.update({ 'checksum': checksum, 'location': location, 'note': note, 'success': success, 'backup_type': bkup.backup_type, }) LOG.debug("Backup %(backup_id)s completed status: " "%(success)s.", backup_state) LOG.debug("Backup %(backup_id)s file swift checksum: " "%(checksum)s.", backup_state) LOG.debug("Backup %(backup_id)s location: " "%(location)s.", backup_state) if not success: raise BackupError(note) backup_state.update({'state': BackupState.COMPLETED}) return meta except Exception: LOG.exception( "Error saving backup: %(backup_id)s.", backup_state) backup_state.update({'state': BackupState.FAILED}) raise finally: LOG.info("Completed backup %(backup_id)s.", backup_state) conductor.update_backup(CONF.guest_id, sent=timeutils.utcnow_ts( microsecond=True), **backup_state) LOG.info("Updated state for %s to %s.", backup_id, backup_state) def execute_backup(self, context, backup_info, runner=RUNNER, extra_opts=EXTRA_OPTS, incremental_runner=INCREMENTAL_RUNNER): LOG.info("Running backup %(id)s.", backup_info) storage = get_storage_strategy( CONF.storage_strategy, CONF.storage_namespace)(context) # Check if this is an incremental backup and grab the parent metadata parent_metadata = {} if backup_info.get('parent'): runner = incremental_runner LOG.debug("Using incremental backup runner: %s.", runner.__name__) parent = backup_info['parent'] parent_metadata = storage.load_metadata(parent['location'], parent['checksum']) # The parent could be another incremental backup so we need to # reset the location and checksum to *this* parents info parent_metadata.update({ 'parent_location': parent['location'], 'parent_checksum': parent['checksum'] }) self.stream_backup_to_storage(context, backup_info, runner, storage, parent_metadata, extra_opts) def execute_restore(self, context, backup_info, restore_location): try: restore_runner = self._get_restore_runner(backup_info['type']) storage = get_storage_strategy( CONF.storage_strategy, CONF.storage_namespace)(context) runner = restore_runner(storage, location=backup_info['location'], checksum=backup_info['checksum'], restore_location=restore_location) backup_info['restore_location'] = restore_location LOG.info("Restoring instance from backup %(id)s to " "%(restore_location)s", backup_info) content_size = runner.restore() LOG.info("Restore from backup %(id)s completed successfully " "to %(restore_location)s", backup_info) LOG.debug("Restore size: %s", content_size) except Exception: LOG.exception("Error restoring backup %(id)s", backup_info) raise else: LOG.debug("Restored backup %(id)s", backup_info) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/common/0000755000175000017500000000000000000000000020744 
trove-12.1.0.dev92/trove/guestagent/common/__init__.py

trove-12.1.0.dev92/trove/guestagent/common/configuration.py

# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import os
import re

import six

from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode


class ConfigurationManager(object):
    """
    ConfigurationManager is responsible for management of datastore
    configuration. Its base functionality includes reading and writing
    configuration files. It is responsible for validating user inputs and
    requests. When supplied an override strategy it allows the user to
    manage configuration overrides as well.
    """

    # Configuration group names. The names determine the order in which the
    # groups get applied. System groups are divided into two camps: pre-user
    # and post-user. In general system overrides will get applied over the
    # user group, unless specified otherwise (i.e. SYSTEM_POST_USER_GROUP
    # will be used).
    SYSTEM_PRE_USER_GROUP = '10-system'
    USER_GROUP = '20-user'
    SYSTEM_POST_USER_GROUP = '50-system'

    DEFAULT_STRATEGY_OVERRIDES_SUB_DIR = 'overrides'
    DEFAULT_CHANGE_ID = 'common'

    def __init__(self, base_config_path, owner, group, codec,
                 requires_root=False, override_strategy=None):
        """
        :param base_config_path   Path to the configuration file.
        :type base_config_path    string

        :param owner              Owner of the configuration files.
        :type owner               string

        :param group              Group of the configuration files.
        :type group               string

        :param codec              Codec for reading/writing of the particular
                                  configuration format.
        :type codec               StreamCodec

        :param requires_root      Whether the manager requires superuser
                                  privileges.
        :type requires_root       boolean

        :param override_strategy  Strategy used to manage configuration
                                  overrides (e.g. ImportOverrideStrategy).
                                  Defaults to OneFileOverrideStrategy if None.
                                  This strategy should be compatible with
                                  virtually any datastore. It is recommended
                                  that each datastore define its strategy
                                  explicitly to avoid upgrade compatibility
                                  issues in case the default implementation
                                  changes in the future.
        :type override_strategy   ConfigurationOverrideStrategy
        """
        self._base_config_path = base_config_path
        self._owner = owner
        self._group = group
        self._codec = codec
        self._requires_root = requires_root
        self._value_cache = None

        if not override_strategy:
            # Use the OneFile strategy by default. Store the revisions in a
            # sub-directory at the location of the configuration file.
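            # Aside (hedged sketch; the option names and values are made up,
            # and the result assumes an empty base file): because revisions
            # apply in lexical group order, a post-user '50-system' override
            # always wins over a '20-user' one:
            #
            #     manager.apply_user_override(
            #         {'mysqld': {'max_connections': 100}})
            #     manager.apply_system_override(
            #         {'mysqld': {'max_connections': 200}})
            #     manager.get_value('mysqld')  # -> {'max_connections': 200}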
revision_dir = guestagent_utils.build_file_path( os.path.dirname(base_config_path), self.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self._override_strategy = OneFileOverrideStrategy(revision_dir) else: self._override_strategy = override_strategy self._override_strategy.configure( base_config_path, owner, group, codec, requires_root) def get_value(self, key, default=None): """Return the current value at a given key or 'default'. """ if self._value_cache is None: self.refresh_cache() return self._value_cache.get(key, default) def parse_configuration(self): """Read contents of the configuration file (applying overrides if any) and parse it into a dict. :returns: Configuration file as a Python dict. """ base_options = operating_system.read_file( self._base_config_path, codec=self._codec, as_root=self._requires_root) updates = self._override_strategy.parse_updates() guestagent_utils.update_dict(updates, base_options) return base_options def save_configuration(self, options): """Write given contents to the base configuration file. Remove all existing overrides (both system and user). :param contents Contents of the configuration file. :type contents string or dict """ if isinstance(options, dict): # Serialize a dict of options for writing. self.save_configuration(self._codec.serialize(options)) else: self._override_strategy.remove(self.USER_GROUP) self._override_strategy.remove(self.SYSTEM_PRE_USER_GROUP) self._override_strategy.remove(self.SYSTEM_POST_USER_GROUP) operating_system.write_file( self._base_config_path, options, as_root=self._requires_root) operating_system.chown( self._base_config_path, self._owner, self._group, as_root=self._requires_root) operating_system.chmod( self._base_config_path, FileMode.ADD_READ_ALL, as_root=self._requires_root) self.refresh_cache() def has_system_override(self, change_id): """Return whether a given 'system' change exists. """ return (self._override_strategy.exists(self.SYSTEM_POST_USER_GROUP, change_id) or self._override_strategy.exists(self.SYSTEM_PRE_USER_GROUP, change_id)) def apply_system_override(self, options, change_id=DEFAULT_CHANGE_ID, pre_user=False): """Apply a 'system' change to the configuration. System overrides are always applied after all user changes so that they override any user-defined setting. :param options Configuration changes. :type options string or dict """ group_name = ( self.SYSTEM_PRE_USER_GROUP if pre_user else self.SYSTEM_POST_USER_GROUP) self._apply_override(group_name, change_id, options) def apply_user_override(self, options, change_id=DEFAULT_CHANGE_ID): """Apply a 'user' change to the configuration. The 'system' values will be re-applied over this override. :param options Configuration changes. :type options string or dict """ self._apply_override(self.USER_GROUP, change_id, options) def get_user_override(self, change_id=DEFAULT_CHANGE_ID): """Get the user overrides""" return self._override_strategy.get(self.USER_GROUP, change_id) def _apply_override(self, group_name, change_id, options): if not isinstance(options, dict): # Deserialize the options into a dict if not already. self._apply_override( group_name, change_id, self._codec.deserialize(options)) else: self._override_strategy.apply(group_name, change_id, options) self.refresh_cache() def remove_system_override(self, change_id=DEFAULT_CHANGE_ID): """Revert a 'system' configuration change. 
""" self._remove_override(self.SYSTEM_POST_USER_GROUP, change_id) self._remove_override(self.SYSTEM_PRE_USER_GROUP, change_id) def remove_user_override(self, change_id=DEFAULT_CHANGE_ID): """Revert a 'user' configuration change. """ self._remove_override(self.USER_GROUP, change_id) def _remove_override(self, group_name, change_id): self._override_strategy.remove(group_name, change_id) self.refresh_cache() def refresh_cache(self): self._value_cache = self.parse_configuration() @six.add_metaclass(abc.ABCMeta) class ConfigurationOverrideStrategy(object): """ConfigurationOverrideStrategy handles configuration files. The strategy provides functionality to enumerate, apply and remove configuration overrides. """ @abc.abstractmethod def configure(self, *args, **kwargs): """Configure this strategy. A strategy needs to be configured before it can be used. It would typically be configured by the ConfigurationManager. """ @abc.abstractmethod def exists(self, group_name, change_id): """Return whether a given revision exists. """ @abc.abstractmethod def apply(self, group_name, change_id, options): """Apply given options on the most current configuration revision. Update if a file with the same id already exists. :param group_name The group the override belongs to. :type group_name string :param change_id The name of the override within the group. :type change_id string :param options Configuration changes. :type options dict """ @abc.abstractmethod def remove(self, group_name, change_id=None): """Rollback a given configuration override. Remove the whole group if 'change_id' is None. :param group_name The group the override belongs to. :type group_name string :param change_id The name of the override within the group. :type change_id string """ @abc.abstractmethod def get(self, group_name, change_id=None): """Return the contents of a given configuration override :param group_name The group the override belongs to. :type group_name string :param change_id The name of the override within the group. :type change_id string """ def parse_updates(self): """Return all updates applied to the base revision as a single dict. Return an empty dict if the base file is always the most current version of configuration. :returns: Updates to the base revision as a Python dict. """ return {} class ImportOverrideStrategy(ConfigurationOverrideStrategy): """Import strategy keeps overrides in separate files that get imported into the base configuration file which never changes itself. An override file is simply deleted when the override is removed. We keep two sets of override files in a separate directory. - User overrides - configuration overrides applied by the user via the Trove API. - System overrides - 'internal' configuration changes applied by the guestagent. The name format of override files is: '--.' where 'set prefix' is to used to order user/system sets, 'n' is an index used to keep track of the order in which overrides within their set got applied. """ FILE_NAME_PATTERN = r'%s-([0-9]+)-%s\.%s$' def __init__(self, revision_dir, revision_ext): """ :param revision_dir Path to the directory for import files. :type revision_dir string :param revision_ext Extension of revision files. :type revision_ext string """ self._revision_dir = revision_dir self._revision_ext = revision_ext def configure(self, base_config_path, owner, group, codec, requires_root): """ :param base_config_path Path to the configuration file. :type base_config_path string :param owner Owner of the configuration and revision files. 
:type owner string :param group Group of the configuration and revision files. :type group string :param codec Codec for reading/writing of the particular configuration format. :type codec StreamCodec :param requires_root Whether the strategy requires superuser privileges. :type requires_root boolean """ self._base_config_path = base_config_path self._owner = owner self._group = group self._codec = codec self._requires_root = requires_root def exists(self, group_name, change_id): return self._find_revision_file(group_name, change_id) is not None def apply(self, group_name, change_id, options): self._initialize_import_directory() revision_file = self._find_revision_file(group_name, change_id) if revision_file is None: # Create a new file. last_revision_index = self._get_last_file_index(group_name) revision_file = guestagent_utils.build_file_path( self._revision_dir, '%s-%03d-%s' % (group_name, last_revision_index + 1, change_id), self._revision_ext) else: # Update the existing file. current = operating_system.read_file( revision_file, codec=self._codec, as_root=self._requires_root) options = guestagent_utils.update_dict(options, current) operating_system.write_file( revision_file, options, codec=self._codec, as_root=self._requires_root) operating_system.chown( revision_file, self._owner, self._group, as_root=self._requires_root) operating_system.chmod( revision_file, FileMode.ADD_READ_ALL, as_root=self._requires_root) def _initialize_import_directory(self): """Lazy-initialize the directory for imported revision files. """ if not os.path.exists(self._revision_dir): operating_system.create_directory( self._revision_dir, user=self._owner, group=self._group, force=True, as_root=self._requires_root) def remove(self, group_name, change_id=None): removed = set() if change_id: # Remove a given file. revision_file = self._find_revision_file(group_name, change_id) if revision_file: removed.add(revision_file) else: # Remove the entire group. removed = self._collect_revision_files(group_name) for path in removed: operating_system.remove(path, force=True, as_root=self._requires_root) def get(self, group_name, change_id): revision_file = self._find_revision_file(group_name, change_id) return operating_system.read_file(revision_file, codec=self._codec, as_root=self._requires_root) def parse_updates(self): parsed_options = {} for path in self._collect_revision_files(): options = operating_system.read_file(path, codec=self._codec, as_root=self._requires_root) guestagent_utils.update_dict(options, parsed_options) return parsed_options @property def has_revisions(self): """Return True if there currently are any revision files. """ return (operating_system.exists( self._revision_dir, is_directory=True, as_root=self._requires_root) and (len(self._collect_revision_files()) > 0)) def _get_last_file_index(self, group_name): """Get the index of the most current file in a given group. """ current_files = self._collect_revision_files(group_name) if current_files: name_pattern = self._build_rev_name_pattern(group_name=group_name) last_file_name = os.path.basename(current_files[-1]) last_index_match = re.match(name_pattern, last_file_name) if last_index_match: return int(last_index_match.group(1)) return 0 def _collect_revision_files(self, group_name='.+'): """Collect and return a sorted list of paths to existing revision files. The files should be sorted in the same order in which they were applied. 
""" name_pattern = self._build_rev_name_pattern(group_name=group_name) return sorted(operating_system.list_files_in_directory( self._revision_dir, recursive=True, pattern=name_pattern, as_root=self._requires_root)) def _find_revision_file(self, group_name, change_id): name_pattern = self._build_rev_name_pattern(group_name, change_id) found = operating_system.list_files_in_directory( self._revision_dir, recursive=True, pattern=name_pattern, as_root=self._requires_root) return next(iter(found), None) def _build_rev_name_pattern(self, group_name='.+', change_id='.+'): return self.FILE_NAME_PATTERN % (group_name, change_id, self._revision_ext) class OneFileOverrideStrategy(ConfigurationOverrideStrategy): """This is a strategy for datastores that do not support multiple configuration files. It uses the Import Strategy to keep the overrides internally. When an override is applied or removed a new configuration file is generated by applying all changes on a saved-off base revision. """ BASE_REVISION_NAME = 'base' REVISION_EXT = 'rev' def __init__(self, revision_dir): """ :param revision_dir Path to the directory for import files. :type revision_dir string """ self._revision_dir = revision_dir self._import_strategy = ImportOverrideStrategy(revision_dir, self.REVISION_EXT) def configure(self, base_config_path, owner, group, codec, requires_root): """ :param base_config_path Path to the configuration file. :type base_config_path string :param owner Owner of the configuration and revision files. :type owner string :param group Group of the configuration and revision files. :type group string :param codec Codec for reading/writing of the particular configuration format. :type codec StreamCodec :param requires_root Whether the strategy requires superuser privileges. :type requires_root boolean """ self._base_config_path = base_config_path self._owner = owner self._group = group self._codec = codec self._requires_root = requires_root self._base_revision_file = guestagent_utils.build_file_path( self._revision_dir, self.BASE_REVISION_NAME, self.REVISION_EXT) self._import_strategy.configure( base_config_path, owner, group, codec, requires_root) def exists(self, group_name, change_id): return self._import_strategy.exists(group_name, change_id) def apply(self, group_name, change_id, options): self._import_strategy.apply(group_name, change_id, options) self._regenerate_base_configuration() def remove(self, group_name, change_id=None): if self._import_strategy.has_revisions: self._import_strategy.remove(group_name, change_id=change_id) self._regenerate_base_configuration() if not self._import_strategy.has_revisions: # The base revision file is no longer needed if there are no # overrides. It will be regenerated based on the current # configuration file on the first 'apply()'. operating_system.remove(self._base_revision_file, force=True, as_root=self._requires_root) def get(self, group_name, change_id): return self._import_strategy.get(group_name, change_id) def _regenerate_base_configuration(self): """Gather all configuration changes and apply them in order on the base revision. Write the results to the configuration file. """ if not os.path.exists(self._base_revision_file): # Initialize the file with the current configuration contents if it # does not exist. 
operating_system.copy( self._base_config_path, self._base_revision_file, force=True, preserve=True, as_root=self._requires_root) base_revision = operating_system.read_file( self._base_revision_file, codec=self._codec, as_root=self._requires_root) changes = self._import_strategy.parse_updates() updated_revision = guestagent_utils.update_dict(changes, base_revision) operating_system.write_file( self._base_config_path, updated_revision, codec=self._codec, as_root=self._requires_root) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/common/guestagent_utils.py0000644000175000017500000001051600000000000024707 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import os import re import six from trove.common import pagination def update_dict(updates, target): """Recursively update a target dictionary with given updates. Updates are provided as a dictionary of key-value pairs where a value can also be a nested dictionary in which case its key is treated as a sub-section of the outer key. If a list value is encountered the update is applied iteratively on all its items. :returns: Will always return a dictionary of results (may be empty). """ if target is None: target = {} if isinstance(target, list): for index, item in enumerate(target): target[index] = update_dict(updates, item) return target if updates is not None: for k, v in updates.items(): if isinstance(v, collections.Mapping): target[k] = update_dict(v, target.get(k, {})) else: target[k] = updates[k] return target def expand_dict(target, namespace_sep='.'): """Expand a flat dict to a nested one. This is an inverse of 'flatten_dict'. :seealso: flatten_dict """ nested = {} for k, v in target.items(): sub = nested keys = k.split(namespace_sep) for key in keys[:-1]: sub = sub.setdefault(key, {}) sub[keys[-1]] = v return nested def flatten_dict(target, namespace_sep='.'): """Flatten a nested dict. Return a one-level dict with all sub-level keys joined by a namespace separator. The following nested dict: {'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}} would be flattened to: {'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10} """ def flatten(target, keys, namespace_sep): flattened = {} if isinstance(target, collections.Mapping): for k, v in target.items(): flattened.update( flatten(v, keys + [k], namespace_sep)) else: ns = namespace_sep.join(keys) flattened[ns] = target return flattened return flatten(target, [], namespace_sep) def build_file_path(base_dir, base_name, *extensions): """Build a path to a file in a given directory. The file may have an extension(s). :returns: Path such as: 'base_dir/base_name.ext1.ext2.ext3' """ file_name = os.extsep.join([base_name] + list(extensions)) return os.path.expanduser(os.path.join(base_dir, file_name)) def to_bytes(value): """Convert numbers with a byte suffix to bytes. 
""" if isinstance(value, six.string_types): pattern = re.compile(r'^(\d+)([K,M,G]{1})$') match = pattern.match(value) if match: value = match.group(1) suffix = match.group(2) factor = { 'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, }[suffix] return int(round(factor * float(value))) return value def paginate_list(li, limit=None, marker=None, include_marker=False): """Paginate a list of objects based on the name attribute. :returns: Page sublist and a marker (name of the last item). """ return pagination.paginate_object_list( li, 'name', limit=limit, marker=marker, include_marker=include_marker) def serialize_list(li, limit=None, marker=None, include_marker=False): """ Paginate (by name) and serialize a given object list. :returns: A serialized and paginated version of a given list. """ page, next_name = paginate_list(li, limit=limit, marker=marker, include_marker=include_marker) return [item.serialize() for item in page], next_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/common/operating_system.py0000644000175000017500000007324500000000000024725 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import operator import os import pwd import re import stat import tempfile from functools import reduce from oslo_concurrency.processutils import UnknownArgumentError from trove.common import exception from trove.common.i18n import _ from trove.common.stream_codecs import IdentityCodec from trove.common import utils REDHAT = 'redhat' DEBIAN = 'debian' SUSE = 'suse' def read_file(path, codec=IdentityCodec(), as_root=False, decode=True): """ Read a file into a Python data structure digestible by 'write_file'. :param path: Path to the read config file. :type path: string :param codec: A codec used to transform the data. :type codec: StreamCodec :param as_root: Execute as root. :type as_root: boolean :param decode: Should the codec decode the data. :type decode: boolean :returns: A dictionary of key-value pairs. :raises: :class:`UnprocessableEntity` if file doesn't exist. :raises: :class:`UnprocessableEntity` if codec not given. """ if path and exists(path, is_directory=False, as_root=as_root): if decode: open_flag = 'r' convert_func = codec.deserialize else: open_flag = 'rb' convert_func = codec.serialize if as_root: return _read_file_as_root(path, open_flag, convert_func) with open(path, open_flag) as fp: return convert_func(fp.read()) raise exception.UnprocessableEntity(_("File does not exist: %s") % path) def exists(path, is_directory=False, as_root=False): """Check a given path exists. :param path Path to be checked. :type path string :param is_directory: Check that the path exists and is a directory. Check for a regular file otherwise. :type is_directory: boolean :param as_root: Execute as root. 
:type as_root: boolean """ found = (not is_directory and os.path.isfile(path) or (is_directory and os.path.isdir(path))) # Only check as root if we can't see it as the regular user, since # this is more expensive if not found and as_root: test_flag = '-d' if is_directory else '-f' cmd = 'test %s %s && echo 1 || echo 0' % (test_flag, path) stdout, _ = utils.execute_with_timeout( cmd, shell=True, check_exit_code=False, run_as_root=True, root_helper='sudo') found = bool(int(stdout)) return found def find_executable(executable, path=None): """Finds a location of an executable in the locations listed in 'path' :param executable File to search. :type executable string :param path Lookup directories separated by a path separartor. :type path string """ if path is None: path = os.environ.get('PATH', os.defpath) dirs = path.split(os.pathsep) for directory in dirs: exec_path = os.path.join(directory, executable) if os.path.isfile(exec_path) and os.access(exec_path, os.X_OK): return exec_path return None def _read_file_as_root(path, open_flag, convert_func): """Read a file as root. :param path Path to the written file. :type path string :param open_flag: The flag for opening a file :type open_flag: string :param convert_func: The function for converting data. :type convert_func: callable """ with tempfile.NamedTemporaryFile(open_flag) as fp: copy(path, fp.name, force=True, dereference=True, as_root=True) chmod(fp.name, FileMode.ADD_READ_ALL(), as_root=True) return convert_func(fp.read()) def write_file(path, data, codec=IdentityCodec(), as_root=False, encode=True): """Write data into file using a given codec. Overwrite any existing contents. The written file can be read back into its original form by 'read_file'. :param path Path to the written config file. :type path string :param data: An object representing the file contents. :type data: object :param codec: A codec used to transform the data. :type codec: StreamCodec :param as_root: Execute as root. :type as_root: boolean :param encode: Should the codec encode the data. :type encode: boolean :raises: :class:`UnprocessableEntity` if path not given. """ if path: if encode: open_flag = 'w' convert_func = codec.serialize else: open_flag = 'wb' convert_func = codec.deserialize if as_root: _write_file_as_root(path, data, open_flag, convert_func) else: with open(path, open_flag) as fp: fp.write(convert_func(data)) fp.flush() else: raise exception.UnprocessableEntity(_("Invalid path: %s") % path) def _write_file_as_root(path, data, open_flag, convert_func): """Write a file as root. Overwrite any existing contents. :param path Path to the written file. :type path string :param data: An object representing the file contents. :type data: StreamCodec :param open_flag: The flag for opening a file :type open_flag: string :param convert_func: The function for converting data. :type convert_func: callable """ # The files gets removed automatically once the managing object goes # out of scope. with tempfile.NamedTemporaryFile(open_flag, delete=False) as fp: fp.write(convert_func(data)) fp.flush() fp.close() # Release the resource before proceeding. copy(fp.name, path, force=True, as_root=True) class FileMode(object): """ Represent file permissions (or 'modes') that can be applied on a filesystem path by functions such as 'chmod'. The way the modes get applied is generally controlled by the operation ('reset', 'add', 'remove') group to which they belong. All modes are represented as octal numbers. Modes are combined in a 'bitwise OR' (|) operation. 
Multiple modes belonging to a single operation are combined into a net value for that operation which can be retrieved by one of the 'get_*_mode' methods. Objects of this class are compared by the net values of their individual operations. :seealso: chmod :param reset: List of (octal) modes that will be set, other bits will be cleared. :type reset: list :param add: List of (octal) modes that will be added to the current mode. :type add: list :param remove: List of (octal) modes that will be removed from the current mode. :type remove: list """ @classmethod def SET_ALL_RWX(cls): return cls(reset=[stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO]) # =0777 @classmethod def SET_FULL(cls): return cls.SET_ALL_RWX() @classmethod def SET_GRP_RW_OTH_R(cls): return cls(reset=[stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH]) # =0064 @classmethod def SET_USR_RO(cls): return cls(reset=[stat.S_IRUSR]) # =0400 @classmethod def SET_USR_RW(cls): return cls(reset=[stat.S_IRUSR | stat.S_IWUSR]) # =0600 @classmethod def SET_USR_RWX(cls): return cls(reset=[stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR]) # =0700 @classmethod def ADD_ALL_R(cls): return cls(add=[stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH]) # +0444 @classmethod def ADD_READ_ALL(cls): return cls.ADD_ALL_R() @classmethod def ADD_USR_RW_GRP_RW(cls): return cls(add=[stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP]) # +0660 @classmethod def ADD_USR_RW_GRP_RW_OTH_R(cls): return cls(add=[stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH]) # +0664 @classmethod def ADD_GRP_RW(cls): return cls(add=[stat.S_IRGRP | stat.S_IWGRP]) # +0060 @classmethod def ADD_GRP_RX(cls): return cls(add=[stat.S_IRGRP | stat.S_IXGRP]) # +0050 @classmethod def ADD_GRP_RX_OTH_RX(cls): return cls(add=[stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH]) # +0055 def __init__(self, reset=None, add=None, remove=None): self._reset = list(reset) if reset is not None else [] self._add = list(add) if add is not None else [] self._remove = list(remove) if remove is not None else [] def get_reset_mode(self): """Get the net (combined) mode that will be set. """ return self._combine_modes(self._reset) def get_add_mode(self): """Get the net (combined) mode that will be added. """ return self._combine_modes(self._add) def get_remove_mode(self): """Get the net (combined) mode that will be removed. """ return self._combine_modes(self._remove) def _combine_modes(self, modes): return reduce(operator.or_, modes) if modes else None def has_any(self): """Check if any modes are specified. 
""" return bool(self._reset or self._add or self._remove) def __hash__(self): return hash((self.get_reset_mode(), self.get_add_mode(), self.get_remove_mode())) def __eq__(self, other): if other and isinstance(other, FileMode): if other is self: return True return (other.get_reset_mode() == self.get_reset_mode() and other.get_add_mode() == self.get_add_mode() and other.get_remove_mode() == self.get_remove_mode()) return False def __repr__(self): args = [] if self._reset: args.append('reset=[{:03o}]'.format(self.get_reset_mode())) if self._add: args.append('add=[{:03o}]'.format(self.get_add_mode())) if self._remove: args.append('remove=[{:03o}]'.format(self.get_remove_mode())) return 'Modes({:s})'.format(', '.join(args)) def get_os(): if os.path.isfile("/etc/redhat-release"): return REDHAT elif os.path.isfile("/etc/SuSE-release"): return SUSE else: return DEBIAN def file_discovery(file_candidates): for file in file_candidates: if os.path.isfile(file): return file return '' def start_service(service_candidates, **kwargs): _execute_service_command(service_candidates, 'cmd_start', **kwargs) def stop_service(service_candidates, **kwargs): _execute_service_command(service_candidates, 'cmd_stop', **kwargs) def enable_service_on_boot(service_candidates, **kwargs): _execute_service_command(service_candidates, 'cmd_enable', **kwargs) def disable_service_on_boot(service_candidates, **kwargs): _execute_service_command(service_candidates, 'cmd_disable', **kwargs) def _execute_service_command(service_candidates, command_key, **kwargs): """ :param service_candidates List of possible system service names. :type service_candidates list :param command_key One of the actions returned by 'service_discovery'. :type command_key string :param timeout: Number of seconds if specified, default if not. There is no timeout if set to None. :type timeout: integer :raises: :class:`UnknownArgumentError` if passed unknown args. :raises: :class:`UnprocessableEntity` if no candidate names given. :raises: :class:`RuntimeError` if command not found. """ exec_args = {} if 'timeout' in kwargs: exec_args['timeout'] = kwargs.pop('timeout') if kwargs: raise UnknownArgumentError(_("Got unknown keyword args: %r") % kwargs) if service_candidates: service = service_discovery(service_candidates) if command_key in service: utils.execute_with_timeout(service[command_key], shell=True, **exec_args) else: raise RuntimeError(_("Service control command not available: %s") % command_key) else: raise exception.UnprocessableEntity(_("Candidate service names not " "specified.")) def service_discovery(service_candidates): """ This function discovers how to start, stop, enable and disable services in the current environment. "service_candidates" is an array with possible system service names. Works for upstart, systemd, sysvinit. 
""" result = {} for service in service_candidates: result['service'] = service # check upstart if os.path.isfile("/etc/init/%s.conf" % service): result['type'] = 'upstart' # upstart returns error code when service already started/stopped result['cmd_start'] = "sudo start %s || true" % service result['cmd_stop'] = "sudo stop %s || true" % service result['cmd_enable'] = ("sudo sed -i '/^manual$/d' " "/etc/init/%s.conf" % service) result['cmd_disable'] = ("sudo sh -c 'echo manual >> " "/etc/init/%s.conf'" % service) break # check sysvinit if os.path.isfile("/etc/init.d/%s" % service): result['type'] = 'sysvinit' result['cmd_start'] = "sudo service %s start" % service result['cmd_stop'] = "sudo service %s stop" % service if os.path.isfile("/usr/sbin/update-rc.d"): result['cmd_enable'] = "sudo update-rc.d %s defaults; sudo " \ "update-rc.d %s enable" % (service, service) result['cmd_disable'] = "sudo update-rc.d %s defaults; sudo " \ "update-rc.d %s disable" % (service, service) elif os.path.isfile("/sbin/chkconfig"): result['cmd_enable'] = "sudo chkconfig %s on" % service result['cmd_disable'] = "sudo chkconfig %s off" % service break # check systemd service_path = "/lib/systemd/system/%s.service" % service if os.path.isfile(service_path): result['type'] = 'systemd' result['cmd_start'] = "sudo systemctl start %s" % service result['cmd_stop'] = "sudo systemctl stop %s" % service # currently "systemctl enable" doesn't work for symlinked units # as described in https://bugzilla.redhat.com/1014311, therefore # replacing a symlink with its real path if os.path.islink(service_path): real_path = os.path.realpath(service_path) unit_file_name = os.path.basename(real_path) result['cmd_enable'] = ("sudo systemctl enable %s" % unit_file_name) result['cmd_disable'] = ("sudo systemctl disable %s" % unit_file_name) else: result['cmd_enable'] = "sudo systemctl enable %s" % service result['cmd_disable'] = "sudo systemctl disable %s" % service break return result def create_directory(dir_path, user=None, group=None, force=True, **kwargs): """Create a given directory and update its ownership (recursively) to the given user and group if any. seealso:: _execute_shell_cmd for valid optional keyword arguments. :param dir_path: Path to the created directory. :type dir_path: string :param user: Owner. :type user: string :param group: Group. :type group: string :param force: No error if existing, make parent directories as needed. :type force: boolean :raises: :class:`UnprocessableEntity` if dir_path not given. """ if dir_path: _create_directory(dir_path, force, **kwargs) if user or group: chown(dir_path, user, group, **kwargs) else: raise exception.UnprocessableEntity( _("Cannot create a blank directory.")) def chown(path, user, group, recursive=True, force=False, **kwargs): """Changes the owner and group of a given file. seealso:: _execute_shell_cmd for valid optional keyword arguments. :param path: Path to the modified file. :type path: string :param user: Owner. :type user: string :param group: Group. :type group: string :param recursive: Operate on files and directories recursively. :type recursive: boolean :param force: Suppress most error messages. :type force: boolean :raises: :class:`UnprocessableEntity` if path not given. :raises: :class:`UnprocessableEntity` if owner/group not given. 
""" if not path: raise exception.UnprocessableEntity( _("Cannot change ownership of a blank file or directory.")) if not user and not group: raise exception.UnprocessableEntity( _("Please specify owner or group, or both.")) owner_group_modifier = _build_user_group_pair(user, group) options = (('f', force), ('R', recursive)) _execute_shell_cmd('chown', options, owner_group_modifier, path, **kwargs) def _build_user_group_pair(user, group): return "%s:%s" % tuple((v if v else '') for v in (user, group)) def _create_directory(dir_path, force=True, **kwargs): """Create a given directory. :param dir_path: Path to the created directory. :type dir_path: string :param force: No error if existing, make parent directories as needed. :type force: boolean """ options = (('p', force),) _execute_shell_cmd('mkdir', options, dir_path, **kwargs) def chmod(path, mode, recursive=True, force=False, **kwargs): """Changes the mode of a given file. :seealso: Modes for more information on the representation of modes. :seealso: _execute_shell_cmd for valid optional keyword arguments. :param path: Path to the modified file. :type path: string :param mode: File permissions (modes). The modes will be applied in the following order: reset (=), add (+), remove (-) :type mode: FileMode :param recursive: Operate on files and directories recursively. :type recursive: boolean :param force: Suppress most error messages. :type force: boolean :raises: :class:`UnprocessableEntity` if path not given. :raises: :class:`UnprocessableEntity` if no mode given. """ if path: options = (('f', force), ('R', recursive)) shell_modes = _build_shell_chmod_mode(mode) _execute_shell_cmd('chmod', options, shell_modes, path, **kwargs) else: raise exception.UnprocessableEntity( _("Cannot change mode of a blank file.")) def change_user_group(user, group, append=True, add_group=True, **kwargs): """Adds a user to groups by using the usermod linux command with -a and -G options. seealso:: _execute_shell_cmd for valid optional keyword arguments. :param user: Username. :type user: string :param group: Group names. :type group: comma separated string :param append: Adds user to a group. :type append: boolean :param add_group: Lists the groups that the user is a member of. While adding a new groups to an existing user with '-G' option alone, will remove all existing groups that user belongs. Therefore, always add the '-a' (append) with '-G' option to add or append new groups. :type add_group: boolean :raises: :class:`UnprocessableEntity` if user or group not given. """ if not user: raise exception.UnprocessableEntity(_("Missing user.")) elif not group: raise exception.UnprocessableEntity(_("Missing group.")) options = (('a', append), ('G', add_group)) _execute_shell_cmd('usermod', options, group, user, **kwargs) def _build_shell_chmod_mode(mode): """ Build a shell representation of given mode. :seealso: Modes for more information on the representation of modes. :param mode: File permissions (modes). :type mode: FileModes :raises: :class:`UnprocessableEntity` if no mode given. :returns: Following string for any non-empty modes: '=,+,-' """ # Handle methods passed in as constant fields. 
if inspect.ismethod(mode): mode = mode() if mode and mode.has_any(): text_modes = (('=', mode.get_reset_mode()), ('+', mode.get_add_mode()), ('-', mode.get_remove_mode())) return ','.join( ['{0:s}{1:03o}'.format(item[0], item[1]) for item in text_modes if item[1]]) else: raise exception.UnprocessableEntity(_("No file mode specified.")) def remove(path, force=False, recursive=True, **kwargs): """Remove a given file or directory. :seealso: _execute_shell_cmd for valid optional keyword arguments. :param path: Path to the removed file. :type path: string :param force: Ignore nonexistent files. :type force: boolean :param recursive: Remove directories and their contents recursively. :type recursive: boolean :raises: :class:`UnprocessableEntity` if path not given. """ if path: options = (('f', force), ('R', recursive)) _execute_shell_cmd('rm', options, path, **kwargs) else: raise exception.UnprocessableEntity(_("Cannot remove a blank file.")) def move(source, destination, force=False, **kwargs): """Move a given file or directory to a new location. Move attempts to preserve the original ownership, permissions and timestamps. :seealso: _execute_shell_cmd for valid optional keyword arguments. :param source: Path to the source location. :type source: string :param destination: Path to the destination location. :type destination: string :param force: Do not prompt before overwriting. :type force: boolean :raises: :class:`UnprocessableEntity` if source or destination not given. """ if not source: raise exception.UnprocessableEntity(_("Missing source path.")) elif not destination: raise exception.UnprocessableEntity(_("Missing destination path.")) options = (('f', force),) _execute_shell_cmd('mv', options, source, destination, **kwargs) def copy(source, destination, force=False, preserve=False, recursive=True, dereference=False, **kwargs): """Copy a given file or directory to another location. Copy does NOT attempt to preserve ownership, permissions and timestamps unless the 'preserve' option is enabled. :seealso: _execute_shell_cmd for valid optional keyword arguments. :param source: Path to the source location. :type source: string :param destination: Path to the destination location. :type destination: string :param force: If an existing destination file cannot be opened, remove it and try again. :type force: boolean :param preserve: Preserve mode, ownership and timestamps. :type preserve: boolean :param recursive: Copy directories recursively. :type recursive: boolean :param dereference: Follow symbolic links when copying from them. :type dereference: boolean :raises: :class:`UnprocessableEntity` if source or destination not given. """ if not source: raise exception.UnprocessableEntity(_("Missing source path.")) elif not destination: raise exception.UnprocessableEntity(_("Missing destination path.")) options = (('f', force), ('p', preserve), ('R', recursive), ('L', dereference)) _execute_shell_cmd('cp', options, source, destination, **kwargs) def get_bytes_free_on_fs(path): """ Returns the number of bytes free for the filesystem that path is on """ v = os.statvfs(path) return v.f_bsize * v.f_bavail def list_files_in_directory(root_dir, recursive=False, pattern=None, include_dirs=False, as_root=False): """ Return absolute paths to all files in a given root directory. :param root_dir Path to the root directory. :type root_dir string :param recursive Also descend into sub-directories if True. :type recursive boolean :param pattern Return only names matching the pattern. 
:type pattern string :param include_dirs Include paths to individual sub-directories. :type include_dirs boolean """ if as_root: cmd_args = [root_dir, '-noleaf'] if not recursive: cmd_args.extend(['-maxdepth', '0']) if not include_dirs: cmd_args.extend(['-type', 'f']) if pattern: cmd_args.extend(['-regextype', 'posix-extended', '-regex', os.path.join('.*', pattern) + '$']) files = _execute_shell_cmd('find', [], *cmd_args, as_root=True) return {fp for fp in files.splitlines()} return {os.path.abspath(os.path.join(root, name)) for (root, dirs, files) in os.walk(root_dir, topdown=True) if recursive or (root == root_dir) for name in (files + (dirs if include_dirs else [])) if not pattern or re.match(pattern, name)} def _execute_shell_cmd(cmd, options, *args, **kwargs): """Execute a given shell command passing it given options (flags) and arguments. Takes optional keyword arguments: :param as_root: Execute as root. :type as_root: boolean :param timeout: Number of seconds if specified, default if not. There is no timeout if set to None. :type timeout: integer :raises: class:`UnknownArgumentError` if passed unknown args. """ exec_args = {} if kwargs.pop('as_root', False): exec_args['run_as_root'] = True exec_args['root_helper'] = 'sudo' if 'timeout' in kwargs: exec_args['timeout'] = kwargs.pop('timeout') if kwargs: raise UnknownArgumentError(_("Got unknown keyword args: %r") % kwargs) cmd_flags = _build_command_options(options) cmd_args = cmd_flags + list(args) stdout, stderr = utils.execute_with_timeout(cmd, *cmd_args, **exec_args) return stdout def _build_command_options(options): """Build a list of flags from given pairs (option, is_enabled). Each option is prefixed with a single '-'. Include only options for which is_enabled=True. """ return ['-' + item[0] for item in options if item[1]] def get_device(path, as_root=False): """Get the device that a given path exists on.""" stdout = _execute_shell_cmd('df', [], path, as_root=as_root) return stdout.splitlines()[1].split()[0] def is_mount(path): """Check if the given directory path is a mountpoint. Try the standard ismount first. This fails if the path is not accessible though, so resort to checking as the root user (which is slower). """ if os.access(path, os.R_OK): return os.path.ismount(path) if not exists(path, is_directory=True, as_root=True): return False directory_dev = get_device(path, as_root=True) parent_dev = get_device(os.path.join(path, '..'), as_root=True) return directory_dev != parent_dev def get_current_user(): """Returns name of the current OS user""" return pwd.getpwuid(os.getuid())[0] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/common/sql_query.py0000644000175000017500000002662000000000000023350 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Intermediary class for building SQL queries for use by the guest agent. 
Do not hard-code strings into the guest agent; use this module to build them for you. """ class Query(object): def __init__(self, columns=None, tables=None, where=None, order=None, group=None, limit=None): self.columns = columns or [] self.tables = tables or [] self.where = where or [] self.order = order or [] self.group = group or [] self.limit = limit def __repr__(self): return str(self) @property def _columns(self): if not self.columns: return "SELECT *" return "SELECT %s" % (", ".join(self.columns)) @property def _tables(self): return "FROM %s" % (", ".join(self.tables)) @property def _where(self): if not self.where: return "" return "WHERE %s" % (" AND ".join(self.where)) @property def _order(self): if not self.order: return "" return "ORDER BY %s" % (", ".join(self.order)) @property def _group_by(self): if not self.group: return "" return "GROUP BY %s" % (", ".join(self.group)) @property def _limit(self): if not self.limit: return "" return "LIMIT %s" % str(self.limit) def __str__(self): query = [ self._columns, self._tables, self._where, self._order, self._group_by, self._limit, ] query = [q for q in query if q] return " ".join(query) + ";" class Grant(object): PERMISSIONS = ["ALL", "ALL PRIVILEGES", "ALTER ROUTINE", "ALTER", "CREATE ROUTINE", "CREATE TEMPORARY TABLES", "CREATE USER", "CREATE VIEW", "CREATE", "DELETE", "DROP", "EVENT", "EXECUTE", "FILE", "INDEX", "INSERT", "LOCK TABLES", "PROCESS", "REFERENCES", "RELOAD", "REPLICATION CLIENT", "REPLICATION SLAVE", "SELECT", "SHOW DATABASES", "SHOW VIEW", "SHUTDOWN", "SUPER", "TRIGGER", "UPDATE", "USAGE", ] def __init__(self, permissions=None, database=None, table=None, user=None, host=None, clear=None, hashed=None, grant_option=False): self.permissions = permissions or [] self.database = database self.table = table self.user = user self.host = host self.clear = clear self.hashed = hashed self.grant_option = grant_option def __repr__(self): return str(self) @property def _permissions(self): if not self.permissions: return "USAGE" if "ALL" in self.permissions: return "ALL PRIVILEGES" if "ALL PRIVILEGES" in self.permissions: return "ALL PRIVILEGES" filtered = [perm for perm in set(self.permissions) if perm in self.PERMISSIONS] return ", ".join(sorted(filtered)) @property def _database(self): if not self.database: return "*" return "`%s`" % self.database @property def _table(self): if self.table: return "'%s'" % self.table return "*" @property def _user(self): return self.user or "" @property def _identity(self): if self.clear: return "IDENTIFIED BY '%s'" % self.clear if self.hashed: return "IDENTIFIED BY PASSWORD '%s'" % self.hashed return "" @property def _host(self): return self.host or "%" @property def _user_host(self): return "`%s`@`%s`" % (self._user, self._host) @property def _what(self): # Permissions to be granted to the user. return "GRANT %s" % self._permissions @property def _where(self): # Database and table to which the user is granted permissions. return "ON %s.%s" % (self._database, self._table) @property def _whom(self): # User and host to be granted permission. Optionally, password, too. 
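        # Worked example (hedged; the names and password are fake):
        #
        #     str(Grant(permissions=['SELECT', 'INSERT'], database='mydb',
        #               user='jdoe', host='192.168.0.%', clear='secret'))
        #
        # renders (as a single line) as:
        #
        #     GRANT INSERT, SELECT ON `mydb`.* TO `jdoe`@`192.168.0.%`
        #     IDENTIFIED BY 'secret';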
whom = [("TO %s" % self._user_host), self._identity, ] whom = [w for w in whom if w] return " ".join(whom) @property def _with(self): clauses = [] if self.grant_option: clauses.append("GRANT OPTION") if not clauses: return "" return "WITH %s" % ", ".join(clauses) def __str__(self): query = [self._what, self._where, self._whom, self._with, ] query = [q for q in query if q] return " ".join(query) + ";" class Revoke(Grant): def __init__(self, permissions=None, database=None, table=None, user=None, host=None, clear=None, hashed=None): self.permissions = permissions or [] self.database = database self.table = table self.user = user self.host = host self.clear = clear self.hashed = hashed def __str__(self): query = [self._what, self._where, self._whom, ] query = [q for q in query if q] return " ".join(query) + ";" @property def _permissions(self): if not self.permissions: return "ALL" if "ALL" in self.permissions: return "ALL" if "ALL PRIVILEGES" in self.permissions: return "ALL" filtered = [perm for perm in self.permissions if perm in self.PERMISSIONS] return ", ".join(sorted(filtered)) @property def _what(self): # Permissions to be revoked from the user. return "REVOKE %s" % self._permissions @property def _whom(self): # User and host from whom to revoke permission. # Optionally, password, too. whom = [("FROM %s" % self._user_host), self._identity, ] whom = [w for w in whom if w] return " ".join(whom) class CreateDatabase(object): def __init__(self, database, charset=None, collate=None): self.database = database self.charset = charset self.collate = collate def __repr__(self): return str(self) @property def _charset(self): if not self.charset: return "" return "CHARACTER SET = '%s'" % self.charset @property def _collate(self): if not self.collate: return "" return "COLLATE = '%s'" % self.collate def __str__(self): query = [("CREATE DATABASE IF NOT EXISTS `%s`" % self.database), self._charset, self._collate, ] query = [q for q in query if q] return " ".join(query) + ";" class DropDatabase(object): def __init__(self, database): self.database = database def __repr__(self): return str(self) def __str__(self): return "DROP DATABASE `%s`;" % self.database class CreateUser(object): def __init__(self, user, host=None, clear=None, hashed=None): self.user = user self.host = host self.clear = clear # A clear password self.hashed = hashed # A hashed password def __repr__(self): return str(self) @property def keyArgs(self): return {'user': self.user, 'host': self._host, } @property def _host(self): if not self.host: return "%" return self.host @property def _identity(self): if self.clear: return "IDENTIFIED BY '%s'" % self.clear if self.hashed: return "IDENTIFIED BY PASSWORD '%s'" % self.hashed return "" def __str__(self): query = ["CREATE USER :user@:host", self._identity, ] query = [q for q in query if q] return " ".join(query) + ";" class RenameUser(object): def __init__(self, user, host=None, new_user=None, new_host=None): self.user = user self.host = host or '%' self.new_user = new_user self.new_host = new_host def __repr__(self): return str(self) def __str__(self): properties = {'old_name': self.user, 'old_host': self.host, 'new_name': self.new_user or self.user, 'new_host': self.new_host or self.host} return ("RENAME USER '%(old_name)s'@'%(old_host)s' TO " "'%(new_name)s'@'%(new_host)s';" % properties) class SetPassword(object): def __init__(self, user, host=None, new_password=None): self.user = user self.host = host or '%' self.new_password = new_password or '' def __repr__(self): return str(self) 
def __str__(self): properties = {'user_name': self.user, 'user_host': self.host, 'new_password': self.new_password} return ("SET PASSWORD FOR '%(user_name)s'@'%(user_host)s' = " "PASSWORD('%(new_password)s');" % properties) class DropUser(object): def __init__(self, user, host='%'): self.user = user self.host = host def __repr__(self): return str(self) def __str__(self): return "DROP USER `%s`@`%s`;" % (self.user, self.host) class SetServerVariable(object): def __init__(self, key, value): self.key = key self.value = value def __repr__(self): return str(self) def __str__(self): if self.value is True: return "SET GLOBAL %s=%s" % (self.key, 1) elif self.value is False: return "SET GLOBAL %s=%s" % (self.key, 0) elif self.value is None: return "SET GLOBAL %s" % (self.key) elif isinstance(self.value, str): return "SET GLOBAL %s='%s'" % (self.key, self.value) else: return "SET GLOBAL %s=%s" % (self.key, self.value) # Miscellaneous queries that need no parameters. FLUSH = "FLUSH PRIVILEGES;" ROOT_ENABLED = ("SELECT User FROM mysql.user " "WHERE User = 'root' AND Host != 'localhost';") REMOVE_ANON = "DELETE FROM mysql.user WHERE User = '';" REMOVE_ROOT = ("DELETE FROM mysql.user " "WHERE User = 'root' AND Host != 'localhost';") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/datastore/0000755000175000017500000000000000000000000021442 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/__init__.py0000644000175000017500000000000000000000000023541 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/0000755000175000017500000000000000000000000024137 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/__init__.py0000644000175000017500000000000000000000000026236 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/cassandra/0000755000175000017500000000000000000000000026076 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/cassandra/__init__.py0000644000175000017500000000000000000000000030175 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/cassandra/manager.py0000644000175000017500000003267300000000000030075 0ustar00coreycorey00000000000000# Copyright 2013 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import os from oslo_log import log as logging from trove.common import cfg from trove.common import instance as trove_instance from trove.common.notification import EndNotification from trove.guestagent import backup from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.cassandra import service from trove.guestagent.datastore import manager from trove.guestagent import guest_log from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager(manager.Manager): GUEST_LOG_DEFS_SYSTEM_LABEL = 'system' def __init__(self, manager_name='cassandra'): super(Manager, self).__init__(manager_name) self._app = None self._admin = None @property def status(self): return self.app.status @property def app(self): if self._app is None: self._app = self.build_app() return self._app def build_app(self): return service.CassandraApp() @property def admin(self): if self._admin is None: self._admin = self.app.build_admin() return self._admin @property def configuration_manager(self): return self.app.configuration_manager def get_datastore_log_defs(self): system_log_file = self.validate_log_file( self.app.cassandra_system_log_file, self.app.cassandra_owner) return { self.GUEST_LOG_DEFS_SYSTEM_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER, self.GUEST_LOG_USER_LABEL: self.app.cassandra_owner, self.GUEST_LOG_FILE_LABEL: system_log_file } } def guest_log_enable(self, context, log_name, disable): if disable: LOG.debug("Disabling system log.") self.app.set_logging_level('OFF') else: log_level = CONF.get(self.manager_name).get('system_log_level') LOG.debug("Enabling system log with logging level: %s", log_level) self.app.set_logging_level(log_level) return False def restart(self, context): self.app.restart() def start_db_with_conf_changes(self, context, config_contents): self.app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def reset_configuration(self, context, configuration): self.app.reset_configuration(configuration) def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" self.app.install_if_needed(packages) self.app.init_storage_structure(mount_point) if config_contents or device_path or backup_info: # FIXME(pmalik) Once the cassandra bug # https://issues.apache.org/jira/browse/CASSANDRA-2356 # is fixed, this code may have to be revisited. # # Cassandra generates system keyspaces on the first start. # The stored properties include the 'cluster_name', which once # saved cannot be easily changed without removing the system # tables. It is crucial that the service does not boot up in # the middle of the configuration procedure. # We wait here for the service to come up, stop it properly and # remove the generated keyspaces before proceeding with # configuration. If it does not start up within the time limit # we assume it is not going to and proceed with configuration # right away. 
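            # In outline, the workaround below proceeds as follows (sketch of
            # the code that follows):
            #
            #     1. wait up to CONF.state_change_wait_time for the service to
            #        reach RUNNING on its auto-generated first-boot config;
            #     2. if it came up, stop it cleanly;
            #     3. drop the generated system keyspaces
            #        (_remove_system_tables) so the real cluster_name can
            #        still be set.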
LOG.debug("Waiting for database first boot.") if (self.app.status.wait_for_real_status_to_change_to( trove_instance.ServiceStatuses.RUNNING, CONF.state_change_wait_time, False)): LOG.debug("Stopping database prior to initial configuration.") self.app.stop_db() self.app._remove_system_tables() LOG.debug("Starting initial configuration.") if config_contents: LOG.debug("Applying configuration.") self.app.configuration_manager.save_configuration( config_contents) cluster_name = None if cluster_config: cluster_name = cluster_config.get('id', None) self.app.apply_initial_guestagent_configuration( cluster_name=cluster_name) if cluster_config: self.app.write_cluster_topology( cluster_config['dc'], cluster_config['rack'], prefer_local=True) if device_path: LOG.debug("Preparing data volume.") device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(mount_point): # rsync exiting data LOG.debug("Migrating existing data.") device.migrate_data(mount_point) # mount the volume LOG.debug("Mounting new volume.") device.mount(mount_point) if not cluster_config: if backup_info: self._perform_restore(backup_info, context, mount_point) LOG.debug("Starting database with configuration changes.") self.app.start_db(update_db=False) if not self.app.has_user_config(): LOG.debug("Securing superuser access.") self.app.secure() self.app.restart() self._admin = self.app.build_admin() if not cluster_config and self.is_root_enabled(context): self.status.report_root(context) def pre_upgrade(self, context): data_dir = self.app.cassandra_data_dir mount_point, _data = os.path.split(data_dir) save_etc_dir = "%s/etc" % mount_point home_save = "%s/trove_user" % mount_point self.app.status.begin_restart() self.app.drain() self.app.stop_db() operating_system.copy("%s/." % self.app.cassandra_conf_dir, save_etc_dir, preserve=True, as_root=True) operating_system.copy("%s/." 
% os.path.expanduser('~'), home_save, preserve=True, as_root=True) self.unmount_volume(context, mount_point=mount_point) return { 'mount_point': mount_point, 'save_etc_dir': save_etc_dir, 'home_save': home_save } def post_upgrade(self, context, upgrade_info): self.app.stop_db() if 'device' in upgrade_info: self.mount_volume(context, mount_point=upgrade_info['mount_point'], device_path=upgrade_info['device'], write_to_fstab=True) operating_system.chown(path=upgrade_info['mount_point'], user=self.app.cassandra_owner, group=self.app.cassandra_owner, recursive=True, as_root=True) self._restore_home_directory(upgrade_info['home_save']) self._restore_directory(upgrade_info['save_etc_dir'], self.app.cassandra_conf_dir) self._reset_app() self.app.start_db() self.app.upgrade_sstables() self.app.status.end_restart() def change_passwords(self, context, users): with EndNotification(context): self.admin.change_passwords(context, users) def update_attributes(self, context, username, hostname, user_attrs): with EndNotification(context): self.admin.update_attributes(context, username, hostname, user_attrs) def create_database(self, context, databases): with EndNotification(context): self.admin.create_database(context, databases) def create_user(self, context, users): with EndNotification(context): self.admin.create_user(context, users) def delete_database(self, context, database): with EndNotification(context): self.admin.delete_database(context, database) def delete_user(self, context, user): with EndNotification(context): self.admin.delete_user(context, user) def get_user(self, context, username, hostname): return self.admin.get_user(context, username, hostname) def grant_access(self, context, username, hostname, databases): self.admin.grant_access(context, username, hostname, databases) def revoke_access(self, context, username, hostname, database): self.admin.revoke_access(context, username, hostname, database) def list_access(self, context, username, hostname): return self.admin.list_access(context, username, hostname) def list_databases(self, context, limit=None, marker=None, include_marker=False): return self.admin.list_databases(context, limit, marker, include_marker) def list_users(self, context, limit=None, marker=None, include_marker=False): return self.admin.list_users(context, limit, marker, include_marker) def enable_root(self, context): return self.app.enable_root() def enable_root_with_password(self, context, root_password=None): return self.app.enable_root(root_password=root_password) def disable_root(self, context): self.app.enable_root(root_password=None) def is_root_enabled(self, context): return self.app.is_root_enabled() def _perform_restore(self, backup_info, context, restore_location): LOG.info("Restoring database from backup %s.", backup_info['id']) try: backup.restore(context, backup_info, restore_location) self.app._apply_post_restore_updates(backup_info) except Exception as e: LOG.error(e) LOG.error("Error performing restore from backup %s.", backup_info['id']) self.app.status.set_status(trove_instance.ServiceStatuses.FAILED) raise LOG.info("Restored database successfully.") def create_backup(self, context, backup_info): """ Entry point for initiating a backup for this instance. The call currently blocks guestagent until the backup is finished. :param backup_info: a dictionary containing the db instance id of the backup task, location, type, and other data. 
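An illustrative shape (editor's note; field names as consumed elsewhere in this module, e.g. 'id' for logging and, after a restore, 'instance_id'): {'id': '<backup uuid>', 'instance_id': '<instance uuid>', 'location': '<storage URL>', 'type': '<backup type>'}.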
""" with EndNotification(context): backup.backup(context, backup_info) def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") if remove: self.app.remove_overrides() else: self.app.update_overrides(context, overrides, remove) def apply_overrides(self, context, overrides): """Configuration changes are made in the config YAML file and require restart, so this is a no-op. """ pass def get_data_center(self, context): return self.app.get_data_center() def get_rack(self, context): return self.app.get_rack() def set_seeds(self, context, seeds): self.app.set_seeds(seeds) def get_seeds(self, context): return self.app.get_seeds() def set_auto_bootstrap(self, context, enabled): self.app.set_auto_bootstrap(enabled) def node_cleanup_begin(self, context): self.app.node_cleanup_begin() def node_cleanup(self, context): self.app.node_cleanup() def node_decommission(self, context): self.app.node_decommission() def cluster_secure(self, context, password): os_admin = self.app.cluster_secure(password) self._admin = self.app.build_admin() return os_admin def get_admin_credentials(self, context): return self.app.get_admin_credentials() def store_admin_credentials(self, context, admin_credentials): self.app.store_admin_credentials(admin_credentials) self._admin = self.app.build_admin() def _reset_app(self): """ A function for reseting app and admin properties. It is useful when we want to force reload application. Possible usages: loading new configuration files, loading new datastore password """ self._app = None self._admin = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/cassandra/service.py0000644000175000017500000014535700000000000030127 0ustar00coreycorey00000000000000# Copyright 2013 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import re import stat from cassandra.auth import PlainTextAuthProvider from cassandra.cluster import Cluster from cassandra.cluster import NoHostAvailable from cassandra import OperationTimedOut from cassandra.policies import ConstantReconnectionPolicy from oslo_log import log as logging from oslo_utils import netutils from trove.common import cfg from trove.common.db.cassandra import models from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.stream_codecs import IniCodec from trove.common.stream_codecs import PropertiesCodec from trove.common.stream_codecs import SafeYamlCodec from trove.common.stream_codecs import XmlCodec from trove.common import utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import OneFileOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore import service from trove.guestagent import pkg LOG = logging.getLogger(__name__) CONF = cfg.CONF packager = pkg.Package() class CassandraApp(object): """Prepares DBaaS on a Guest container.""" _ADMIN_USER = 'os_admin' _CONF_AUTH_SEC = 'authentication' _CONF_USR_KEY = 'username' _CONF_PWD_KEY = 'password' _CONF_DIR_MODS = stat.S_IRWXU _CONF_FILE_MODS = stat.S_IRUSR CASSANDRA_CONF_FILE = "cassandra.yaml" CASSANDRA_TOPOLOGY_FILE = 'cassandra-rackdc.properties' CASSANDRA_LOGBACK_FILE = "logback.xml" _TOPOLOGY_CODEC = PropertiesCodec( delimiter='=', unpack_singletons=True, string_mappings={ 'true': True, 'false': False}) CASSANDRA_KILL_CMD = "sudo killall java || true" def __init__(self): self.state_change_wait_time = CONF.state_change_wait_time self.status = CassandraAppStatus(self.get_current_superuser()) revision_dir = guestagent_utils.build_file_path( os.path.dirname(self.cassandra_conf), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self.configuration_manager = ConfigurationManager( self.cassandra_conf, self.cassandra_owner, self.cassandra_owner, SafeYamlCodec(default_flow_style=False), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) lb_revision_dir = guestagent_utils.build_file_path( os.path.dirname(self.cassandra_logback), 'logback-overrides') self.logback_conf_manager = ConfigurationManager( self.cassandra_logback, self.cassandra_owner, self.cassandra_owner, XmlCodec(), requires_root=True, override_strategy=OneFileOverrideStrategy(lb_revision_dir)) @property def service_candidates(self): return ['cassandra'] @property def cassandra_conf_dir(self): return { operating_system.REDHAT: "/etc/cassandra/default.conf/", operating_system.DEBIAN: "/etc/cassandra/", operating_system.SUSE: "/etc/cassandra/default.conf/" }[operating_system.get_os()] @property def cassandra_conf(self): return guestagent_utils.build_file_path(self.cassandra_conf_dir, self.CASSANDRA_CONF_FILE) @property def cassandra_topology(self): return guestagent_utils.build_file_path(self.cassandra_conf_dir, self.CASSANDRA_TOPOLOGY_FILE) @property def cassandra_owner(self): return 'cassandra' @property def cassandra_data_dir(self): return guestagent_utils.build_file_path( self.cassandra_working_dir, 'data') @property def cassandra_working_dir(self): return "/var/lib/cassandra" @property def cassandra_system_log_file(self): return guestagent_utils.build_file_path( self.cassandra_log_dir, 'system', 'log') 
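# (With the defaults above this presumably resolves to
# '/var/log/cassandra/system.log'; build_file_path appears to join a base
# directory with a file name and an optional extension.)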
@property def cassandra_log_dir(self): return "/var/log/cassandra" @property def cassandra_logback(self): return guestagent_utils.build_file_path(self.cassandra_conf_dir, self.CASSANDRA_LOGBACK_FILE) @property def default_superuser_password(self): return "cassandra" @property def default_superuser_pwd_hash(self): # Default 'salted_hash' value for 'cassandra' user on Cassandra 2.1. return "$2a$10$wPEVuXBU7WE2Uwzqq3t19ObRJyoKztzC/Doyfr0VtDmVXC4GDAV3e" @property def cqlsh_conf_path(self): return "~/.cassandra/cqlshrc" def build_admin(self): return CassandraAdmin(self.get_current_superuser()) def install_if_needed(self, packages): """Prepare the guest machine with a Cassandra server installation.""" LOG.info("Preparing Guest as a Cassandra Server") if not packager.pkg_is_installed(packages): self._install_db(packages) LOG.debug("Cassandra install_if_needed complete") def init_storage_structure(self, mount_point): try: operating_system.create_directory(mount_point, as_root=True) except exception.ProcessExecutionError: LOG.exception("Error while initiating storage structure.") def start_db(self, update_db=False, enable_on_boot=True): self.status.start_db_service( self.service_candidates, self.state_change_wait_time, enable_on_boot=enable_on_boot, update_db=update_db) def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( self.service_candidates, self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service( self.service_candidates, self.state_change_wait_time) def _install_db(self, packages): """Install Cassandra server""" LOG.debug("Installing Cassandra server.") packager.pkg_install(packages, None, 10000) LOG.debug("Finished installing Cassandra server") def _remove_system_tables(self): """ Clean up the system keyspace. System tables are initialized on the first boot. They store certain properties, such as 'cluster_name', that cannot be easily changed afterwards. The system keyspace needs to be cleaned up first. The tables will be regenerated on the next startup. Make sure to also clean up the commitlog and caches to avoid startup errors due to inconsistencies. The service should not be running at this point. """ if self.status.is_running: raise RuntimeError(_("Cannot remove system tables. " "The service is still running.")) LOG.info('Removing existing system tables.') system_keyspace_dir = guestagent_utils.build_file_path( self.cassandra_data_dir, 'system') commitlog_file = guestagent_utils.build_file_path( self.cassandra_working_dir, 'commitlog') caches_dir = guestagent_utils.build_file_path( self.cassandra_working_dir, 'saved_caches') operating_system.remove(system_keyspace_dir, force=True, recursive=True, as_root=True) operating_system.remove(commitlog_file, force=True, recursive=True, as_root=True) operating_system.remove(caches_dir, force=True, recursive=True, as_root=True) operating_system.create_directory( system_keyspace_dir, user=self.cassandra_owner, group=self.cassandra_owner, force=True, as_root=True) operating_system.create_directory( commitlog_file, user=self.cassandra_owner, group=self.cassandra_owner, force=True, as_root=True) operating_system.create_directory( caches_dir, user=self.cassandra_owner, group=self.cassandra_owner, force=True, as_root=True) def _apply_post_restore_updates(self, backup_info): """The service should not be running at this point.
The restored database files carry some properties over from the original instance that need to be updated with appropriate values for the new instance. These include: - Reset the 'cluster_name' property to match the new unique ID of this instance. This is to ensure that the restored instance is a part of a new single-node cluster rather than forming one with the original node. - Reset the administrator's password. The original password from the parent instance may be compromised or long lost. A general procedure is: - update the configuration property with the current value so that the service can start up - reset the superuser password - restart the service - change the cluster name - restart the service :seealso: _reset_admin_password :seealso: change_cluster_name """ if self.status.is_running: raise RuntimeError(_("Cannot reset the cluster name. " "The service is still running.")) LOG.debug("Applying post-restore updates to the database.") try: # Change the 'cluster_name' property to the current in-database # value so that the database can start up. self._update_cluster_name_property(backup_info['instance_id']) # Reset the superuser password so that we can log-in. self._reset_admin_password() # Start the database and update the 'cluster_name' to the # new value. self.start_db(update_db=False) self.change_cluster_name(CONF.guest_id) finally: self.stop_db() # Always restore the initial state of the service. def cluster_secure(self, password): return self.secure(password=password).serialize() def secure(self, update_user=None, password=None): """Configure the Trove administrative user. Update an existing user if given. Create a new one using the default database credentials otherwise and drop the built-in user when finished. """ LOG.info('Configuring Trove superuser.') if password is None: password = utils.generate_random_password() admin_username = update_user.name if update_user else self._ADMIN_USER os_admin = models.CassandraUser(admin_username, password) if update_user: CassandraAdmin(update_user).alter_user_password(os_admin) else: cassandra = models.CassandraUser( models.CassandraUser.root_username, self.default_superuser_password) CassandraAdmin(cassandra)._create_superuser(os_admin) CassandraAdmin(os_admin).drop_user(cassandra) self._update_admin_credentials(os_admin) return os_admin def _update_admin_credentials(self, user): self.__create_cqlsh_config({self._CONF_AUTH_SEC: {self._CONF_USR_KEY: user.name, self._CONF_PWD_KEY: user.password}}) # Update the internal status with the new user. self.status = CassandraAppStatus(user) def store_admin_credentials(self, admin_credentials): user = models.CassandraUser.deserialize(admin_credentials) self._update_admin_credentials(user) def get_admin_credentials(self): return self.get_current_superuser().serialize() def _reset_admin_password(self): """ Reset the password of Trove's administrative superuser. The service should not be running at this point. A general password reset procedure is: - disable user authentication and remote access - restart the service - update the password in the 'system_auth.credentials' table - re-enable authentication and make the host reachable - restart the service """ if self.status.is_running: raise RuntimeError(_("Cannot reset the administrative password. " "The service is still running.")) try: # Disable automatic startup in case the node goes down before # we have the superuser secured.
operating_system.disable_service_on_boot(self.service_candidates) self.__disable_remote_access() self.__disable_authentication() # We now start up the service and immediately re-enable # authentication in the configuration file (takes effect after # restart). # Then we reset the superuser password to its default value # and restart the service to get user functions back. self.start_db(update_db=False, enable_on_boot=False) self.__enable_authentication() os_admin = self.__reset_user_password_to_default(self._ADMIN_USER) self.status = CassandraAppStatus(os_admin) self.restart() # Now change the administrative password to a new secret value. self.secure(update_user=os_admin) finally: self.stop_db() # Always restore the initial state of the service. # At this point, we should have a secured database with new Trove-only # superuser password. # Proceed to re-enable remote access and automatic startup. self.__enable_remote_access() operating_system.enable_service_on_boot(self.service_candidates) def __reset_user_password_to_default(self, username): LOG.debug("Resetting the password of user '%(user)s' to '%(pw)s'.", {'user': username, 'pw': self.default_superuser_password}) user = models.CassandraUser(username, self.default_superuser_password) with CassandraLocalhostConnection(user) as client: client.execute( "UPDATE system_auth.credentials SET salted_hash=%s " "WHERE username='{}';", (user.name,), (self.default_superuser_pwd_hash,)) return user def change_cluster_name(self, cluster_name): """Change the 'cluster_name' property of an existing running instance. Cluster name is stored in the database and is required to match the configuration value. Cassandra fails to start otherwise. """ if not self.status.is_running: raise RuntimeError(_("Cannot change the cluster name. " "The service is not running.")) LOG.debug("Changing the cluster name to '%s'.", cluster_name) # Update the in-database value. self.__reset_cluster_name(cluster_name) # Update the configuration property. self._update_cluster_name_property(cluster_name) self.restart() def __reset_cluster_name(self, cluster_name): # Reset the in-database value stored locally on this node. current_superuser = self.get_current_superuser() with CassandraLocalhostConnection(current_superuser) as client: client.execute( "UPDATE system.local SET cluster_name = '{}' " "WHERE key='local';", (cluster_name,)) # Newer versions of Cassandra require a flush to ensure the changes # to the local system keyspace persist. self.flush_tables('system', 'local') def __create_cqlsh_config(self, sections): config_path = self._get_cqlsh_conf_path() config_dir = os.path.dirname(config_path) if not os.path.exists(config_dir): os.mkdir(config_dir, self._CONF_DIR_MODS) else: os.chmod(config_dir, self._CONF_DIR_MODS) operating_system.write_file(config_path, sections, codec=IniCodec()) os.chmod(config_path, self._CONF_FILE_MODS) def get_current_superuser(self): """ Build the Trove superuser. Use the stored credentials. If not available fall back to the defaults. """ if self.has_user_config(): return self._load_current_superuser() LOG.warning( "Trove administrative user has not been configured yet. " "Using the built-in default: %s", models.CassandraUser.root_username) return models.CassandraUser(models.CassandraUser.root_username, self.default_superuser_password) def has_user_config(self): """ Return TRUE if there is a client configuration file available on the guest.
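The file in question is the cqlsh client configuration written by __create_cqlsh_config() above, i.e. '~/.cassandra/cqlshrc' (see cqlsh_conf_path).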
""" return os.path.exists(self._get_cqlsh_conf_path()) def _load_current_superuser(self): config = operating_system.read_file(self._get_cqlsh_conf_path(), codec=IniCodec()) return models.CassandraUser( config[self._CONF_AUTH_SEC][self._CONF_USR_KEY], config[self._CONF_AUTH_SEC][self._CONF_PWD_KEY] ) def apply_initial_guestagent_configuration(self, cluster_name=None): """Update guestagent-controlled configuration properties. These changes to the default template are necessary in order to make the database service bootable and accessible in the guestagent context. :param cluster_name: The 'cluster_name' configuration property. Use the unique guest id by default. :type cluster_name: string """ self.configuration_manager.apply_system_override( {'data_file_directories': [self.cassandra_data_dir]}) self._make_host_reachable() self._update_cluster_name_property(cluster_name or CONF.guest_id) # A single-node instance may use the SimpleSnitch # (keyspaces use SimpleStrategy). # A network-aware snitch has to be used otherwise. if cluster_name is None: updates = {'endpoint_snitch': 'SimpleSnitch'} else: updates = {'endpoint_snitch': 'GossipingPropertyFileSnitch'} self.configuration_manager.apply_system_override(updates) def _make_host_reachable(self): """ Some of these settings may be overridden by user defined configuration groups. authenticator and authorizer - Necessary to enable users and permissions. rpc_address - Enable remote connections on all interfaces. broadcast_rpc_address - RPC address to broadcast to drivers and other clients. Must be set if rpc_address = 0.0.0.0 and can never be 0.0.0.0 itself. listen_address - The address on which the node communicates with other nodes. Can never be 0.0.0.0. seed_provider - A list of discovery contact points. """ self.__enable_authentication() self.__enable_remote_access() def __enable_remote_access(self): updates = { 'rpc_address': "0.0.0.0", 'broadcast_rpc_address': netutils.get_my_ipv4(), 'listen_address': netutils.get_my_ipv4(), 'seed_provider': {'parameters': [{'seeds': netutils.get_my_ipv4()}] } } self.configuration_manager.apply_system_override(updates) def __disable_remote_access(self): updates = { 'rpc_address': "127.0.0.1", 'listen_address': '127.0.0.1', 'seed_provider': {'parameters': [{'seeds': '127.0.0.1'}] } } self.configuration_manager.apply_system_override(updates) def __enable_authentication(self): updates = { 'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator', 'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer' } self.configuration_manager.apply_system_override(updates) def __disable_authentication(self): updates = { 'authenticator': 'org.apache.cassandra.auth.AllowAllAuthenticator', 'authorizer': 'org.apache.cassandra.auth.AllowAllAuthorizer' } self.configuration_manager.apply_system_override(updates) def _update_cluster_name_property(self, name): """This 'cluster_name' property prevents nodes from one logical cluster from talking to another. All nodes in a cluster must have the same value. 
""" self.configuration_manager.apply_system_override({'cluster_name': name}) def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) def remove_overrides(self): self.configuration_manager.remove_user_override() def write_cluster_topology(self, data_center, rack, prefer_local=True): LOG.info('Saving Cassandra cluster topology configuration.') config = {'dc': data_center, 'rack': rack, 'prefer_local': prefer_local} operating_system.write_file(self.cassandra_topology, config, codec=self._TOPOLOGY_CODEC, as_root=True) operating_system.chown( self.cassandra_topology, self.cassandra_owner, self.cassandra_owner, as_root=True) operating_system.chmod( self.cassandra_topology, FileMode.ADD_READ_ALL, as_root=True) def start_db_with_conf_changes(self, config_contents): LOG.debug("Starting database with configuration changes.") if self.status.is_running: raise RuntimeError(_("The service is still running.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration() self.start_db(True) def reset_configuration(self, configuration): LOG.debug("Resetting configuration.") config_contents = configuration['config_contents'] self.configuration_manager.save_configuration(config_contents) def _get_cqlsh_conf_path(self): return os.path.expanduser(self.cqlsh_conf_path) def get_data_center(self): config = operating_system.read_file(self.cassandra_topology, codec=self._TOPOLOGY_CODEC) return config['dc'] def get_rack(self): config = operating_system.read_file(self.cassandra_topology, codec=self._TOPOLOGY_CODEC) return config['rack'] def set_seeds(self, seeds): LOG.debug("Setting seed nodes: %s", seeds) updates = { 'seed_provider': {'parameters': [{'seeds': ','.join(seeds)}] } } self.configuration_manager.apply_system_override(updates) def get_seeds(self): """Return a list of seed node IPs if any. The seed IPs are stored as a comma-separated string in the seed-provider parameters: [{'class_name': '', 'parameters': [{'seeds': ','}, ...]}] """ def find_first(key, dict_list): for item in dict_list: if key in item: return item[key] return [] sp_property = self.configuration_manager.get_value('seed_provider', []) seeds_str = find_first('seeds', find_first('parameters', sp_property)) return seeds_str.split(',') if seeds_str else [] def set_auto_bootstrap(self, enabled): """Auto-bootstrap makes new (non-seed) nodes automatically migrate the right data to themselves. The feature has to be turned OFF when initializing a fresh cluster without data. It must be turned back ON once the cluster is initialized. """ LOG.debug("Setting auto-bootstrapping: %s", enabled) updates = {'auto_bootstrap': enabled} self.configuration_manager.apply_system_override(updates) def node_cleanup_begin(self): """Suspend periodic status updates and mark the instance busy throughout the operation. """ self.status.begin_restart() self.status.set_status(rd_instance.ServiceStatuses.BLOCKED) def node_cleanup(self): """Cassandra does not automatically remove data from nodes that lose part of their partition range to a newly added node. Cleans up keyspaces and partition keys no longer belonging to the node. Do not treat cleanup failures as fatal. Resume the heartbeat after finishing and let it signal the true state of the instance to the caller. 
""" LOG.debug("Running node cleanup.") # nodetool -h -p -u -pw cleanup try: self._run_nodetool_command('cleanup') self.status.set_status(rd_instance.ServiceStatuses.RUNNING) except Exception: LOG.exception("The node failed to complete its cleanup.") finally: self.status.end_restart() def node_decommission(self): """Causes a live node to decommission itself, streaming its data to the next node on the ring. Shutdown the database after successfully finishing the operation, or leave the node in a failed state otherwise. Suspend periodic status updates, so that the caller can poll for the database shutdown. """ LOG.debug("Decommissioning the node.") # nodetool -h -p -u -pw decommission self.status.begin_restart() try: self._run_nodetool_command('decommission') except Exception: LOG.exception("The node failed to decommission itself.") self.status.set_status(rd_instance.ServiceStatuses.FAILED) return finally: # Cassandra connections have ability to automatically discover and # fallback to other cluster nodes whenever a node goes down. # Reset the status after decomissioning to ensure the heartbeat # connection talks to this node only. self.status = CassandraAppStatus(self.get_current_superuser()) try: self.stop_db(update_db=True, do_not_start_on_reboot=True) finally: self.status.end_restart() def flush_tables(self, keyspace, *tables): """Flushes one or more tables from the memtable. """ LOG.debug("Flushing tables.") # nodetool -h -p -u -pw flush -- # ( ... ) self._run_nodetool_command('flush', keyspace, *tables) def set_logging_level(self, log_level): """Set the log Cassandra's system log verbosity level. """ # Apply the change at runtime. self._run_nodetool_command('setlogginglevel', 'root', log_level) # Persist the change. self.logback_conf_manager.apply_system_override( {'configuration': {'root': {'@level': log_level}}}) def drain(self): """Drains Cassandra node so that it can upgraded safely. """ LOG.debug("Draining node.") self._run_nodetool_command('drain') def upgrade_sstables(self): """Upgrades sstables to match new datastore version. """ LOG.debug("Upgrading sstables.") self._run_nodetool_command('upgradesstables') def _run_nodetool_command(self, cmd, *args, **kwargs): """Execute a nodetool command on this node. """ return utils.execute('nodetool', '-h', 'localhost', cmd, *args, **kwargs) def enable_root(self, root_password=None): """Cassandra's 'root' user is called 'cassandra'. Create a new superuser if it does not exist and grant it full superuser-level access to all keyspaces. """ cassandra = models.CassandraUser.root(password=root_password) admin = self.build_admin() if self.is_root_enabled(): admin.alter_user_password(cassandra) else: admin._create_superuser(cassandra) return cassandra.serialize() def is_root_enabled(self): """The Trove administrative user ('os_admin') should normally be the only superuser in the system. """ found = self.build_admin().list_superusers() return len([user for user in found if user.name != self._ADMIN_USER]) > 0 class CassandraAppStatus(service.BaseDbStatus): def __init__(self, superuser): """ :param superuser: User account the Status uses for connecting to the database. 
:type superuser: CassandraUser """ super(CassandraAppStatus, self).__init__() self.__user = superuser self.__client = None @property def client(self): if self.__client is None: self.__client = CassandraLocalhostConnection(self.__user) return self.__client def _get_actual_db_status(self): try: if self.client.local_node_is_up(): return rd_instance.ServiceStatuses.RUNNING except NoHostAvailable: return rd_instance.ServiceStatuses.SHUTDOWN except Exception: LOG.exception("Error getting Cassandra status.") return rd_instance.ServiceStatuses.SHUTDOWN def cleanup_stalled_db_services(self): utils.execute_with_timeout(CassandraApp.CASSANDRA_KILL_CMD, shell=True) class CassandraAdmin(object): """Handles administrative tasks on the Cassandra database. In Cassandra only SUPERUSERS can create other users and grant permissions to database resources. Trove uses the 'cassandra' superuser to perform its administrative tasks. The users it creates are all 'normal' (NOSUPERUSER) accounts. The permissions it can grant are also limited to non-superuser operations. This is to prevent anybody from creating a new superuser via the Trove API. """ # Non-superuser grant modifiers. __NO_SUPERUSER_MODIFIERS = ('ALTER', 'CREATE', 'DROP', 'MODIFY', 'SELECT') _KS_NAME_REGEX = re.compile(r'^<keyspace (.+)>$') def __init__(self, user): self.__admin_user = user self.__client = None @property def client(self): if self.__client is None: self.__client = CassandraLocalhostConnection(self.__admin_user) return self.__client def create_user(self, context, users): """ Create new non-superuser accounts. New users are by default granted full access to all database resources. """ for item in users: self._create_user_and_grant(self.client, self._deserialize_user(item)) def _create_user_and_grant(self, client, user): """ Create a new non-superuser account and grant it full access to its databases. """ self._create_user(client, user) for db in user.databases: self._grant_full_access_on_keyspace( client, self._deserialize_keyspace(db), user) def _create_user(self, client, user): # Create only NOSUPERUSER accounts here. LOG.debug("Creating a new user '%s'.", user.name) client.execute("CREATE USER '{}' WITH PASSWORD %s NOSUPERUSER;", (user.name,), (user.password,)) def _create_superuser(self, user): """Create a new superuser account and grant it full superuser-level access to all keyspaces. """ LOG.debug("Creating a new superuser '%s'.", user.name) self.client.execute("CREATE USER '{}' WITH PASSWORD %s SUPERUSER;", (user.name,), (user.password,)) self.client.execute( "GRANT ALL PERMISSIONS ON ALL KEYSPACES TO '{}';", (user.name,)) def delete_user(self, context, user): self.drop_user(self._deserialize_user(user)) def drop_user(self, user): self._drop_user(self.client, user) def _drop_user(self, client, user): LOG.debug("Deleting user '%s'.", user.name) client.execute("DROP USER '{}';", (user.name, )) def get_user(self, context, username, hostname): user = self._find_user(self.client, username) return user.serialize() if user is not None else None def _find_user(self, client, username): """ Lookup a user with a given username. Omit user names on the ignore list. Return a new Cassandra user instance or None if no match is found. """ return next((user for user in self._get_listed_users(client) if user.name == username), None) def list_users(self, context, limit=None, marker=None, include_marker=False): """ List all non-superuser accounts. Omit names on the ignored list. Return an empty set if None.
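Paging by 'limit'/'marker' is handled by guestagent_utils.serialize_list() below, to which both arguments are passed through.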
""" return guestagent_utils.serialize_list( self._get_listed_users(self.client), limit=limit, marker=marker, include_marker=include_marker) def _get_listed_users(self, client): """ Return a set of unique user instances. Omit user names on the ignore list. """ return self._get_users( client, lambda user: user.name not in self.ignore_users) def _get_users(self, client, matcher=None): """ :param matcher Filter expression. :type matcher callable """ acl = self._get_acl(client) return {self._build_user(user.name, acl) for user in client.execute("LIST USERS;") if not matcher or matcher(user)} def _load_user(self, client, username, check_reserved=True): if check_reserved: models.CassandraUser(username).check_reserved() acl = self._get_acl(client, username=username) return self._build_user(username, acl) def _build_user(self, username, acl): user = models.CassandraUser(username) for ks, permissions in acl.get(username, {}).items(): if permissions: user.databases.append(models.CassandraSchema(ks).serialize()) return user def _get_acl(self, client, username=None): """Return the ACL for a database user. Return ACLs for all users if no particular username is specified. The ACL has the following format: {username #1: {keyspace #1: {access mod(s)...}, keyspace #2: {...}}, username #2: {keyspace #1: {...}, keyspace #3: {...}} } """ def build_list_query(username): query_tokens = ["LIST ALL PERMISSIONS"] if username: query_tokens.extend(["OF", "'%s'" % username]) query_tokens.append("NORECURSIVE;") return ' '.join(query_tokens) def parse_keyspace_name(resource): """Parse a keyspace name from a resource string. The resource string has the following form: where 'object' is one of the database objects (keyspace, table...). Return the name as a singleton set. Return an empty set if no match is found. """ match = self._KS_NAME_REGEX.match(resource) if match: return {match.group(1)} return {} def update_acl(username, keyspace, permission, acl): permissions = acl.get(username, {}).get(keyspace) if permissions is None: guestagent_utils.update_dict({user: {keyspace: {permission}}}, acl) else: permissions.add(permission) all_keyspace_names = None acl = dict() for item in client.execute(build_list_query(username)): user = item.username resource = item.resource permission = item.permission if user and resource and permission: if resource == '': # Cache the full keyspace list to improve performance and # ensure consistent results for all users. if all_keyspace_names is None: all_keyspace_names = { item.name for item in self._get_available_keyspaces(client) } keyspaces = all_keyspace_names else: keyspaces = parse_keyspace_name(resource) for keyspace in keyspaces: update_acl(user, keyspace, permission, acl) return acl def list_superusers(self): """List all system users existing in the database.""" return self._get_users(self.client, lambda user: user.super) def grant_access(self, context, username, hostname, databases): """ Grant full access on keyspaces to a given username. """ user = models.CassandraUser(username) for db in databases: self._grant_full_access_on_keyspace( self.client, models.CassandraSchema(db), user) def revoke_access(self, context, username, hostname, database): """ Revoke all permissions on any database resources from a given username. 
""" user = models.CassandraUser(username) self._revoke_all_access_on_keyspace( self.client, models.CassandraSchema(database), user) def _grant_full_access_on_keyspace(self, client, keyspace, user, check_reserved=True): """ Grant all non-superuser permissions on a keyspace to a given user. """ if check_reserved: user.check_reserved() keyspace.check_reserved() for access in self.__NO_SUPERUSER_MODIFIERS: self._grant_permission_on_keyspace(client, access, keyspace, user) def _grant_permission_on_keyspace(self, client, modifier, keyspace, user): """ Grant a non-superuser permission on a keyspace to a given user. Raise an exception if the caller attempts to grant a superuser access. """ LOG.debug("Granting '%(mod)s' access on '%(keyspace_name)s' to " "user '%(user)s'.", {'mod': modifier, 'keyspace_name': keyspace.name, 'user': user.name}) if modifier in self.__NO_SUPERUSER_MODIFIERS: client.execute("GRANT {} ON KEYSPACE \"{}\" TO '{}';", (modifier, keyspace.name, user.name)) else: raise exception.UnprocessableEntity( "Invalid permission modifier (%s). Allowed values are: '%s'" % (modifier, ', '.join(self.__NO_SUPERUSER_MODIFIERS))) def _revoke_all_access_on_keyspace(self, client, keyspace, user, check_reserved=True): if check_reserved: user.check_reserved() keyspace.check_reserved() LOG.debug("Revoking all permissions on '%(keyspace_name)s' " "from user '%(user)s'.", {'keyspace_name': keyspace.name, 'user': user.name}) client.execute("REVOKE ALL PERMISSIONS ON KEYSPACE \"{}\" FROM '{}';", (keyspace.name, user.name)) def update_attributes(self, context, username, hostname, user_attrs): user = self._load_user(self.client, username) new_name = user_attrs.get('name') new_password = user_attrs.get('password') self._update_user(self.client, user, new_name, new_password) def _update_user(self, client, user, new_username, new_password): """ Update a user of a given username. Updatable attributes include username and password. If a new username and password are given a new user with those attributes is created and all permissions from the original user get transferred to it. The original user is then dropped therefore revoking its permissions. If only new password is specified the existing user gets altered with that password. """ if new_username is not None and user.name != new_username: if new_password is not None: self._rename_user(client, user, new_username, new_password) else: raise exception.UnprocessableEntity( _("Updating username requires specifying a password " "as well.")) elif new_password is not None and user.password != new_password: user.password = new_password self._alter_user_password(client, user) def _rename_user(self, client, user, new_username, new_password): """ Rename a given user also updating its password. Transfer the current permissions to the new username. Drop the old username therefore revoking its permissions. 
""" LOG.debug("Renaming user '%(old)s' to '%(new)s'", {'old': user.name, 'new': new_username}) new_user = models.CassandraUser(new_username, new_password) new_user.databases.extend(user.databases) self._create_user_and_grant(client, new_user) self._drop_user(client, user) def alter_user_password(self, user): self._alter_user_password(self.client, user) def change_passwords(self, context, users): for user in users: self._alter_user_password(self.client, self._deserialize_user(user)) def _alter_user_password(self, client, user): LOG.debug("Changing password of user '%s'.", user.name) client.execute("ALTER USER '{}' " "WITH PASSWORD %s;", (user.name,), (user.password,)) def create_database(self, context, databases): for item in databases: self._create_single_node_keyspace( self.client, self._deserialize_keyspace(item)) def _create_single_node_keyspace(self, client, keyspace): """ Create a single-replica keyspace. Cassandra stores replicas on multiple nodes to ensure reliability and fault tolerance. All replicas are equally important; there is no primary or master. A replication strategy determines the nodes where replicas are placed. SimpleStrategy is for a single data center only. The total number of replicas across the cluster is referred to as the replication factor. Replication Strategy: 'SimpleStrategy' is not optimized for multiple data centers. 'replication_factor' The number of replicas of data on multiple nodes. Required for SimpleStrategy; otherwise, not used. Keyspace names are case-insensitive by default. To make a name case-sensitive, enclose it in double quotation marks. """ client.execute("CREATE KEYSPACE \"{}\" WITH REPLICATION = " "{{ 'class' : 'SimpleStrategy', " "'replication_factor' : 1 }};", (keyspace.name,)) def delete_database(self, context, database): self._drop_keyspace(self.client, self._deserialize_keyspace(database)) def _drop_keyspace(self, client, keyspace): LOG.debug("Dropping keyspace '%s'.", keyspace.name) client.execute("DROP KEYSPACE \"{}\";", (keyspace.name,)) def list_databases(self, context, limit=None, marker=None, include_marker=False): return guestagent_utils.serialize_list( self._get_available_keyspaces(self.client), limit=limit, marker=marker, include_marker=include_marker) def _get_available_keyspaces(self, client): """ Return a set of unique keyspace instances. Omit keyspace names on the ignore list. """ return {models.CassandraSchema(db.keyspace_name) for db in client.execute("SELECT * FROM " "system.schema_keyspaces;") if db.keyspace_name not in self.ignore_dbs} def list_access(self, context, username, hostname): user = self._find_user(self.client, username) if user: return user.databases raise exception.UserNotFound(uuid=username) def _deserialize_keyspace(self, keyspace_dict, check_reserved=True): if keyspace_dict: db = models.CassandraSchema.deserialize(keyspace_dict) if check_reserved: db.check_reserved() return db return None def _deserialize_user(self, user_dict, check_reserved=True): if user_dict: user = models.CassandraUser.deserialize(user_dict) if check_reserved: user.check_reserved() return user return None @property def ignore_users(self): return cfg.get_ignored_users() @property def ignore_dbs(self): return cfg.get_ignored_dbs() class CassandraConnection(object): """A wrapper to manage a Cassandra connection.""" # Cassandra 2.1 only supports protocol versions 3 and lower. 
NATIVE_PROTOCOL_VERSION = 3 CONNECTION_TIMEOUT_SEC = CONF.agent_call_high_timeout RECONNECT_DELAY_SEC = 3 def __init__(self, contact_points, user): self.__user = user # A Cluster is initialized with a set of initial contact points. # After the driver connects to one of the nodes it will automatically # discover the rest. # Will connect to '127.0.0.1' if no contact points are given (None). # # Set the 'reconnection_policy' so that dead connections recover fast. self._cluster = Cluster( contact_points=contact_points, auth_provider=PlainTextAuthProvider(user.name, user.password), protocol_version=self.NATIVE_PROTOCOL_VERSION, connect_timeout=self.CONNECTION_TIMEOUT_SEC, control_connection_timeout=self.CONNECTION_TIMEOUT_SEC, reconnection_policy=ConstantReconnectionPolicy( self.RECONNECT_DELAY_SEC, max_attempts=None)) self.__session = None self._connect() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self._disconnect() def execute(self, query, identifiers=None, data_values=None, timeout=None): """ Execute a query with a given sequence or dict of data values to bind. If a sequence is used, '%s' should be used as the placeholder for each argument. If a dict is used, '%(name)s' style placeholders must be used. Only data values should be supplied this way. Other items, such as keyspaces, table names, and column names should be set ahead of time. Use the '{}' style placeholders and the 'identifiers' parameter for those. Raise an exception if the operation exceeds the given timeout (sec). There is no timeout if set to None. Return the result rows, or an empty list if there are none. """ if self.is_active(): try: rows = self.__session.execute(self.__bind(query, identifiers), data_values, timeout) return rows or [] except OperationTimedOut: LOG.error("Query execution timed out.") raise LOG.debug("Cannot perform this operation on a closed connection.") raise exception.UnprocessableEntity() def __bind(self, query, identifiers): if identifiers: return query.format(*identifiers) return query def node_is_up(self, host_ip): """Test whether the Cassandra node located at the given IP is up. """ for host in self._cluster.metadata.all_hosts(): if host.address == host_ip: return host.is_up return False def local_node_is_up(self): """Test whether Cassandra is up on the localhost. """ return (self.node_is_up('127.0.0.1') or self.node_is_up(netutils.get_my_ipv4())) def _connect(self): if not self._cluster.is_shutdown: LOG.debug("Connecting to a Cassandra cluster as '%s'.", self.__user.name) if not self.is_active(): self.__session = self._cluster.connect() else: LOG.debug("Connection already open.") LOG.debug("Connected to cluster: '%s'", self._cluster.metadata.cluster_name) for host in self._cluster.metadata.all_hosts(): LOG.debug("Connected to node: '%(address)s' in rack " "'%(rack)s' at datacenter '%(datacenter)s'", {'address': host.address, 'rack': host.rack, 'datacenter': host.datacenter}) else: LOG.debug("Cannot perform this operation on a terminated cluster.") raise exception.UnprocessableEntity() def _disconnect(self): if self.is_active(): try: LOG.debug("Disconnecting from cluster: '%s'", self._cluster.metadata.cluster_name) self._cluster.shutdown() except Exception: LOG.debug("Failed to disconnect from a Cassandra cluster.") def is_active(self): return self.__session and not self.__session.is_shutdown def __del__(self): # The connection would survive the parent object's GC. # We need to close it explicitly.
self._disconnect() class CassandraLocalhostConnection(CassandraConnection): """ A connection to the localhost Cassandra server. """ def __init__(self, user): super(CassandraLocalhostConnection, self).__init__(None, user)
# --- trove/guestagent/datastore/experimental/couchbase/__init__.py (empty) ---
# --- trove/guestagent/datastore/experimental/couchbase/manager.py ---
# Copyright (c) 2013 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from trove.common import instance as rd_instance from trove.common.notification import EndNotification from trove.guestagent import backup from trove.guestagent.datastore.experimental.couchbase import service from trove.guestagent.datastore.experimental.couchbase import system from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): """ This is the Couchbase Manager class. It is dynamically loaded based on the datastore of the trove instance. """ def __init__(self): self.appStatus = service.CouchbaseAppStatus() self.app = service.CouchbaseApp(self.appStatus) super(Manager, self).__init__('couchbase') @property def status(self): return self.appStatus def reset_configuration(self, context, configuration): self.app.reset_configuration(configuration) def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" self.app.install_if_needed(packages) if device_path: device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() device.mount(mount_point) LOG.debug('Mounted the volume (%s).', device_path) self.app.start_db_with_conf_changes(config_contents) LOG.debug('Securing couchbase now.') self.app.initial_setup() if backup_info: LOG.debug('Now going to perform restore.') self._perform_restore(backup_info, context, mount_point) def restart(self, context): """ Restart this couchbase instance. This method is called when the guest agent gets a restart message from the taskmanager.
""" self.app.restart() def start_db_with_conf_changes(self, context, config_contents): self.app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): """ Stop this couchbase instance. This method is called when the guest agent gets a stop message from the taskmanager. """ self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def enable_root(self, context): LOG.debug("Enabling root.") return self.app.enable_root() def enable_root_with_password(self, context, root_password=None): return self.app.enable_root(root_password) def is_root_enabled(self, context): LOG.debug("Checking if root is enabled.") return os.path.exists(system.pwd_file) def _perform_restore(self, backup_info, context, restore_location): """ Restores all couchbase buckets and their documents from the backup. """ LOG.info("Restoring database from backup %s", backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception as e: LOG.error("Error performing restore from backup %s", backup_info['id']) LOG.error(e) self.status.set_status(rd_instance.ServiceStatuses.FAILED) raise LOG.info("Restored database successfully") def create_backup(self, context, backup_info): """ Backup all couchbase buckets and their documents. """ with EndNotification(context): backup.backup(context, backup_info) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/couchbase/service.py0000644000175000017500000002425400000000000030114 0ustar00coreycorey00000000000000# Copyright (c) 2013 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os import stat import subprocess import tempfile from oslo_log import log as logging from oslo_utils import netutils import pexpect import six from trove.common import cfg from trove.common.db import models from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.couchbase import system from trove.guestagent.datastore import service from trove.guestagent import pkg LOG = logging.getLogger(__name__) CONF = cfg.CONF packager = pkg.Package() class CouchbaseApp(object): """ Handles installation and configuration of couchbase on a trove instance. """ def __init__(self, status, state_change_wait_time=None): """ Sets default status and state_change_wait_time """ if state_change_wait_time: self.state_change_wait_time = state_change_wait_time else: self.state_change_wait_time = CONF.state_change_wait_time self.status = status def install_if_needed(self, packages): """ Install couchbase if needed, do nothing if it is already installed. 
""" LOG.info('Preparing Guest as Couchbase Server.') if not packager.pkg_is_installed(packages): LOG.debug('Installing Couchbase.') self._install_couchbase(packages) def initial_setup(self): self.ip_address = netutils.get_my_ipv4() mount_point = CONF.couchbase.mount_point try: LOG.info('Couchbase Server change data dir path.') operating_system.chown(mount_point, 'couchbase', 'couchbase', as_root=True) pwd = CouchbaseRootAccess.get_password() utils.execute_with_timeout( (system.cmd_node_init % {'data_path': mount_point, 'IP': self.ip_address, 'PWD': pwd}), shell=True) operating_system.remove(system.INSTANCE_DATA_DIR, force=True, as_root=True) LOG.debug('Couchbase Server initialize cluster.') utils.execute_with_timeout( (system.cmd_cluster_init % {'IP': self.ip_address, 'PWD': pwd}), shell=True) utils.execute_with_timeout(system.cmd_set_swappiness, shell=True) utils.execute_with_timeout(system.cmd_update_sysctl_conf, shell=True) LOG.info('Couchbase Server initial setup finished.') except exception.ProcessExecutionError: LOG.exception('Error performing initial Couchbase setup.') raise RuntimeError(_("Couchbase Server initial setup failed")) def _install_couchbase(self, packages): """ Install the Couchbase Server. """ LOG.debug('Installing Couchbase Server. Creating %s', system.COUCHBASE_CONF_DIR) operating_system.create_directory(system.COUCHBASE_CONF_DIR, as_root=True) pkg_opts = {} packager.pkg_install(packages, pkg_opts, system.TIME_OUT) self.start_db() LOG.debug('Finished installing Couchbase Server.') def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time) def start_db(self, update_db=False): self.status.start_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, enable_on_boot=True, update_db=update_db) def enable_root(self, root_password=None): return CouchbaseRootAccess.enable_root(root_password) def start_db_with_conf_changes(self, config_contents): LOG.info("Starting Couchbase with configuration changes.\n" "Configuration contents:\n %s.", config_contents) if self.status.is_running: LOG.error("Cannot start Couchbase with configuration changes. " "Couchbase state == %s.", self.status) raise RuntimeError(_("Couchbase is not stopped.")) self._write_config(config_contents) self.start_db(True) def reset_configuration(self, configuration): config_contents = configuration['config_contents'] LOG.debug("Resetting configuration.") self._write_config(config_contents) def _write_config(self, config_contents): """ Update contents of Couchbase configuration file """ return class CouchbaseAppStatus(service.BaseDbStatus): """ Handles all of the status updating for the couchbase guest agent. 
""" def _get_actual_db_status(self): self.ip_address = netutils.get_my_ipv4() pwd = None try: pwd = CouchbaseRootAccess.get_password() return self._get_status_from_couchbase(pwd) except exception.ProcessExecutionError: # log the exception, but continue with native config approach LOG.exception("Error getting the Couchbase status.") try: out, err = utils.execute_with_timeout( system.cmd_get_password_from_config, shell=True) except exception.ProcessExecutionError: LOG.exception("Error getting the root password from the " "native Couchbase config file.") return rd_instance.ServiceStatuses.SHUTDOWN config_pwd = out.strip() if out is not None else None if not config_pwd or config_pwd == pwd: LOG.debug("The root password from the native Couchbase config " "file is either empty or already matches the " "stored value.") return rd_instance.ServiceStatuses.SHUTDOWN try: status = self._get_status_from_couchbase(config_pwd) except exception.ProcessExecutionError: LOG.exception("Error getting Couchbase status using the " "password parsed from the native Couchbase " "config file.") return rd_instance.ServiceStatuses.SHUTDOWN # if the parsed root password worked, update the stored value to # avoid having to consult/parse the couchbase config file again. LOG.debug("Updating the stored value for the Couchbase " "root password.") CouchbaseRootAccess().write_password_to_file(config_pwd) return status def _get_status_from_couchbase(self, pwd): out, err = utils.execute_with_timeout( (system.cmd_couchbase_status % {'IP': self.ip_address, 'PWD': pwd}), shell=True) server_stats = json.loads(out) if not err and server_stats["clusterMembership"] == "active": return rd_instance.ServiceStatuses.RUNNING else: return rd_instance.ServiceStatuses.SHUTDOWN def cleanup_stalled_db_services(self): utils.execute_with_timeout(system.cmd_kill) class CouchbaseRootAccess(object): @classmethod def enable_root(cls, root_password=None): user = models.DatastoreUser.root(password=root_password) if root_password: CouchbaseRootAccess().write_password_to_file(root_password) else: CouchbaseRootAccess().set_password(user.password) return user.serialize() def set_password(self, root_password): self.ip_address = netutils.get_my_ipv4() child = pexpect.spawn(system.cmd_reset_pwd % {'IP': self.ip_address}) try: child.expect('.*password.*') child.sendline(root_password) child.expect('.*(yes/no).*') child.sendline('yes') child.expect('.*successfully.*') except pexpect.TIMEOUT: child.delayafterclose = 1 child.delayafterterminate = 1 try: child.close(force=True) except pexpect.ExceptionPexpect: # Close fails to terminate a sudo process on some OSes. subprocess.call(['sudo', 'kill', str(child.pid)]) self.write_password_to_file(root_password) def write_password_to_file(self, root_password): operating_system.create_directory(system.COUCHBASE_CONF_DIR, as_root=True) try: tempfd, tempname = tempfile.mkstemp() os.fchmod(tempfd, stat.S_IRUSR | stat.S_IWUSR) if isinstance(root_password, six.text_type): root_password = root_password.encode('utf-8') os.write(tempfd, root_password) os.fchmod(tempfd, stat.S_IRUSR) os.close(tempfd) except OSError as err: message = _("An error occurred in saving password " "(%(errno)s). 
%(strerror)s.") % { "errno": err.errno, "strerror": err.strerror} LOG.exception(message) raise RuntimeError(message) operating_system.move(tempname, system.pwd_file, as_root=True) @staticmethod def get_password(): pwd = "password" if os.path.exists(system.pwd_file): with open(system.pwd_file) as file: pwd = file.readline().strip() return pwd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/couchbase/system.py0000644000175000017500000000464300000000000030000 0ustar00coreycorey00000000000000# Copyright (c) 2013 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import cfg CONF = cfg.CONF TIME_OUT = 1200 COUCHBASE_DUMP_DIR = '/tmp/backups' COUCHBASE_CONF_DIR = '/etc/couchbase' COUCHBASE_WEBADMIN_PORT = '8091' COUCHBASE_REST_API = 'http://localhost:' + COUCHBASE_WEBADMIN_PORT BUCKETS_JSON = '/buckets.json' SECRET_KEY = '/secret_key' SERVICE_CANDIDATES = ["couchbase-server"] INSTANCE_DATA_DIR = '/opt/couchbase/var/lib/couchbase/data' cmd_couchbase_status = ('sudo /opt/couchbase/bin/couchbase-cli server-info ' '-c %(IP)s:8091 -u root -p %(PWD)s') cmd_node_init = ('sudo /opt/couchbase/bin/couchbase-cli node-init ' '-c %(IP)s:8091 --node-init-data-path=%(data_path)s ' '-u root -p %(PWD)s') cmd_cluster_init = ('sudo /opt/couchbase/bin/couchbase-cli cluster-init ' '-c %(IP)s:8091 --cluster-init-username=root ' '--cluster-init-password=%(PWD)s ' '--cluster-init-port=8091 ' '-u root -p %(PWD)s') cmd_kill = 'sudo pkill -u couchbase' """ For optimal couchbase operations, swappiness of vm should be set to 0. Reference link: http://docs.couchbase.com/couchbase-manual-2 .5/cb-admin/#using-couchbase-in-the-cloud """ cmd_set_swappiness = 'sudo sysctl vm.swappiness=0' cmd_update_sysctl_conf = ('echo "vm.swappiness = 0" | sudo tee -a ' '/etc/sysctl.conf') cmd_reset_pwd = 'sudo /opt/couchbase/bin/cbreset_password %(IP)s:8091' pwd_file = COUCHBASE_CONF_DIR + SECRET_KEY cmd_get_password_from_config = ( r"""sudo /opt/couchbase/bin/erl -noinput -eval 'case file:read_file(""" r""""/opt/couchbase/var/lib/couchbase/config/config.dat") of {ok, B} ->""" r"""io:format("~p~n", [binary_to_term(B)]) end.' 
-run init stop""" r""" | grep '\[{"root",\[{password,' | awk -F\" '{print $4}'""") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/couchdb/0000755000175000017500000000000000000000000025546 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/couchdb/__init__.py0000644000175000017500000000000000000000000027645 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/couchdb/manager.py0000644000175000017500000001434500000000000027541 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from trove.common import instance as rd_instance from trove.guestagent import backup from trove.guestagent.datastore.experimental.couchdb import service from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): """ This is CouchDB Manager class. It is dynamically loaded based off of the datastore of the Trove instance. """ def __init__(self): self.appStatus = service.CouchDBAppStatus() self.app = service.CouchDBApp(self.appStatus) super(Manager, self).__init__('couchdb') @property def status(self): return self.appStatus def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" self.app.install_if_needed(packages) if device_path: self.app.stop_db() device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(mount_point): device.migrate_data(mount_point) device.mount(mount_point) LOG.debug('Mounted the volume (%s).', device_path) self.app.start_db() self.app.change_permissions() self.app.make_host_reachable() if backup_info: self._perform_restore(backup_info, context, mount_point) self.app.secure() def stop_db(self, context, do_not_start_on_reboot=False): """ Stop this CouchDB instance. This method is called when the guest agent gets a stop message from the taskmanager. """ LOG.debug("Stopping the CouchDB instance.") self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def restart(self, context): """ Restart this CouchDB instance. This method is called when the guest agent gets a restart message from the taskmanager. 
""" LOG.debug("Restarting the CouchDB instance.") self.app.restart() def start_db_with_conf_changes(self, context, config_contents): LOG.debug("Starting CouchDB with configuration changes.") self.app.start_db_with_conf_changes(config_contents) def _perform_restore(self, backup_info, context, restore_location): """ Restores all CouchDB databases and their documents from the backup. """ LOG.info("Restoring database from backup %s", backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception: LOG.exception("Error performing restore from backup %s", backup_info['id']) self.status.set_status(rd_instance.ServiceStatuses.FAILED) raise LOG.info("Restored database successfully") def create_backup(self, context, backup_info): LOG.debug("Creating backup for CouchDB.") backup.backup(context, backup_info) def create_admin_user(self, context, password): self.app.create_admin_user(password) def store_admin_password(self, context, password): self.app.store_admin_password(password) def create_user(self, context, users): LOG.debug("Creating user(s).") return service.CouchDBAdmin().create_user(users) def delete_user(self, context, user): LOG.debug("Deleting user.") return service.CouchDBAdmin().delete_user(user) def list_users(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing users.") return service.CouchDBAdmin().list_users(limit, marker, include_marker) def get_user(self, context, username, hostname): LOG.debug("Show details of user %s.", username) return service.CouchDBAdmin().get_user(username, hostname) def grant_access(self, context, username, hostname, databases): LOG.debug("Granting access.") return service.CouchDBAdmin().grant_access(username, databases) def revoke_access(self, context, username, hostname, database): LOG.debug("Revoking access.") return service.CouchDBAdmin().revoke_access(username, database) def list_access(self, context, username, hostname): LOG.debug("Listing access.") return service.CouchDBAdmin().list_access(username, hostname) def enable_root(self, context): LOG.debug("Enabling root.") return service.CouchDBAdmin().enable_root() def enable_root_with_password(self, context, root_password=None): return service.CouchDBAdmin().enable_root(root_pwd=root_password) def is_root_enabled(self, context): LOG.debug("Checking if root is enabled.") return service.CouchDBAdmin().is_root_enabled() def create_database(self, context, databases): LOG.debug("Creating database(s).") return service.CouchDBAdmin().create_database(databases) def list_databases(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing databases.") return service.CouchDBAdmin().list_databases(limit, marker, include_marker) def delete_database(self, context, database): LOG.debug("Deleting database.") return service.CouchDBAdmin().delete_database(database) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/couchdb/service.py0000644000175000017500000005641100000000000027567 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import getpass import json from oslo_log import log as logging from trove.common import cfg from trove.common.db.couchdb import models from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common import pagination from trove.common.stream_codecs import JsonCodec from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.experimental.couchdb import system from trove.guestagent.datastore import service from trove.guestagent import pkg CONF = cfg.CONF LOG = logging.getLogger(__name__) packager = pkg.Package() COUCHDB_LIB_DIR = "/var/lib/couchdb" COUCHDB_LOG_DIR = "/var/log/couchdb" COUCHDB_CONFIG_DIR = "/etc/couchdb" COUCHDB_BIN_DIR = "/var/run/couchdb" class CouchDBApp(object): """ Handles installation and configuration of CouchDB on a Trove instance. """ def __init__(self, status, state_change_wait_time=None): """ Sets default status and state_change_wait_time. """ self.state_change_wait_time = ( state_change_wait_time if state_change_wait_time else CONF.state_change_wait_time ) LOG.debug("state_change_wait_time = %s.", self.state_change_wait_time) self.status = status def install_if_needed(self, packages): """ Install CouchDB if needed, do nothing if it is already installed. """ LOG.info('Preparing guest as a CouchDB server.') if not packager.pkg_is_installed(packages): LOG.debug("Installing packages: %s.", str(packages)) packager.pkg_install(packages, {}, system.TIME_OUT) LOG.info("Finished installing CouchDB server.") def change_permissions(self): """ When CouchDB is installed, a default user 'couchdb' is created. In order to start/stop/restart the CouchDB service as the current OS user, add the current OS user to the 'couchdb' group and provide read/write access to the 'couchdb' group.
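In shell terms this is roughly (a sketch, not the literal implementation): sudo chown couchdb:couchdb <dir> and sudo chmod g+rw <dir> for each of the lib/log/bin/config directories, plus sudo usermod -a -G couchdb <current user>.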
""" try: LOG.debug("Changing permissions.") for dir in [COUCHDB_LIB_DIR, COUCHDB_LOG_DIR, COUCHDB_BIN_DIR, COUCHDB_CONFIG_DIR]: operating_system.chown(dir, 'couchdb', 'couchdb', as_root=True) operating_system.chmod(dir, FileMode.ADD_GRP_RW, as_root=True) operating_system.change_user_group(getpass.getuser(), 'couchdb', as_root=True) LOG.debug("Successfully changed permissions.") except exception.ProcessExecutionError: LOG.exception("Error changing permissions.") def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def start_db(self, update_db=False): self.status.start_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, enable_on_boot=True, update_db=update_db) def restart(self): self.status.restart_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time) def make_host_reachable(self): try: LOG.debug("Changing bind address to 0.0.0.0 .") self.stop_db() out, err = utils.execute_with_timeout( system.UPDATE_BIND_ADDRESS, shell=True ) self.start_db() except exception.ProcessExecutionError: LOG.exception("Error while trying to update bind address of" " CouchDB server.") def start_db_with_conf_changes(self, config_contents): ''' Will not be implementing configuration change API for CouchDB in the Kilo release. Currently all that this method does is to start the CouchDB server without any configuration changes. Looks like this needs to be implemented to enable volume resize on the guest agent side. ''' LOG.info("Starting CouchDB with configuration changes.") self.start_db(True) def store_admin_password(self, password): LOG.debug('Storing the admin password.') creds = CouchDBCredentials(username=system.COUCHDB_ADMIN_NAME, password=password) creds.write(system.COUCHDB_ADMIN_CREDS_FILE) return creds def create_admin_user(self, password): ''' Creating the admin user, os_admin, for the couchdb instance ''' LOG.debug('Creating the admin user.') creds = self.store_admin_password(password) out, err = utils.execute_with_timeout( system.COUCHDB_CREATE_ADMIN % {'password': creds.password}, shell=True) LOG.debug('Created admin user.') def secure(self): ''' Create the Trove admin user. The service should not be running at this point. ''' self.start_db(update_db=False) password = utils.generate_random_password() self.create_admin_user(password) LOG.debug("CouchDB secure complete.") @property def admin_password(self): creds = CouchDBCredentials() creds.read(system.COUCHDB_ADMIN_CREDS_FILE) return creds.password class CouchDBAppStatus(service.BaseDbStatus): """ Handles all of the status updating for the CouchDB guest agent. 
We can verify that CouchDB is running by running the command: curl http://127.0.0.1:5984/ The response will be similar to: {"couchdb":"Welcome","version":"1.6.0"} """ def _get_actual_db_status(self): try: out, err = utils.execute_with_timeout( system.COUCHDB_SERVER_STATUS, shell=True ) LOG.debug("CouchDB status = %r", out) server_status = json.loads(out) status = server_status["couchdb"] if status == 'Welcome': LOG.debug("Status of CouchDB is active.") return rd_instance.ServiceStatuses.RUNNING else: LOG.debug("Status of CouchDB is not active.") return rd_instance.ServiceStatuses.SHUTDOWN except exception.ProcessExecutionError: LOG.exception("Error getting CouchDB status.") return rd_instance.ServiceStatuses.SHUTDOWN class CouchDBAdmin(object): '''Handles administrative functions on CouchDB.''' # user is cached by making it a class attribute admin_user = None def _admin_user(self): if not type(self).admin_user: creds = CouchDBCredentials() creds.read(system.COUCHDB_ADMIN_CREDS_FILE) user = models.CouchDBUser(creds.username, creds.password) type(self).admin_user = user return type(self).admin_user def _is_modifiable_user(self, name): if name in cfg.get_ignored_users(): return False elif name == system.COUCHDB_ADMIN_NAME: return False return True def _is_modifiable_database(self, name): return name not in cfg.get_ignored_dbs() def create_user(self, users): LOG.debug("Creating user(s) for accessing CouchDB database(s).") self._admin_user() try: for item in users: user = models.CouchDBUser.deserialize(item) try: LOG.debug("Creating user: %s.", user.name) utils.execute_with_timeout( system.CREATE_USER_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'username': user.name, 'password': user.password}, shell=True) except exception.ProcessExecutionError: LOG.exception("Error creating user: %s.", user.name) for database in user.databases: mydb = models.CouchDBSchema.deserialize(database) try: LOG.debug("Granting user: %(user)s access to " "database: %(db)s.", {'user': user.name, 'db': mydb.name}) out, err = utils.execute_with_timeout( system.GRANT_ACCESS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': mydb.name, 'username': user.name}, shell=True) except exception.ProcessExecutionError as pe: LOG.debug("Error granting user: %(user)s access to " "database: %(db)s.", {'user': user.name, 'db': mydb.name}) LOG.debug(pe) except exception.ProcessExecutionError as pe: LOG.exception("An error occurred creating users: %s.", str(pe)) def delete_user(self, user): LOG.debug("Delete a given CouchDB user.") couchdb_user = models.CouchDBUser.deserialize(user) db_names = self.list_database_names() for db in db_names: userlist = [] try: out, err = utils.execute_with_timeout( system.DB_ACCESS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': db}, shell=True) except exception.ProcessExecutionError: LOG.debug( "Error while trying to get the users for database: %s.", db) continue evalout = ast.literal_eval(out) if evalout: members = evalout['members'] names = members['names'] for i in range(0, len(names)): couchdb_user.databases = db userlist.append(names[i]) if couchdb_user.name in userlist: userlist.remove(couchdb_user.name) out2, err2 = utils.execute_with_timeout( system.REVOKE_ACCESS_COMMAND % { 'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': db, 'username': userlist}, shell=True) try:
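# CouchDB documents are versioned (MVCC), so the user document cannot be
# deleted by name alone: the DELETE must carry the document's current
# revision id. The block below therefore first fetches /_users/_all_docs
# (system.DELETE_REV_ID), scans the rows for "org.couchdb.user:<name>" to
# pick up its latest "rev", and only then issues the DELETE with
# ?rev=<revid> (system.DELETE_USER_COMMAND).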
out2, err = utils.execute_with_timeout( system.DELETE_REV_ID % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password}, shell=True) evalout2 = ast.literal_eval(out2) rows = evalout2['rows'] userlist = [] for i in range(0, len(rows)): row = rows[i] username = "org.couchdb.user:" + couchdb_user.name if row['key'] == username: rev = row['value'] revid = rev['rev'] utils.execute_with_timeout( system.DELETE_USER_COMMAND % { 'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'username': couchdb_user.name, 'revid': revid}, shell=True) except exception.ProcessExecutionError as pe: LOG.exception( "There was an error while deleting user: %s.", pe) raise exception.GuestError(original_message=_( "Unable to delete user: %s.") % couchdb_user.name) def list_users(self, limit=None, marker=None, include_marker=False): '''List all users and the databases they have access to.''' users = [] db_names = self.list_database_names() try: out, err = utils.execute_with_timeout( system.ALL_USERS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password}, shell=True) except exception.ProcessExecutionError: LOG.debug("Error while trying to get the list of all CouchDB users.") evalout = ast.literal_eval(out) rows = evalout['rows'] userlist = [] for i in range(0, len(rows)): row = rows[i] uname = row['key'] if not self._is_modifiable_user(uname): break elif uname[17:]: userlist.append(uname[17:]) for i in range(len(userlist)): user = models.CouchDBUser(userlist[i]) for db in db_names: try: out2, err = utils.execute_with_timeout( system.DB_ACCESS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': db}, shell=True) except exception.ProcessExecutionError: LOG.debug( "Error while trying to get users for database: %s.", db) continue evalout2 = ast.literal_eval(out2) if evalout2: members = evalout2['members'] names = members['names'] for i in range(0, len(names)): if user.name == names[i]: user.databases = db users.append(user.serialize()) next_marker = None return users, next_marker def get_user(self, username, hostname): '''Get information about the given user.''' LOG.debug('Getting user %s.', username) user = self._get_user(username, hostname) if not user: return None return user.serialize() def _get_user(self, username, hostname): user = models.CouchDBUser(username) db_names = self.list_database_names() for db in db_names: try: out, err = utils.execute_with_timeout( system.DB_ACCESS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': db}, shell=True) except exception.ProcessExecutionError: LOG.debug( "Error while trying to get the users for database: %s.", db) continue evalout = ast.literal_eval(out) if evalout: members = evalout['members'] names = members['names'] for i in range(0, len(names)): if user.name == names[i]: user.databases = db return user def grant_access(self, username, databases): if self._get_user(username, None).name != username: raise exception.BadRequest(_( 'Cannot grant access for non-existent user: ' '%(user)s') % {'user': username}) else: user = models.CouchDBUser(username) if not self._is_modifiable_user(user.name): LOG.warning('Cannot grant access for reserved user ' '%(user)s', {'user': username}) if not user: raise exception.BadRequest(_( 'Cannot grant access for reserved or non-existent user ' '%(user)s') % {'user': username}) for db_name in databases: out, err = utils.execute_with_timeout(
system.GRANT_ACCESS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': db_name, 'username': username}, shell=True) def revoke_access(self, username, database): userlist = [] if self._is_modifiable_user(username): out, err = utils.execute_with_timeout( system.DB_ACCESS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': database}, shell=True) evalout = ast.literal_eval(out) members = evalout['members'] names = members['names'] for i in range(0, len(names)): userlist.append(names[i]) if username in userlist: userlist.remove(username) out2, err2 = utils.execute_with_timeout( system.REVOKE_ACCESS_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': database, 'username': userlist}, shell=True) def list_access(self, username, hostname): '''Returns a list of all databases which the user has access to.''' user = self._get_user(username, hostname) return user.databases def enable_root(self, root_pwd=None): '''Create the admin user root.''' root_user = models.CouchDBUser.root(password=root_pwd) out, err = utils.execute_with_timeout( system.ENABLE_ROOT % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'password': root_pwd}, shell=True) return root_user.serialize() def is_root_enabled(self): '''Check if the user root exists.''' out, err = utils.execute_with_timeout( system.IS_ROOT_ENABLED % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password}, shell=True) evalout = ast.literal_eval(out) if evalout['root']: return True else: return False def create_database(self, databases): '''Create the given database(s).''' dbName = None db_create_failed = [] LOG.debug("Creating CouchDB databases.") for database in databases: dbName = models.CouchDBSchema.deserialize(database).name if self._is_modifiable_database(dbName): LOG.debug('Creating CouchDB database %s', dbName) try: utils.execute_with_timeout( system.CREATE_DB_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': dbName}, shell=True) except exception.ProcessExecutionError: LOG.exception( "There was an error creating database: %s.", dbName) db_create_failed.append(dbName) else: LOG.warning('Cannot create database with a reserved name ' '%(db)s', {'db': dbName}) db_create_failed.append(dbName) if len(db_create_failed) > 0: LOG.error("Creating the following databases failed: %s.", db_create_failed) def list_database_names(self): '''Get the list of database names.''' out, err = utils.execute_with_timeout( system.LIST_DB_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password}, shell=True) dbnames_list = ast.literal_eval(out) for hidden in cfg.get_ignored_dbs(): if hidden in dbnames_list: dbnames_list.remove(hidden) return dbnames_list def list_databases(self, limit=None, marker=None, include_marker=False): '''Lists all the CouchDB databases.''' databases = [] db_names = self.list_database_names() pag_dblist, marker = pagination.paginate_list(db_names, limit, marker, include_marker) databases = [models.CouchDBSchema(db_name).serialize() for db_name in pag_dblist] LOG.debug('databases = %s', str(databases)) return databases, marker def delete_database(self, database): '''Delete the specified database.''' dbName = models.CouchDBSchema.deserialize(database).name if self._is_modifiable_database(dbName): try: LOG.debug("Deleting CouchDB database: %s.",
dbName) utils.execute_with_timeout( system.DELETE_DB_COMMAND % {'admin_name': self._admin_user().name, 'admin_password': self._admin_user().password, 'dbname': dbName}, shell=True) except exception.ProcessExecutionError: LOG.exception( "There was an error while deleting database:%s.", dbName) raise exception.GuestError(original_message=_( "Unable to delete database: %s.") % dbName) else: LOG.warning('Cannot delete a reserved database ' '%(db)s', {'db': dbName}) class CouchDBCredentials(object): """Handles storing/retrieving credentials. Stored as json in files""" def __init__(self, username=None, password=None): self.username = username self.password = password def read(self, filename): credentials = operating_system.read_file(filename, codec=JsonCodec()) self.username = credentials['username'] self.password = credentials['password'] def write(self, filename): self.clear_file(filename) credentials = {'username': self.username, 'password': self.password} operating_system.write_file(filename, credentials, codec=JsonCodec()) operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW) @staticmethod def clear_file(filename): LOG.debug("Creating clean file %s", filename) if operating_system.file_discovery([filename]): operating_system.remove(filename) # force file creation by just opening it open(filename, 'wb') operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW, as_root=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/couchdb/system.py0000644000175000017500000000647200000000000027455 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
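# The command templates below drive CouchDB through its HTTP API with curl.
# As an illustration (user name and password assumed), CREATE_USER_COMMAND
# expands to roughly:
#   curl -X PUT http://os_admin:secret@localhost:5984/_users/org.couchdb.user:bob
#        -H "Accept: application/json" -H "Content-Type: application/json"
#        -d '{"name": "bob", "password": "secret", "roles": [], "type": "user"}'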
from os import path SERVICE_CANDIDATES = ["couchdb"] UPDATE_BIND_ADDRESS = ( "sudo sed -i -r 's/;bind_address = 127.0.0.1/bind_address = 0.0.0.0/' " "/etc/couchdb/local.ini") TIME_OUT = 1200 COUCHDB_HTTPD_PORT = "5984" COUCHDB_SERVER_STATUS = "curl http://127.0.0.1:" + COUCHDB_HTTPD_PORT COUCHDB_ADMIN_NAME = 'os_admin' COUCHDB_CREATE_ADMIN = ( "curl -X PUT http://127.0.0.1:" + COUCHDB_HTTPD_PORT + "/_config/admins/" + COUCHDB_ADMIN_NAME + " -d '\"%(password)s\"'") COUCHDB_ADMIN_CREDS_FILE = path.join(path.expanduser('~'), '.os_couchdb_admin_creds.json') CREATE_USER_COMMAND = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_users/org.couchdb.user:%(username)s -H \"Accept:" " application/json\" -H \"Content-Type: application/json\" -d \'{\"name\"" ": \"%(username)s\", \"password\": \"%(password)s\", \"roles\": []," " \"type\":\"user\"}\'") DELETE_REV_ID = ( "curl -s http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_users/_all_docs") DELETE_USER_COMMAND = ( "curl -X DELETE http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_users/org.couchdb.user:%(username)s?rev=" "%(revid)s") ALL_USERS_COMMAND = ( "curl -s http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_users/_all_docs") DB_ACCESS_COMMAND = ( "curl -s http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s/_security") GRANT_ACCESS_COMMAND = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s/_security -d \'{\"admins\":{\"names\"" ":[], \"roles\":[]}, \"members\":{\"" + "names\":[\"%(username)s\"],\"" "roles\":[]}}\'") REVOKE_ACCESS_COMMAND = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s/_security" + " -d \'{\"admins\":{\"" "names\":[], \"roles\":[]}, \"members\":{\"" + "names\":%(username)s,\"" "roles\":[]}}\'") ENABLE_ROOT = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:5984" "/_config/admins/root -d '\"%(password)s\"'") IS_ROOT_ENABLED = ( "curl -s http://%(admin_name)s:%(admin_password)s@localhost:5984/_config/" "admins") CREATE_DB_COMMAND = ( "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s") LIST_DB_COMMAND = ( "curl -X GET http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/_all_dbs") DELETE_DB_COMMAND = ( "curl -X DELETE http://%(admin_name)s:%(admin_password)s@localhost:" + COUCHDB_HTTPD_PORT + "/%(dbname)s") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/db2/0000755000175000017500000000000000000000000024606 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/db2/__init__.py0000644000175000017500000000000000000000000026705 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/db2/manager.py0000644000175000017500000001333100000000000026573 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from trove.common import instance as ds_instance from trove.common.notification import EndNotification from trove.guestagent import backup from trove.guestagent.datastore.experimental.db2 import service from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): """ This is the DB2 Manager class. It is dynamically loaded based on the datastore of the Trove instance. """ def __init__(self): self.appStatus = service.DB2AppStatus() self.app = service.DB2App(self.appStatus) self.admin = service.DB2Admin() super(Manager, self).__init__('db2') @property def status(self): return self.appStatus @property def configuration_manager(self): return self.app.configuration_manager def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" if device_path: device = volume.VolumeDevice(device_path) device.unmount_device(device_path) device.format() if os.path.exists(mount_point): device.migrate_data(mount_point) device.mount(mount_point) LOG.debug("Mounted the volume.") self.app.update_hostname() self.app.change_ownership(mount_point) self.app.start_db() if backup_info: self._perform_restore(backup_info, context, mount_point) if config_contents: self.app.configuration_manager.save_configuration( config_contents) def restart(self, context): """ Restart this DB2 instance. This method is called when the guest agent gets a restart message from the taskmanager. """ LOG.debug("Restart a DB2 server instance.") self.app.restart() def stop_db(self, context, do_not_start_on_reboot=False): """ Stop this DB2 instance. This method is called when the guest agent gets a stop message from the taskmanager.
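When do_not_start_on_reboot is set, the underlying DB2App.stop_db() also disables autostart of the instance (system.DISABLE_AUTOSTART, i.e. db2iauto -off db2inst1).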
""" LOG.debug("Stop a given DB2 server instance.") self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def create_database(self, context, databases): LOG.debug("Creating database(s) %s.", databases) with EndNotification(context): self.admin.create_database(databases) def delete_database(self, context, database): LOG.debug("Deleting database %s.", database) with EndNotification(context): return self.admin.delete_database(database) def list_databases(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing all databases.") return self.admin.list_databases(limit, marker, include_marker) def create_user(self, context, users): LOG.debug("Create user(s).") with EndNotification(context): self.admin.create_user(users) def delete_user(self, context, user): LOG.debug("Delete a user %s.", user) with EndNotification(context): self.admin.delete_user(user) def get_user(self, context, username, hostname): LOG.debug("Show details of user %s.", username) return self.admin.get_user(username, hostname) def list_users(self, context, limit=None, marker=None, include_marker=False): LOG.debug("List all users.") return self.admin.list_users(limit, marker, include_marker) def list_access(self, context, username, hostname): LOG.debug("List all the databases the user has access to.") return self.admin.list_access(username, hostname) def start_db_with_conf_changes(self, context, config_contents): LOG.debug("Starting DB2 with configuration changes.") self.app.start_db_with_conf_changes(config_contents) def _perform_restore(self, backup_info, context, restore_location): LOG.info("Restoring database from backup %s.", backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception: LOG.exception("Error performing restore from backup %s.", backup_info['id']) self.status.set_status(ds_instance.ServiceStatuses.FAILED) raise LOG.info("Restored database successfully.") def create_backup(self, context, backup_info): LOG.debug("Creating backup.") backup.backup(context, backup_info) def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") if remove: self.app.remove_overrides() else: self.app.update_overrides(context, overrides) def apply_overrides(self, context, overrides): if overrides: LOG.debug("Applying overrides: %s", str(overrides)) self.app.apply_overrides(overrides) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/db2/service.py0000644000175000017500000006360200000000000026627 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_log import log as logging from oslo_utils import encodeutils from trove.common import cfg from trove.common.db import models from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.stream_codecs import PropertiesCodec from trove.common import utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.db2 import system from trove.guestagent.datastore import service CONF = cfg.CONF LOG = logging.getLogger(__name__) MOUNT_POINT = CONF.db2.mount_point FAKE_CFG = os.path.join(MOUNT_POINT, "db2.cfg.fake") DB2_DEFAULT_CFG = os.path.join(MOUNT_POINT, "db2_default_dbm.cfg") class DB2App(object): """ Handles installation and configuration of DB2 on a Trove instance. """ def __init__(self, status, state_change_wait_time=None): LOG.debug("Initialize DB2App.") self.state_change_wait_time = ( state_change_wait_time if state_change_wait_time else CONF.state_change_wait_time ) LOG.debug("state_change_wait_time = %s.", self.state_change_wait_time) self.status = status self.dbm_default_config = {} self.init_config() ''' If the DB2 guest agent has been configured for online backups, every database that is created will be configured for online backups. Since online backups are done using archive logging, we need to create a directory to store the archived logs. ''' if CONF.db2.backup_strategy == 'DB2OnlineBackup': create_db2_dir(system.DB2_ARCHIVE_LOGS_DIR) def init_config(self): if not operating_system.exists(MOUNT_POINT, True): operating_system.create_directory(MOUNT_POINT, system.DB2_INSTANCE_OWNER, system.DB2_INSTANCE_OWNER, as_root=True) """ The database manager configuration file - db2systm - is stored under the /home/db2inst1/sqllib directory. To update configuration parameters, DB2 recommends using the UPDATE DBM CONFIGURATION command instead of directly editing the config file. The existing PropertiesCodec implementation has been reused to handle text-file operations. Configuration overrides are implemented using the ImportOverrideStrategy of the guestagent configuration manager. """ LOG.debug("Initialize DB2 configuration") revision_dir = ( guestagent_utils.build_file_path( os.path.join(MOUNT_POINT, os.path.dirname(system.DB2_INSTANCE_OWNER)), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) ) if not operating_system.exists(FAKE_CFG): operating_system.write_file(FAKE_CFG, '', as_root=True) operating_system.chown(FAKE_CFG, system.DB2_INSTANCE_OWNER, system.DB2_INSTANCE_OWNER, as_root=True) self.configuration_manager = ( ConfigurationManager(FAKE_CFG, system.DB2_INSTANCE_OWNER, system.DB2_INSTANCE_OWNER, PropertiesCodec(delimiter='='), requires_root=True, override_strategy=ImportOverrideStrategy( revision_dir, "cnf")) ) ''' Below we are getting the database manager default configuration and saving it to the DB2_DEFAULT_CFG file. This is done to help with correctly resetting the configurations to the original values when a user wants to detach a user-defined configuration group from an instance. DB2 provides a command to reset the database manager configuration parameters (RESET DBM CONFIGURATION) but this command resets all the configuration parameters to the system defaults.
When we build a DB2 guest image there are certain configuration parameters, like SVCENAME, which we set so that the instance can start correctly. Hence resetting such a value to the system default would leave the instance in an unstable state. Instead, the recommended way to reset a subset of configuration parameters is to save the output of GET DBM CONFIGURATION for the original configuration and then call UPDATE DBM CONFIGURATION to restore each value. http://www.ibm.com/support/knowledgecenter/SSEPGG_10.5.0/com.ibm.db2.luw.admin.cmd.doc/doc/r0001970.html ''' if not operating_system.exists(DB2_DEFAULT_CFG): run_command(system.GET_DBM_CONFIGURATION % { "dbm_config": DB2_DEFAULT_CFG}) self.process_default_dbm_config() def process_default_dbm_config(self): """ Once the default database manager configuration is saved to DB2_DEFAULT_CFG, we try to store the configuration parameters and values into a dictionary object, dbm_default_config. For example, a line of the database manager configuration file looks like this: Buffer pool (DFT_MON_BUFPOOL) = OFF We need to process this so that we key it on the configuration parameter DFT_MON_BUFPOOL. """ with open(DB2_DEFAULT_CFG) as cfg_file: for line in cfg_file: if '=' in line: item = line.rstrip('\n').split(' = ') fIndex = item[0].rfind('(') lIndex = item[0].rfind(')') if fIndex > -1: param = item[0][fIndex + 1: lIndex] value = item[1] ''' Some of the configuration parameters have the keyword AUTOMATIC to indicate that DB2 will automatically adjust the setting depending on system resources. For some configuration parameters, DB2 also allows setting a starting value along with the AUTOMATIC setting. In the configuration parameter listing, this is displayed as: MON_HEAP_SZ = AUTOMATIC(90) This can be set using the following command: db2 update dbm cfg using mon_heap_sz 90 automatic ''' if not value: value = 'NULL' elif 'AUTOMATIC' in value: fIndex = item[1].rfind('(') lIndex = item[1].rfind(')') if fIndex > -1: default_value = item[1][fIndex + 1: lIndex] value = default_value + " AUTOMATIC" self.dbm_default_config.update({param: value}) def update_hostname(self): """ When the DB2 server is installed, it uses the hostname of the instance where the image was built. This needs to be updated to reflect the guest instance. """ LOG.debug("Update the hostname of the DB2 instance.") try: run_command(system.UPDATE_HOSTNAME, superuser='root') except exception.ProcessExecutionError: raise RuntimeError(_("Command to update the hostname failed.")) def change_ownership(self, mount_point): """ When the DB2 server instance is installed, it does not have the DB2 local database directory created (/home/db2inst1/db2inst1). This gets created when we mount the cinder volume. So we need to change ownership of this directory to the DB2 instance user - db2inst1.
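In shell terms this is roughly 'sudo chown db2inst1:db2inst1 <mount_point>' (non-recursive).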
""" LOG.debug("Changing ownership of the DB2 data directory.") try: operating_system.chown(mount_point, system.DB2_INSTANCE_OWNER, system.DB2_INSTANCE_OWNER, recursive=False, as_root=True) except exception.ProcessExecutionError: raise RuntimeError(_( "Command to change ownership of DB2 data directory failed.")) def _enable_db_on_boot(self): LOG.debug("Enable DB on boot.") try: run_command(system.ENABLE_AUTOSTART) except exception.ProcessExecutionError: raise RuntimeError(_( "Command to enable DB2 server on boot failed.")) def _disable_db_on_boot(self): LOG.debug("Disable DB2 on boot.") try: run_command(system.DISABLE_AUTOSTART) except exception.ProcessExecutionError: raise RuntimeError(_( "Command to disable DB2 server on boot failed.")) def start_db_with_conf_changes(self, config_contents): LOG.info("Starting DB2 with configuration changes.") self.configuration_manager.save_configuration(config_contents) self.start_db(True) def start_db(self, update_db=False): LOG.debug("Start the DB2 server instance.") self._enable_db_on_boot() try: run_command(system.START_DB2) except exception.ProcessExecutionError: pass if not self.status.wait_for_real_status_to_change_to( rd_instance.ServiceStatuses.RUNNING, self.state_change_wait_time, update_db): LOG.error("Start of DB2 server instance failed.") self.status.end_restart() raise RuntimeError(_("Could not start DB2.")) def stop_db(self, update_db=False, do_not_start_on_reboot=False): LOG.debug("Stop the DB2 server instance.") if do_not_start_on_reboot: self._disable_db_on_boot() try: run_command(system.STOP_DB2) except exception.ProcessExecutionError: pass if not (self.status.wait_for_real_status_to_change_to( rd_instance.ServiceStatuses.SHUTDOWN, self.state_change_wait_time, update_db)): LOG.error("Could not stop DB2.") self.status.end_restart() raise RuntimeError(_("Could not stop DB2.")) def restart(self): LOG.debug("Restarting DB2 server instance.") try: self.status.begin_restart() self.stop_db() self.start_db() finally: self.status.end_restart() def update_overrides(self, context, overrides, remove=False): if overrides: self.apply_overrides(overrides) def remove_overrides(self): config = self.configuration_manager.get_user_override() self._reset_config(config) self.configuration_manager.remove_user_override() def apply_overrides(self, overrides): self._apply_config(overrides) self.configuration_manager.apply_user_override(overrides) def _update_dbm_config(self, param, value): try: run_command( system.UPDATE_DBM_CONFIGURATION % { "parameter": param, "value": value}) except exception.ProcessExecutionError: LOG.exception("Failed to update config %s", param) raise def _reset_config(self, config): try: for k, v in config.items(): default_cfg_value = self.dbm_default_config[k] self._update_dbm_config(k, default_cfg_value) except Exception: LOG.exception("DB2 configuration reset failed.") raise RuntimeError(_("DB2 configuration reset failed.")) LOG.info("DB2 configuration reset completed.") def _apply_config(self, config): try: for k, v in config.items(): self._update_dbm_config(k, v) except Exception: LOG.exception("DB2 configuration apply failed") raise RuntimeError(_("DB2 configuration apply failed")) LOG.info("DB2 config apply completed.") class DB2AppStatus(service.BaseDbStatus): """ Handles all of the status updating for the DB2 guest agent. 
""" def _get_actual_db_status(self): LOG.debug("Getting the status of the DB2 server instance.") try: out, err = utils.execute_with_timeout( system.DB2_STATUS, shell=True) if "0" not in out: return rd_instance.ServiceStatuses.RUNNING else: return rd_instance.ServiceStatuses.SHUTDOWN except exception.ProcessExecutionError: LOG.exception("Error getting the DB2 server status.") return rd_instance.ServiceStatuses.CRASHED def run_command(command, superuser=system.DB2_INSTANCE_OWNER, timeout=system.TIMEOUT): return utils.execute_with_timeout("sudo", "su", "-", superuser, "-c", command, timeout=timeout) def create_db2_dir(dir_name): if not operating_system.exists(dir_name, True): operating_system.create_directory(dir_name, system.DB2_INSTANCE_OWNER, system.DB2_INSTANCE_OWNER, as_root=True) def remove_db2_dir(dir_name): operating_system.remove(dir_name, force=True, as_root=True) class DB2Admin(object): """ Handles administrative tasks on the DB2 instance. """ def create_database(self, databases): """Create the given database(s).""" dbName = None db_create_failed = [] LOG.debug("Creating DB2 databases.") for item in databases: mydb = models.DatastoreSchema.deserialize(item) mydb.check_create() dbName = mydb.name LOG.debug("Creating DB2 database: %s.", dbName) try: run_command(system.CREATE_DB_COMMAND % {'dbname': dbName}) except exception.ProcessExecutionError: LOG.exception( "There was an error creating database: %s.", dbName) db_create_failed.append(dbName) ''' Configure each database to do archive logging for online backups. Once the database is configured, it will go in to a BACKUP PENDING state. In this state, the database will not be accessible for any operations. To get the database back to normal mode, we have to do a full offline backup as soon as we configure it for archive logging. 
''' try: if CONF.db2.backup_strategy == 'DB2OnlineBackup': run_command(system.UPDATE_DB_LOG_CONFIGURATION % { 'dbname': dbName}) run_command(system.RECOVER_FROM_BACKUP_PENDING_MODE % { 'dbname': dbName}) except exception.ProcessExecutionError: LOG.exception( "There was an error while configuring the database for " "online backup: %s.", dbName) if len(db_create_failed) > 0: LOG.error("Creating the following databases failed: %s.", db_create_failed) def delete_database(self, database): """Delete the specified database.""" dbName = None try: mydb = models.DatastoreSchema.deserialize(database) mydb.check_delete() dbName = mydb.name LOG.debug("Deleting DB2 database: %s.", dbName) run_command(system.DELETE_DB_COMMAND % {'dbname': dbName}) except exception.ProcessExecutionError: LOG.exception( "There was an error while deleting database: %s.", dbName) raise exception.GuestError(original_message=_( "Unable to delete database: %s.") % dbName) def list_databases(self, limit=None, marker=None, include_marker=False): LOG.debug("Listing all the DB2 databases.") databases = [] next_marker = None try: out, err = run_command(system.LIST_DB_COMMAND) dblist = out.split() result = iter(dblist) count = 0 if marker is not None: try: item = next(result) while item != marker: item = next(result) if item == marker: marker = None except StopIteration: pass try: item = next(result) while item: count = count + 1 if (limit and count <= limit) or limit is None: db2_db = models.DatastoreSchema(name=item) LOG.debug("database = %s.", item) next_marker = db2_db.name databases.append(db2_db.serialize()) item = next(result) else: next_marker = None break except StopIteration: next_marker = None LOG.debug("databases = %s.", str(databases)) except exception.ProcessExecutionError as pe: err_msg = encodeutils.exception_to_unicode(pe) LOG.exception("An error occurred listing databases: %s.", err_msg) return databases, next_marker def create_user(self, users): LOG.debug("Creating user(s) for accessing DB2 database(s).") try: for item in users: user = models.DatastoreUser.deserialize(item) user.check_create() try: LOG.debug("Creating OS user: %s.", user.name) utils.execute_with_timeout( system.CREATE_USER_COMMAND % { 'login': user.name, 'passwd': user.password}, shell=True) except exception.ProcessExecutionError: LOG.exception("Error creating user: %s.", user.name) continue for database in user.databases: mydb = models.DatastoreSchema.deserialize(database) try: LOG.debug("Granting user: %(user)s access to " "database: %(db)s.", {'user': user.name, 'db': mydb.name}) run_command(system.GRANT_USER_ACCESS % { 'dbname': mydb.name, 'login': user.name}) except exception.ProcessExecutionError as pe: LOG.debug("Error granting user: %(user)s access to " "database: %(db)s.", {'user': user.name, 'db': mydb.name}) LOG.debug(pe) except exception.ProcessExecutionError as pe: LOG.exception("An error occurred creating users: %s.", str(pe)) def delete_user(self, user): LOG.debug("Delete a given user.") db2_user = models.DatastoreUser.deserialize(user) db2_user.check_delete() userName = db2_user.name user_dbs = db2_user.databases LOG.debug("For user %(user)s, databases to be deleted = %(dbs)r.", {'user': userName, 'dbs': user_dbs}) if len(user_dbs) == 0: databases = self.list_access(db2_user.name, None) else: databases = user_dbs LOG.debug("databases for user = %r.", databases) for database in databases: mydb = models.DatastoreSchema.deserialize(database) try: run_command(system.REVOKE_USER_ACCESS % { 'dbname': mydb.name,
'login': userName}) LOG.debug("Revoked access for user:%(user)s on " "database:%(db)s.", {'user': userName, 'db': mydb.name}) except exception.ProcessExecutionError: LOG.debug("Error occurred while revoking access to %s.", mydb.name) try: utils.execute_with_timeout(system.DELETE_USER_COMMAND % { 'login': db2_user.name.lower()}, shell=True) except exception.ProcessExecutionError as pe: LOG.exception( "There was an error while deleting user: %s.", pe) raise exception.GuestError(original_message=_( "Unable to delete user: %s.") % userName) def list_users(self, limit=None, marker=None, include_marker=False): LOG.debug( "List all users for all the databases in a DB2 server instance.") users = [] user_map = {} next_marker = None count = 0 databases, marker = self.list_databases() for database in databases: db2_db = models.DatastoreSchema.deserialize(database) out = None try: out, err = run_command( system.LIST_DB_USERS % {'dbname': db2_db.name}) except exception.ProcessExecutionError: LOG.debug( "There was an error while listing users for database: %s.", db2_db.name) continue userlist = [] for item in out.split('\n'): LOG.debug("item = %r", item) user = item.split() if item != "" else None LOG.debug("user = %r", user) if (user is not None and (user[0] not in cfg.get_ignored_users() and user[1] == 'Y')): userlist.append(user[0]) result = iter(userlist) if marker is not None: try: item = next(result) while item != marker: item = next(result) if item == marker: marker = None except StopIteration: pass try: item = next(result) while item: ''' Check if the user has already been discovered. If so, add this database to the database list for this user. ''' if item in user_map: db2user = user_map.get(item) db2user.databases = db2_db.name item = next(result) continue ''' If this user was not previously discovered, then add this to the user's list. ''' count = count + 1 if (limit and count <= limit) or limit is None: db2_user = models.DatastoreUser(name=item, databases=db2_db.name) users.append(db2_user.serialize()) user_map.update({item: db2_user}) item = next(result) else: next_marker = None break except StopIteration: next_marker = None if count == limit: break return users, next_marker def get_user(self, username, hostname): LOG.debug("Get details of a given database user.") user = self._get_user(username, hostname) if not user: return None return user.serialize() def _get_user(self, username, hostname): LOG.debug("Get details of a given database user %s.", username) user = models.DatastoreUser(name=username) databases, marker = self.list_databases() out = None for database in databases: db2_db = models.DatastoreSchema.deserialize(database) try: out, err = run_command( system.LIST_DB_USERS % {'dbname': db2_db.name}) except exception.ProcessExecutionError: LOG.debug( "Error while trying to get the users for database: %s.", db2_db.name) continue for item in out.split('\n'): user_access = item.split() if item != "" else None if (user_access is not None and user_access[0].lower() == username.lower() and user_access[1] == 'Y'): user.databases = db2_db.name break return user def list_access(self, username, hostname): """ Show all the databases to which the user has more than USAGE granted. 
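Access is computed by probing each database with system.LIST_DB_USERS (db2 -x select grantee, dataaccessauth from sysibm.sysdbauth) and keeping the databases where the user's DATAACCESSAUTH flag is 'Y'.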
""" LOG.debug("Listing databases that user: %s has access to.", username) user = self._get_user(username, hostname) return user.databases ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/db2/system.py0000644000175000017500000000745700000000000026521 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import cfg CONF = cfg.CONF TIMEOUT = 1200 DB2_INSTANCE_OWNER = "db2inst1" MOUNT_POINT = CONF.db2.mount_point DB2_BACKUP_DIR = MOUNT_POINT + "/backup" DB2_ARCHIVE_LOGS_DIR = MOUNT_POINT + "/ArchiveLogs" UPDATE_HOSTNAME = ( 'source /home/db2inst1/sqllib/db2profile;' 'db2set -g DB2SYSTEM="$(hostname)"') ENABLE_AUTOSTART = ( "/opt/ibm/db2/current/instance/db2iauto -on " + DB2_INSTANCE_OWNER) DISABLE_AUTOSTART = ( "/opt/ibm/db2/current/instance/db2iauto -off " + DB2_INSTANCE_OWNER) START_DB2 = "db2start" QUIESCE_DB2 = ("db2 QUIESCE INSTANCE DB2INST1 RESTRICTED ACCESS IMMEDIATE " "FORCE CONNECTIONS") UNQUIESCE_DB2 = "db2 UNQUIESCE INSTANCE DB2INST1" STOP_DB2 = "db2 force application all; db2 terminate; db2stop" DB2_STATUS = ("ps -ef | grep " + DB2_INSTANCE_OWNER + " | grep db2sysc |" "grep -v grep | wc -l") CREATE_DB_COMMAND = "db2 create database %(dbname)s" DELETE_DB_COMMAND = "db2 drop database %(dbname)s" LIST_DB_COMMAND = ( "db2 list database directory | grep -B6 -i indirect | " "grep 'Database name' | sed 's/.*= //'") CREATE_USER_COMMAND = ( 'sudo useradd -m -d /home/%(login)s %(login)s;' 'sudo echo %(login)s:%(passwd)s |sudo chpasswd') GRANT_USER_ACCESS = ( "db2 connect to %(dbname)s; " "db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " "ON DATABASE TO USER %(login)s; db2 connect reset") DELETE_USER_COMMAND = 'sudo userdel -r %(login)s' REVOKE_USER_ACCESS = ( "db2 connect to %(dbname)s; " "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " "ON DATABASE FROM USER %(login)s; db2 connect reset") LIST_DB_USERS = ( "db2 +o connect to %(dbname)s; " "db2 -x select grantee, dataaccessauth from sysibm.sysdbauth; " "db2 connect reset") OFFLINE_BACKUP_DB = "db2 backup database %(dbname)s to " + DB2_BACKUP_DIR RESTORE_OFFLINE_DB = ( "db2 restore database %(dbname)s from " + DB2_BACKUP_DIR) GET_DB_SIZE = ( "db2 +o connect to %(dbname)s;" r"db2 call get_dbsize_info\(?, ?, ?, -1\) | " "grep -A1 'DATABASESIZE' | grep 'Parameter Value' | sed 's/.*[:]//' |" " tr -d '\n'; db2 +o connect reset") GET_DB_NAMES = ("find /home/db2inst1/db2inst1/backup/ -type f -name '*.001' |" " grep -Po \"(?<=backup/)[^.']*(?=\\.)\"") GET_DBM_CONFIGURATION = "db2 get dbm configuration > %(dbm_config)s" UPDATE_DBM_CONFIGURATION = ("db2 update database manager configuration using " "%(parameter)s %(value)s") UPDATE_DB_LOG_CONFIGURATION = ( "db2 update database configuration for " "%(dbname)s using LOGARCHMETH1 'DISK:" + DB2_ARCHIVE_LOGS_DIR + "'") LOG_UTILIZATION = ( "db2 +o connect to %(dbname)s;" "db2 -x SELECT 
TOTAL_LOG_USED_KB FROM SYSIBMADM.LOG_UTILIZATION | " "tr -d '\n';db2 +o connect reset") ONLINE_BACKUP_DB = ( "db2 backup database %(dbname)s ONLINE to " + DB2_BACKUP_DIR + " INCLUDE LOGS") RESTORE_ONLINE_DB = ( "db2 RESTORE DATABASE %(dbname)s FROM " + DB2_BACKUP_DIR + " LOGTARGET " + DB2_ARCHIVE_LOGS_DIR) ROLL_FORWARD_DB = ( "db2 ROLLFORWARD DATABASE %(dbname)s TO END OF BACKUP " "AND COMPLETE OVERFLOW LOG PATH '(" + DB2_ARCHIVE_LOGS_DIR + ")'") RECOVER_FROM_BACKUP_PENDING_MODE = ( "db2 backup database %(dbname)s to /dev/null") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/mariadb/0000755000175000017500000000000000000000000025536 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/mariadb/__init__.py0000644000175000017500000000000000000000000027635 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/mariadb/manager.py0000644000175000017500000000213700000000000027525 0ustar00coreycorey00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.guestagent.datastore.experimental.mariadb import ( service as mariadb_service) from trove.guestagent.datastore.galera_common import manager as galera_manager from trove.guestagent.datastore.mysql_common import service as mysql_service class Manager(galera_manager.GaleraManager): def __init__(self): super(Manager, self).__init__( mariadb_service.MariaDBApp, mysql_service.BaseMySqlAppStatus, mariadb_service.MariaDBAdmin) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/mariadb/service.py0000644000175000017500000000763700000000000027565 0ustar00coreycorey00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
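# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): the DB2 system module
# above defines shell command strings such as CREATE_DB_COMMAND and
# GRANT_USER_ACCESS as %-style templates, and the DB2 guestagent fills them
# in with named parameters before handing them to run_command() /
# utils.execute_with_timeout().  A minimal, self-contained model of that
# interpolation pattern; render_db2_command() is a hypothetical helper and
# the template copies below are trimmed for brevity.

DB2_CREATE_DB_TEMPLATE = "db2 create database %(dbname)s"
DB2_GRANT_TEMPLATE = (
    "db2 connect to %(dbname)s; "
    "db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS "
    "ON DATABASE TO USER %(login)s; db2 connect reset")


def render_db2_command(template, **params):
    """Interpolate a DB2 command template the way run_command() callers do."""
    return template % params

# render_db2_command(DB2_CREATE_DB_TEMPLATE, dbname='mydb')
#     -> 'db2 create database mydb'
# render_db2_command(DB2_GRANT_TEMPLATE, dbname='mydb', login='alice')
#     -> 'db2 connect to mydb; db2 GRANT ... TO USER alice; db2 connect reset'
# ---------------------------------------------------------------------------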
# from oslo_log import log as logging from trove.guestagent.common import operating_system from trove.guestagent.datastore.galera_common import service as galera_service from trove.guestagent.datastore.mysql_common import service as mysql_service LOG = logging.getLogger(__name__) class MariaDBApp(galera_service.GaleraApp): OS = operating_system.get_os() def __init__(self, status): super(MariaDBApp, self).__init__( status, mysql_service.BaseLocalSqlClient, mysql_service.BaseKeepAliveConnection) @property def service_candidates(self): service_candidates = super(MariaDBApp, self).service_candidates return { operating_system.DEBIAN: ["mariadb"] + service_candidates, operating_system.REDHAT: ["mariadb"], operating_system.SUSE: service_candidates }[self.OS] @property def mysql_service(self): result = super(MariaDBApp, self).mysql_service if result['type'] == 'sysvinit': result['cmd_bootstrap_galera_cluster'] = ( "sudo service %s bootstrap" % result['service']) elif result['type'] == 'systemd': if operating_system.find_executable('galera_new_cluster'): result['cmd_bootstrap_galera_cluster'] = ( "sudo galera_new_cluster") else: result['cmd_bootstrap_galera_cluster'] = ( "sudo systemctl start %s@bootstrap.service" % result['service']) return result @property def cluster_configuration(self): return self.configuration_manager.get_value('galera') def _get_slave_status(self): with self.local_sql_client(self.get_engine()) as client: return client.execute('SHOW SLAVE STATUS').first() def _get_master_UUID(self): slave_status = self._get_slave_status() return slave_status and slave_status['Master_Server_Id'] or None def _get_gtid_executed(self): with self.local_sql_client(self.get_engine()) as client: return client.execute('SELECT @@global.gtid_binlog_pos').first()[0] def get_last_txn(self): master_UUID = self._get_master_UUID() last_txn_id = '0' gtid_executed = self._get_gtid_executed() for gtid_set in gtid_executed.split(','): uuid_set = gtid_set.split('-') if uuid_set[1] == master_UUID: last_txn_id = uuid_set[-1] break return master_UUID, int(last_txn_id) def get_latest_txn_id(self): LOG.info("Retrieving latest txn id.") return self._get_gtid_executed() def wait_for_txn(self, txn): LOG.info("Waiting on txn '%s'.", txn) with self.local_sql_client(self.get_engine()) as client: client.execute("SELECT MASTER_GTID_WAIT('%s')" % txn) class MariaDBRootAccess(mysql_service.BaseMySqlRootAccess): def __init__(self): super(MariaDBRootAccess, self).__init__( mysql_service.BaseLocalSqlClient, MariaDBApp(mysql_service.BaseMySqlAppStatus.get())) class MariaDBAdmin(mysql_service.BaseMySqlAdmin): def __init__(self): super(MariaDBAdmin, self).__init__( mysql_service.BaseLocalSqlClient, MariaDBRootAccess(), MariaDBApp) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7601106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/mongodb/0000755000175000017500000000000000000000000025564 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/mongodb/__init__.py0000644000175000017500000000000000000000000027663 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/mongodb/manager.py0000644000175000017500000002454300000000000027560 0ustar00coreycorey00000000000000# Copyright (c) 2014 
Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from trove.common import instance as ds_instance from trove.common.notification import EndNotification from trove.guestagent import backup from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.mongodb import service from trove.guestagent.datastore.experimental.mongodb import system from trove.guestagent.datastore import manager from trove.guestagent import dbaas from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): def __init__(self): self.app = service.MongoDBApp() super(Manager, self).__init__('mongodb') @property def status(self): return self.app.status @property def configuration_manager(self): return self.app.configuration_manager def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" self.app.install_if_needed(packages) self.status.wait_for_database_service_start( self.app.state_change_wait_time) self.app.stop_db() self.app.clear_storage() mount_point = system.MONGODB_MOUNT_POINT if device_path: device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(system.MONGODB_MOUNT_POINT): device.migrate_data(mount_point) device.mount(mount_point) operating_system.chown(mount_point, system.MONGO_USER, system.MONGO_USER, as_root=True) LOG.debug("Mounted the volume %(path)s as %(mount)s.", {'path': device_path, "mount": mount_point}) if config_contents: # Save resolved configuration template first. self.app.configuration_manager.save_configuration(config_contents) # Apply guestagent specific configuration changes. self.app.apply_initial_guestagent_configuration( cluster_config, mount_point) if not cluster_config: # Create the Trove admin user. self.app.secure() # Don't start mongos until add_config_servers is invoked, # don't start members as they should already be running. 
if not (self.app.is_query_router or self.app.is_cluster_member): self.app.start_db(update_db=True) if not cluster_config and backup_info: self._perform_restore(backup_info, context, mount_point, self.app) if service.MongoDBAdmin().is_root_enabled(): self.app.status.report_root(context) def restart(self, context): LOG.debug("Restarting MongoDB.") self.app.restart() def start_db_with_conf_changes(self, context, config_contents): LOG.debug("Starting MongoDB with configuration changes.") self.app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): LOG.debug("Stopping MongoDB.") self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def get_filesystem_stats(self, context, fs_path): """Gets the filesystem stats for the path given.""" LOG.debug("Getting file system status.") # TODO(peterstac) - why is this hard-coded? return dbaas.get_filesystem_volume_stats(system.MONGODB_MOUNT_POINT) def change_passwords(self, context, users): LOG.debug("Changing password.") with EndNotification(context): return service.MongoDBAdmin().change_passwords(users) def update_attributes(self, context, username, hostname, user_attrs): LOG.debug("Updating database attributes.") with EndNotification(context): return service.MongoDBAdmin().update_attributes(username, user_attrs) def create_database(self, context, databases): LOG.debug("Creating database(s).") with EndNotification(context): return service.MongoDBAdmin().create_database(databases) def create_user(self, context, users): LOG.debug("Creating user(s).") with EndNotification(context): return service.MongoDBAdmin().create_users(users) def delete_database(self, context, database): LOG.debug("Deleting database.") with EndNotification(context): return service.MongoDBAdmin().delete_database(database) def delete_user(self, context, user): LOG.debug("Deleting user.") with EndNotification(context): return service.MongoDBAdmin().delete_user(user) def get_user(self, context, username, hostname): LOG.debug("Getting user.") return service.MongoDBAdmin().get_user(username) def grant_access(self, context, username, hostname, databases): LOG.debug("Granting access.") return service.MongoDBAdmin().grant_access(username, databases) def revoke_access(self, context, username, hostname, database): LOG.debug("Revoking access.") return service.MongoDBAdmin().revoke_access(username, database) def list_access(self, context, username, hostname): LOG.debug("Listing access.") return service.MongoDBAdmin().list_access(username) def list_databases(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing databases.") return service.MongoDBAdmin().list_databases(limit, marker, include_marker) def list_users(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing users.") return service.MongoDBAdmin().list_users(limit, marker, include_marker) def enable_root(self, context): LOG.debug("Enabling root.") return service.MongoDBAdmin().enable_root() def enable_root_with_password(self, context, root_password=None): return service.MongoDBAdmin().enable_root(root_password) def is_root_enabled(self, context): LOG.debug("Checking if root is enabled.") return service.MongoDBAdmin().is_root_enabled() def _perform_restore(self, backup_info, context, restore_location, app): LOG.info("Restoring database from backup %s.", backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception: LOG.exception("Error performing restore from backup %s.", backup_info['id']) 
self.status.set_status(ds_instance.ServiceStatuses.FAILED) raise LOG.info("Restored database successfully.") def create_backup(self, context, backup_info): LOG.debug("Creating backup.") with EndNotification(context): backup.backup(context, backup_info) def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") if remove: self.app.remove_overrides() else: self.app.update_overrides(context, overrides, remove) def apply_overrides(self, context, overrides): LOG.debug("Overrides will be applied after restart.") pass def add_members(self, context, members): try: LOG.debug("add_members called.") LOG.debug("args: members=%s.", members) self.app.add_members(members) LOG.debug("add_members call has finished.") except Exception: self.app.status.set_status(ds_instance.ServiceStatuses.FAILED) raise def add_config_servers(self, context, config_servers): try: LOG.debug("add_config_servers called.") LOG.debug("args: config_servers=%s.", config_servers) self.app.add_config_servers(config_servers) LOG.debug("add_config_servers call has finished.") except Exception: self.app.status.set_status(ds_instance.ServiceStatuses.FAILED) raise def add_shard(self, context, replica_set_name, replica_set_member): try: LOG.debug("add_shard called.") LOG.debug("args: replica_set_name=%(name)s, " "replica_set_member=%(member)s.", {'name': replica_set_name, 'member': replica_set_member}) self.app.add_shard(replica_set_name, replica_set_member) LOG.debug("add_shard call has finished.") except Exception: self.app.status.set_status(ds_instance.ServiceStatuses.FAILED) raise def get_key(self, context): # Return the cluster key LOG.debug("Getting the cluster key.") return self.app.get_key() def prep_primary(self, context): LOG.debug("Preparing to be primary member.") self.app.prep_primary() def create_admin_user(self, context, password): self.app.create_admin_user(password) def store_admin_password(self, context, password): self.app.store_admin_password(password) def get_replica_set_name(self, context): # Return this nodes replica set name LOG.debug("Getting the replica set name.") return self.app.replica_set_name def get_admin_password(self, context): # Return the admin password from this instance LOG.debug("Getting the admin password.") return self.app.admin_password def is_shard_active(self, context, replica_set_name): return self.app.is_shard_active(replica_set_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/mongodb/service.py0000644000175000017500000010352200000000000027601 0ustar00coreycorey00000000000000# Copyright (c) 2014 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
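# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): the MongoDB Manager
# above wraps each state-changing call (create_database, delete_user, ...)
# in an EndNotification context manager so that a completion event is
# emitted on success and an error event on failure.  This is a simplified
# model of that pattern under assumed semantics -- EndNotificationSketch and
# the print() calls stand in for trove.common.notification and the real
# notifier backend.

class EndNotificationSketch(object):
    """Emit an end event on success, an error event when the body raises."""

    def __init__(self, context):
        self.context = context

    def __enter__(self):
        return self.context

    def __exit__(self, etype, value, traceback):
        if etype is None:
            print('notify end: %s' % self.context)
        else:
            print('notify error: %s' % self.context)
        return False  # never swallow the exception


def delete_database_sketch(context, database):
    # Mirrors Manager.delete_database(): the admin call runs inside the
    # notification scope, so callers always see a terminal event.
    with EndNotificationSketch(context):
        print('dropping database %s' % database)
# ---------------------------------------------------------------------------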
import os from oslo_log import log as logging from oslo_utils import netutils import pymongo from trove.common import cfg from trove.common.db.mongodb import models from trove.common import exception from trove.common.i18n import _ from trove.common import instance as ds_instance from trove.common.stream_codecs import JsonCodec, SafeYamlCodec from trove.common import utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import OneFileOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.mongodb import system from trove.guestagent.datastore import service LOG = logging.getLogger(__name__) CONF = cfg.CONF CONFIG_FILE = operating_system.file_discovery(system.CONFIG_CANDIDATES) MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mongodb' # Configuration group for clustering-related settings. CNF_CLUSTER = 'clustering' MONGODB_PORT = CONF.mongodb.mongodb_port CONFIGSVR_PORT = CONF.mongodb.configsvr_port class MongoDBApp(object): """Prepares DBaaS on a Guest container.""" def __init__(self): self.state_change_wait_time = CONF.state_change_wait_time revision_dir = guestagent_utils.build_file_path( os.path.dirname(CONFIG_FILE), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self.configuration_manager = ConfigurationManager( CONFIG_FILE, system.MONGO_USER, system.MONGO_USER, SafeYamlCodec(default_flow_style=False), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) self.is_query_router = False self.is_cluster_member = False self.status = MongoDBAppStatus() def install_if_needed(self, packages): """Prepare the guest machine with a MongoDB installation.""" LOG.info("Preparing Guest as MongoDB.") if not system.PACKAGER.pkg_is_installed(packages): LOG.debug("Installing packages: %s.", str(packages)) system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT) LOG.info("Finished installing MongoDB server.") def _get_service_candidates(self): if self.is_query_router: return system.MONGOS_SERVICE_CANDIDATES return system.MONGOD_SERVICE_CANDIDATES def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( self._get_service_candidates(), self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service( self._get_service_candidates(), self.state_change_wait_time) def start_db(self, update_db=False): self.status.start_db_service( self._get_service_candidates(), self.state_change_wait_time, enable_on_boot=True, update_db=update_db) def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) def remove_overrides(self): self.configuration_manager.remove_user_override() def start_db_with_conf_changes(self, config_contents): LOG.info('Starting MongoDB with configuration changes.') if self.status.is_running: format = 'Cannot start_db_with_conf_changes because status is %s.' LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info("Initiating config.") self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. 
self.apply_initial_guestagent_configuration( None, mount_point=system.MONGODB_MOUNT_POINT) self.start_db(True) def apply_initial_guestagent_configuration( self, cluster_config, mount_point=None): LOG.debug("Applying initial configuration.") # Mongodb init scripts assume the PID-file path is writable by the # database service. # See: https://jira.mongodb.org/browse/SERVER-20075 self._initialize_writable_run_dir() self.configuration_manager.apply_system_override( {'processManagement.fork': False, 'processManagement.pidFilePath': system.MONGO_PID_FILE, 'systemLog.destination': 'file', 'systemLog.path': system.MONGO_LOG_FILE, 'systemLog.logAppend': True }) if mount_point: self.configuration_manager.apply_system_override( {'storage.dbPath': mount_point}) if cluster_config is not None: self._configure_as_cluster_instance(cluster_config) else: self._configure_network(MONGODB_PORT) def _initialize_writable_run_dir(self): """Create a writable directory for Mongodb's runtime data (e.g. PID-file). """ mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE) LOG.debug("Initializing a runtime directory: %s", mongodb_run_dir) operating_system.create_directory( mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER, force=True, as_root=True) def _configure_as_cluster_instance(self, cluster_config): """Configure this guest as a cluster instance and return its new status. """ if cluster_config['instance_type'] == "query_router": self._configure_as_query_router() elif cluster_config["instance_type"] == "config_server": self._configure_as_config_server() elif cluster_config["instance_type"] == "member": self._configure_as_cluster_member( cluster_config['replica_set_name']) else: LOG.error("Bad cluster configuration; instance type " "given as %s.", cluster_config['instance_type']) return ds_instance.ServiceStatuses.FAILED if 'key' in cluster_config: self._configure_cluster_security(cluster_config['key']) def _configure_as_query_router(self): LOG.info("Configuring instance as a cluster query router.") self.is_query_router = True # FIXME(pmalik): We should really have a separate configuration # template for the 'mongos' process. # Remove all storage configurations from the template. # They apply only to 'mongod' processes. # Already applied overrides will be integrated into the base file and # their current groups removed. config = guestagent_utils.expand_dict( self.configuration_manager.parse_configuration()) if 'storage' in config: LOG.debug("Removing 'storage' directives from the configuration " "template.") del config['storage'] self.configuration_manager.save_configuration( guestagent_utils.flatten_dict(config)) # Apply 'mongos' configuration. self._configure_network(MONGODB_PORT) self.configuration_manager.apply_system_override( {'sharding.configDB': ''}, CNF_CLUSTER) def _configure_as_config_server(self): LOG.info("Configuring instance as a cluster config server.") self._configure_network(CONFIGSVR_PORT) self.configuration_manager.apply_system_override( {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER) def _configure_as_cluster_member(self, replica_set_name): LOG.info("Configuring instance as a cluster member.") self.is_cluster_member = True self._configure_network(MONGODB_PORT) # we don't want these thinking they are in a replica set yet # as that would prevent us from creating the admin user, # so start mongo before updating the config. 
# mongo will be started by the cluster taskmanager self.start_db() self.configuration_manager.apply_system_override( {'replication.replSetName': replica_set_name}, CNF_CLUSTER) def _configure_cluster_security(self, key_value): """Force cluster key-file-based authentication. This will enabled RBAC. """ # Store the cluster member authentication key. self.store_key(key_value) self.configuration_manager.apply_system_override( {'security.clusterAuthMode': 'keyFile', 'security.keyFile': self.get_key_file()}, CNF_CLUSTER) def _configure_network(self, port=None): """Make the service accessible at a given (or default if not) port. """ instance_ip = netutils.get_my_ipv4() bind_interfaces_string = ','.join([instance_ip, '127.0.0.1']) options = {'net.bindIp': bind_interfaces_string} if port is not None: guestagent_utils.update_dict({'net.port': port}, options) self.configuration_manager.apply_system_override(options) self.status.set_host(instance_ip, port=port) def clear_storage(self): mount_point = "/var/lib/mongodb/*" LOG.debug("Clearing storage at %s.", mount_point) try: operating_system.remove(mount_point, force=True, as_root=True) except exception.ProcessExecutionError: LOG.exception("Error clearing storage.") def _has_config_db(self): value_string = self.configuration_manager.get_value( 'sharding', {}).get('configDB') return value_string is not None # FIXME(pmalik): This method should really be called 'set_config_servers'. # The current name suggests it adds more config servers, but it # rather replaces the existing ones. def add_config_servers(self, config_server_hosts): """Set config servers on a query router (mongos) instance. """ config_servers_string = ','.join(['%s:%s' % (host, CONFIGSVR_PORT) for host in config_server_hosts]) LOG.info("Setting config servers: %s", config_servers_string) self.configuration_manager.apply_system_override( {'sharding.configDB': config_servers_string}, CNF_CLUSTER) self.start_db(True) def add_shard(self, replica_set_name, replica_set_member): """ This method is used by query router (mongos) instances. """ url = "%(rs)s/%(host)s:%(port)s"\ % {'rs': replica_set_name, 'host': replica_set_member, 'port': MONGODB_PORT} MongoDBAdmin().add_shard(url) def add_members(self, members): """ This method is used by a replica-set member instance. """ def check_initiate_status(): """ This method is used to verify replica-set status. """ status = MongoDBAdmin().get_repl_status() if((status["ok"] == 1) and (status["members"][0]["stateStr"] == "PRIMARY") and (status["myState"] == 1)): return True else: return False def check_rs_status(): """ This method is used to verify replica-set status. 
""" status = MongoDBAdmin().get_repl_status() primary_count = 0 if status["ok"] != 1: return False if len(status["members"]) != (len(members) + 1): return False for rs_member in status["members"]: if rs_member["state"] not in [1, 2, 7]: return False if rs_member["health"] != 1: return False if rs_member["state"] == 1: primary_count += 1 return primary_count == 1 MongoDBAdmin().rs_initiate() # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_initiate_status, sleep_time=30, time_out=CONF.mongodb.add_members_timeout) # add replica-set members MongoDBAdmin().rs_add_members(members) # TODO(ramashri) see if hardcoded values can be removed utils.poll_until(check_rs_status, sleep_time=10, time_out=CONF.mongodb.add_members_timeout) def _set_localhost_auth_bypass(self, enabled): """When active, the localhost exception allows connections from the localhost interface to create the first user on the admin database. The exception applies only when there are no users created in the MongoDB instance. """ self.configuration_manager.apply_system_override( {'setParameter': {'enableLocalhostAuthBypass': enabled}}) def list_all_dbs(self): return MongoDBAdmin().list_database_names() def db_data_size(self, db_name): schema = models.MongoDBSchema(db_name) return MongoDBAdmin().db_stats(schema.serialize())['dataSize'] def admin_cmd_auth_params(self): return MongoDBAdmin().cmd_admin_auth_params def get_key_file(self): return system.MONGO_KEY_FILE def get_key(self): return operating_system.read_file( system.MONGO_KEY_FILE, as_root=True).rstrip() def store_key(self, key): """Store the cluster key.""" LOG.debug('Storing key for MongoDB cluster.') operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True) operating_system.chmod(system.MONGO_KEY_FILE, operating_system.FileMode.SET_USR_RO, as_root=True) operating_system.chown(system.MONGO_KEY_FILE, system.MONGO_USER, system.MONGO_USER, as_root=True) def store_admin_password(self, password): LOG.debug('Storing admin password.') creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME, password=password) creds.write(system.MONGO_ADMIN_CREDS_FILE) return creds def create_admin_user(self, password): """Create the admin user while the localhost exception is active.""" LOG.debug('Creating the admin user.') creds = self.store_admin_password(password) user = models.MongoDBUser(name='admin.%s' % creds.username, password=creds.password) user.roles = system.MONGO_ADMIN_ROLES # the driver engine is already cached, but we need to change it it with MongoDBClient(None, host='localhost', port=MONGODB_PORT) as client: MongoDBAdmin().create_validated_user(user, client=client) # now revert to the normal engine self.status.set_host(host=netutils.get_my_ipv4(), port=MONGODB_PORT) LOG.debug('Created admin user.') def secure(self): """Create the Trove admin user. The service should not be running at this point. This will enable role-based access control (RBAC) by default. """ if self.status.is_running: raise RuntimeError(_("Cannot secure the instance. " "The service is still running.")) try: self.configuration_manager.apply_system_override( {'security.authorization': 'enabled'}) self._set_localhost_auth_bypass(True) self.start_db(update_db=False) password = utils.generate_random_password() self.create_admin_user(password) LOG.debug("MongoDB secure complete.") finally: self._set_localhost_auth_bypass(False) self.stop_db() def get_configuration_property(self, name, default=None): """Return the value of a MongoDB configuration property. 
""" return self.configuration_manager.get_value(name, default) def prep_primary(self): # Prepare the primary member of a replica set. password = utils.generate_random_password() self.create_admin_user(password) self.restart() @property def replica_set_name(self): return MongoDBAdmin().get_repl_status()['set'] @property def admin_password(self): creds = MongoDBCredentials() creds.read(system.MONGO_ADMIN_CREDS_FILE) return creds.password def is_shard_active(self, replica_set_name): shards = MongoDBAdmin().list_active_shards() if replica_set_name in [shard['_id'] for shard in shards]: LOG.debug('Replica set %s is active.', replica_set_name) return True else: LOG.debug('Replica set %s is not active.', replica_set_name) return False class MongoDBAppStatus(service.BaseDbStatus): def __init__(self, host='localhost', port=None): super(MongoDBAppStatus, self).__init__() self.set_host(host, port=port) def set_host(self, host, port=None): # This forces refresh of the 'pymongo' engine cached in the # MongoDBClient class. # Authentication is not required to check the server status. MongoDBClient(None, host=host, port=port) def _get_actual_db_status(self): try: with MongoDBClient(None) as client: client.server_info() return ds_instance.ServiceStatuses.RUNNING except (pymongo.errors.ServerSelectionTimeoutError, pymongo.errors.AutoReconnect): return ds_instance.ServiceStatuses.SHUTDOWN except Exception: LOG.exception("Error getting MongoDB status.") return ds_instance.ServiceStatuses.SHUTDOWN def cleanup_stalled_db_services(self): pid, err = utils.execute_with_timeout(system.FIND_PID, shell=True) utils.execute_with_timeout(system.MONGODB_KILL % pid, shell=True) class MongoDBAdmin(object): """Handles administrative tasks on MongoDB.""" # user is cached by making it a class attribute admin_user = None def _admin_user(self): if not type(self).admin_user: creds = MongoDBCredentials() creds.read(system.MONGO_ADMIN_CREDS_FILE) user = models.MongoDBUser( 'admin.%s' % creds.username, creds.password ) type(self).admin_user = user return type(self).admin_user @property def cmd_admin_auth_params(self): """Returns a list of strings that constitute MongoDB command line authentication parameters. """ user = self._admin_user() return ['--username', user.username, '--password', user.password, '--authenticationDatabase', user.database.name] def _create_user_with_client(self, user, client): """Run the add user command.""" client[user.database.name].add_user( user.username, password=user.password, roles=user.roles ) def create_validated_user(self, user, client=None): """Creates a user on their database. The caller should ensure that this action is valid. :param user: a MongoDBUser object """ LOG.debug('Creating user %(user)s on database %(db)s with roles ' '%(role)s.', {'user': user.username, 'db': user.database.name, 'role': str(user.roles)}) if client: self._create_user_with_client(user, client) else: with MongoDBClient(self._admin_user()) as admin_client: self._create_user_with_client(user, admin_client) def create_users(self, users): """Create the given user(s). 
:param users: list of serialized user objects """ with MongoDBClient(self._admin_user()) as client: for item in users: user = models.MongoDBUser.deserialize(item) # this could be called to create multiple users at once; # catch exceptions, log the message, and continue try: user.check_create() if self._get_user_record(user.name, client=client): raise ValueError(_('User with name %(user)s already ' 'exists.') % {'user': user.name}) self.create_validated_user(user, client=client) except (ValueError, pymongo.errors.PyMongoError) as e: LOG.error(e) LOG.warning('Skipping creation of user with name ' '%(user)s', {'user': user.name}) def delete_validated_user(self, user): """Deletes a user from their database. The caller should ensure that this action is valid. :param user: a MongoDBUser object """ LOG.debug('Deleting user %(user)s from database %(db)s.', {'user': user.username, 'db': user.database.name}) with MongoDBClient(self._admin_user()) as admin_client: admin_client[user.database.name].remove_user(user.username) def delete_user(self, user): """Delete the given user. :param user: a serialized user object """ user = models.MongoDBUser.deserialize(user) user.check_delete() self.delete_validated_user(user) def _get_user_record(self, name, client=None): """Get the user's record.""" user = models.MongoDBUser(name) if user.is_ignored: LOG.warning('Skipping retrieval of user with reserved ' 'name %(user)s', {'user': user.name}) return None if client: user_info = client.admin.system.users.find_one( {'user': user.username, 'db': user.database.name}) else: with MongoDBClient(self._admin_user()) as admin_client: user_info = admin_client.admin.system.users.find_one( {'user': user.username, 'db': user.database.name}) if not user_info: return None user.roles = user_info['roles'] return user def get_existing_user(self, name): """Check that a user exists.""" user = self._get_user_record(name) if not user: raise ValueError(_('User with name %(user)s does not' 'exist.') % {'user': name}) return user def get_user(self, name): """Get information for the given user.""" LOG.debug('Getting user %s.', name) user = self._get_user_record(name) if not user: return None return user.serialize() def list_users(self, limit=None, marker=None, include_marker=False): """Get a list of all users.""" users = [] with MongoDBClient(self._admin_user()) as admin_client: for user_info in admin_client.admin.system.users.find(): user = models.MongoDBUser(name=user_info['_id']) user.roles = user_info['roles'] if not user.is_ignored: users.append(user) LOG.debug('users = ' + str(users)) return guestagent_utils.serialize_list( users, limit=limit, marker=marker, include_marker=include_marker) def change_passwords(self, users): with MongoDBClient(self._admin_user()) as admin_client: for item in users: user = models.MongoDBUser.deserialize(item) # this could be called to create multiple users at once; # catch exceptions, log the message, and continue try: user.check_create() self.get_existing_user(user.name) self.create_validated_user(user, admin_client) LOG.debug('Changing password for user %(user)s', {'user': user.name}) self._create_user_with_client(user, admin_client) except (ValueError, pymongo.errors.PyMongoError) as e: LOG.error(e) LOG.warning('Skipping password change for user with ' 'name %(user)s', {'user': user.name}) def update_attributes(self, name, user_attrs): """Update user attributes.""" user = self.get_existing_user(name) password = user_attrs.get('password') if password: user.password = password 
self.change_passwords([user.serialize()]) if user_attrs.get('name'): LOG.warning('Changing user name is not supported.') if user_attrs.get('host'): LOG.warning('Changing user host is not supported.') def enable_root(self, password=None): """Create a user 'root' with role 'root'.""" if not password: LOG.debug('Generating root user password.') password = utils.generate_random_password() root_user = models.MongoDBUser.root(password=password) root_user.roles = {'db': 'admin', 'role': 'root'} root_user.check_create() self.create_validated_user(root_user) return root_user.serialize() def is_root_enabled(self): """Check if user 'admin.root' exists.""" with MongoDBClient(self._admin_user()) as admin_client: return bool(admin_client.admin.system.users.find_one( {'roles.role': 'root'} )) def _update_user_roles(self, user): with MongoDBClient(self._admin_user()) as admin_client: admin_client[user.database.name].add_user( user.username, roles=user.roles ) def grant_access(self, username, databases): """Adds the RW role to the user for each specified database.""" user = self.get_existing_user(username) for db_name in databases: # verify the database name models.MongoDBSchema(db_name) role = {'db': db_name, 'role': 'readWrite'} if role not in user.roles: LOG.debug('Adding role %(role)s to user %(user)s.', {'role': str(role), 'user': username}) user.roles = role else: LOG.debug('User %(user)s already has role %(role)s.', {'user': username, 'role': str(role)}) LOG.debug('Updating user %s.', username) self._update_user_roles(user) def revoke_access(self, username, database): """Removes the RW role from the user for the specified database.""" user = self.get_existing_user(username) # verify the database name models.MongoDBSchema(database) role = {'db': database, 'role': 'readWrite'} LOG.debug('Removing role %(role)s from user %(user)s.', {'role': str(role), 'user': username}) user.revoke_role(role) LOG.debug('Updating user %s.', username) self._update_user_roles(user) def list_access(self, username): """Returns a list of all databases for which the user has the RW role. """ user = self.get_existing_user(username) return user.databases def create_database(self, databases): """Forces creation of databases. For each new database creates a dummy document in a dummy collection, then drops the collection. 
""" tmp = 'dummy' with MongoDBClient(self._admin_user()) as admin_client: for item in databases: schema = models.MongoDBSchema.deserialize(item) schema.check_create() LOG.debug('Creating MongoDB database %s', schema.name) db = admin_client[schema.name] # FIXME(songjian):can not create database with null content, # so create a collection # db[tmp].insert({'dummy': True}) # db.drop_collection(tmp) db.create_collection(tmp) def delete_database(self, database): """Deletes the database.""" with MongoDBClient(self._admin_user()) as admin_client: schema = models.MongoDBSchema.deserialize(database) schema.check_delete() admin_client.drop_database(schema.name) def list_database_names(self): """Get the list of database names.""" with MongoDBClient(self._admin_user()) as admin_client: return admin_client.database_names() def list_databases(self, limit=None, marker=None, include_marker=False): """Lists the databases.""" databases = [] for db_name in self.list_database_names(): schema = models.MongoDBSchema(name=db_name) if not schema.is_ignored(): databases.append(schema) LOG.debug('databases = ' + str(databases)) return guestagent_utils.serialize_list( databases, limit=limit, marker=marker, include_marker=include_marker) def add_shard(self, url): """Runs the addShard command.""" with MongoDBClient(self._admin_user()) as admin_client: admin_client.admin.command({'addShard': url}) def get_repl_status(self): """Runs the replSetGetStatus command.""" with MongoDBClient(self._admin_user()) as admin_client: status = admin_client.admin.command('replSetGetStatus') LOG.debug('Replica set status: %s', status) return status def rs_initiate(self): """Runs the replSetInitiate command.""" with MongoDBClient(self._admin_user()) as admin_client: return admin_client.admin.command('replSetInitiate') def rs_add_members(self, members): """Adds the given members to the replication set.""" with MongoDBClient(self._admin_user()) as admin_client: # get the current config, add the new members, then save it config = admin_client.admin.command('replSetGetConfig')['config'] config['version'] += 1 next_id = max([m['_id'] for m in config['members']]) + 1 for member in members: config['members'].append({'_id': next_id, 'host': member}) next_id += 1 admin_client.admin.command('replSetReconfig', config) def db_stats(self, database, scale=1): """Gets the stats for the given database.""" with MongoDBClient(self._admin_user()) as admin_client: db_name = models.MongoDBSchema.deserialize(database).name return admin_client[db_name].command('dbStats', scale=scale) def list_active_shards(self): """Get a list of shards active in this cluster.""" with MongoDBClient(self._admin_user()) as admin_client: return [shard for shard in admin_client.config.shards.find()] class MongoDBClient(object): """A wrapper to manage a MongoDB connection.""" # engine information is cached by making it a class attribute engine = {} def __init__(self, user, host=None, port=None): """Get the client. Specifying host and/or port updates cached values. 
:param user: MongoDBUser instance used to authenticate :param host: server address, defaults to localhost :param port: server port, defaults to 27017 :return: """ new_client = False self._logged_in = False if not type(self).engine: # no engine cached type(self).engine['host'] = (host if host else 'localhost') type(self).engine['port'] = (port if port else MONGODB_PORT) new_client = True elif host or port: LOG.debug("Updating MongoDB client.") if host: type(self).engine['host'] = host if port: type(self).engine['port'] = port new_client = True if new_client: host = type(self).engine['host'] port = type(self).engine['port'] LOG.debug("Creating MongoDB client to %(host)s:%(port)s.", {'host': host, 'port': port}) type(self).engine['client'] = pymongo.MongoClient(host=host, port=port, connect=False) self.session = type(self).engine['client'] if user: db_name = user.database.name LOG.debug("Authenticating MongoDB client on %s.", db_name) self._db = self.session[db_name] self._db.authenticate(user.username, password=user.password) self._logged_in = True def __enter__(self): return self.session def __exit__(self, exc_type, exc_value, traceback): LOG.debug("Disconnecting from MongoDB.") if self._logged_in: self._db.logout() self.session.close() class MongoDBCredentials(object): """Handles storing/retrieving credentials. Stored as json in files.""" def __init__(self, username=None, password=None): self.username = username self.password = password def read(self, filename): credentials = operating_system.read_file(filename, codec=JsonCodec()) self.username = credentials['username'] self.password = credentials['password'] def write(self, filename): credentials = {'username': self.username, 'password': self.password} operating_system.write_file(filename, credentials, codec=JsonCodec()) operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/mongodb/system.py0000644000175000017500000000343300000000000027465 0ustar00coreycorey00000000000000# Copyright (c) 2014 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
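# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): MongoDBClient above
# keeps its pymongo engine in a *class* attribute so that all instances
# share one cached connection, rebuilding it whenever a caller passes a new
# host or port.  A dependency-free model of that cache-refresh pattern; the
# string stored under 'client' stands in for a real pymongo.MongoClient.

class CachedClientSketch(object):

    engine = {}  # shared across instances, like MongoDBClient.engine

    def __init__(self, host=None, port=None):
        cls = type(self)
        refresh = not cls.engine           # first use: nothing cached yet
        if host:
            cls.engine['host'] = host      # a new host invalidates the cache
            refresh = True
        if port:
            cls.engine['port'] = port      # a new port invalidates the cache
            refresh = True
        if refresh:
            cls.engine.setdefault('host', 'localhost')
            cls.engine.setdefault('port', 27017)
            cls.engine['client'] = 'client@%(host)s:%(port)s' % cls.engine
        self.session = cls.engine['client']

# CachedClientSketch().session                -> 'client@localhost:27017'
# CachedClientSketch(host='10.0.0.5').session -> 'client@10.0.0.5:27017'
# CachedClientSketch().session                -> still 'client@10.0.0.5:27017'
# ---------------------------------------------------------------------------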
from os import path from trove.guestagent.common import operating_system from trove.guestagent import pkg OS_NAME = operating_system.get_os() MONGODB_MOUNT_POINT = "/var/lib/mongodb" MONGO_PID_FILE = '/var/run/mongodb/mongodb.pid' MONGO_LOG_FILE = '/var/log/mongodb/mongod.log' CONFIG_CANDIDATES = ["/etc/mongodb.conf", "/etc/mongod.conf"] MONGO_ADMIN_NAME = 'os_admin' MONGO_ADMIN_ROLES = [{'db': 'admin', 'role': 'userAdminAnyDatabase'}, {'db': 'admin', 'role': 'dbAdminAnyDatabase'}, {'db': 'admin', 'role': 'clusterAdmin'}, {'db': 'admin', 'role': 'readWriteAnyDatabase'}] MONGO_ADMIN_CREDS_FILE = path.join(path.expanduser('~'), '.os_mongo_admin_creds.json') MONGO_KEY_FILE = '/etc/mongo_key' MONGOS_SERVICE_CANDIDATES = ["mongos"] MONGOD_SERVICE_CANDIDATES = ["mongodb", "mongod"] MONGODB_KILL = "sudo kill %s" FIND_PID = "ps xaco pid,cmd | awk '/mongo(d|db|s)/ {print $1}'" TIME_OUT = 1000 MONGO_USER = {operating_system.REDHAT: "mongod", operating_system.DEBIAN: "mongodb", operating_system.SUSE: "mongod"}[OS_NAME] PACKAGER = pkg.Package() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/percona/0000755000175000017500000000000000000000000025566 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/percona/__init__.py0000644000175000017500000000000000000000000027665 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/percona/manager.py0000644000175000017500000000253700000000000027561 0ustar00coreycorey00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_utils import importutils from trove.guestagent.datastore.mysql_common import manager MYSQL_APP = ("trove.guestagent.datastore.experimental.percona.service." "MySqlApp") MYSQL_APP_STATUS = ("trove.guestagent.datastore.experimental.percona.service." "MySqlAppStatus") MYSQL_ADMIN = ("trove.guestagent.datastore.experimental.percona.service." "MySqlAdmin") class Manager(manager.MySqlManager): def __init__(self): mysql_app = importutils.import_class(MYSQL_APP) mysql_app_status = importutils.import_class(MYSQL_APP_STATUS) mysql_admin = importutils.import_class(MYSQL_ADMIN) super(Manager, self).__init__(mysql_app, mysql_app_status, mysql_admin) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/percona/service.py0000644000175000017500000000543000000000000027602 0ustar00coreycorey00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.guestagent.datastore.mysql_common import service LOG = logging.getLogger(__name__) class KeepAliveConnection(service.BaseKeepAliveConnection): pass class MySqlAppStatus(service.BaseMySqlAppStatus): pass class LocalSqlClient(service.BaseLocalSqlClient): pass class MySqlApp(service.BaseMySqlApp): def __init__(self, status): super(MySqlApp, self).__init__(status, LocalSqlClient, KeepAliveConnection) def _get_slave_status(self): with self.local_sql_client(self.get_engine()) as client: return client.execute('SHOW SLAVE STATUS').first() def _get_master_UUID(self): slave_status = self._get_slave_status() return slave_status and slave_status['Master_UUID'] or None def _get_gtid_executed(self): with self.local_sql_client(self.get_engine()) as client: return client.execute('SELECT @@global.gtid_executed').first()[0] def get_last_txn(self): master_UUID = self._get_master_UUID() last_txn_id = '0' gtid_executed = self._get_gtid_executed() for gtid_set in gtid_executed.split(','): uuid_set = gtid_set.split(':') if uuid_set[0] == master_UUID: last_txn_id = uuid_set[-1].split('-')[-1] break return master_UUID, int(last_txn_id) def get_latest_txn_id(self): LOG.info("Retrieving latest txn id.") return self._get_gtid_executed() def wait_for_txn(self, txn): LOG.info("Waiting on txn '%s'.", txn) with self.local_sql_client(self.get_engine()) as client: client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')" % txn) class MySqlRootAccess(service.BaseMySqlRootAccess): def __init__(self): super(MySqlRootAccess, self).__init__(LocalSqlClient, MySqlApp(MySqlAppStatus.get())) class MySqlAdmin(service.BaseMySqlAdmin): def __init__(self): super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(), MySqlApp) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/postgresql/0000755000175000017500000000000000000000000026342 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/postgresql/__init__.py0000644000175000017500000000000000000000000030441 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/postgresql/manager.py0000644000175000017500000003141100000000000030326 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from trove.common import cfg from trove.common.db.postgresql import models from trove.common import exception from trove.common.i18n import _ from trove.common import instance as trove_instance from trove.common.notification import EndNotification from trove.common import utils from trove.guestagent import backup from trove.guestagent.datastore.experimental.postgresql.service import ( PgSqlAdmin) from trove.guestagent.datastore.experimental.postgresql.service import PgSqlApp from trove.guestagent.datastore import manager from trove.guestagent import guest_log from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager(manager.Manager): def __init__(self, manager_name='postgresql'): super(Manager, self).__init__(manager_name) self._app = None self._admin = None @property def status(self): return self.app.status @property def app(self): if self._app is None: self._app = self.build_app() return self._app def build_app(self): return PgSqlApp() @property def admin(self): if self._admin is None: self._admin = self.app.build_admin() return self._admin @property def configuration_manager(self): return self.app.configuration_manager def get_datastore_log_defs(self): owner = self.app.pgsql_owner long_query_time = CONF.get(self.manager).get( 'guest_log_long_query_time') general_log_file = self.build_log_file_name( self.GUEST_LOG_DEFS_GENERAL_LABEL, owner, datastore_dir=self.app.pgsql_log_dir) general_log_dir, general_log_filename = os.path.split(general_log_file) return { self.GUEST_LOG_DEFS_GENERAL_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER, self.GUEST_LOG_USER_LABEL: owner, self.GUEST_LOG_FILE_LABEL: general_log_file, self.GUEST_LOG_ENABLE_LABEL: { 'logging_collector': 'on', 'log_destination': self._quote_str('stderr'), 'log_directory': self._quote_str(general_log_dir), 'log_filename': self._quote_str(general_log_filename), 'log_statement': self._quote_str('all'), 'debug_print_plan': 'on', 'log_min_duration_statement': long_query_time, }, self.GUEST_LOG_DISABLE_LABEL: { 'logging_collector': 'off', }, self.GUEST_LOG_RESTART_LABEL: True, }, } def _quote_str(self, value): return "'%s'" % value def grant_access(self, context, username, hostname, databases): self.admin.grant_access(context, username, hostname, databases) def revoke_access(self, context, username, hostname, database): self.admin.revoke_access(context, username, hostname, database) def list_access(self, context, username, hostname): return self.admin.list_access(context, username, hostname) def update_overrides(self, context, overrides, remove=False): self.app.update_overrides(context, overrides, remove) def apply_overrides(self, context, overrides): self.app.apply_overrides(context, overrides) def reset_configuration(self, context, configuration): self.app.reset_configuration(context, configuration) def start_db_with_conf_changes(self, context, config_contents): self.app.start_db_with_conf_changes(context, config_contents) def create_database(self, context, databases): with EndNotification(context): self.admin.create_database(context, databases) def delete_database(self, context, database): with EndNotification(context): self.admin.delete_database(context, database) def list_databases( self, context, limit=None, marker=None, include_marker=False): return self.admin.list_databases( context, limit=limit, marker=marker, 
include_marker=include_marker) def install(self, context, packages): self.app.install(context, packages) def stop_db(self, context, do_not_start_on_reboot=False): self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def restart(self, context): self.app.restart() self.set_guest_log_status(guest_log.LogStatus.Restart_Completed) def pre_upgrade(self, context): LOG.debug('Preparing Postgresql for upgrade.') self.app.status.begin_restart() self.app.stop_db() mount_point = self.app.pgsql_base_data_dir upgrade_info = self.app.save_files_pre_upgrade(mount_point) upgrade_info['mount_point'] = mount_point return upgrade_info def post_upgrade(self, context, upgrade_info): LOG.debug('Finalizing Postgresql upgrade.') self.app.stop_db() if 'device' in upgrade_info: self.mount_volume(context, mount_point=upgrade_info['mount_point'], device_path=upgrade_info['device'], write_to_fstab=True) self.app.restore_files_post_upgrade(upgrade_info) self.app.start_db() def is_root_enabled(self, context): return self.app.is_root_enabled(context) def enable_root(self, context, root_password=None): return self.app.enable_root(context, root_password=root_password) def disable_root(self, context): self.app.disable_root(context) def enable_root_with_password(self, context, root_password=None): return self.app.enable_root_with_password( context, root_password=root_password) def create_user(self, context, users): with EndNotification(context): self.admin.create_user(context, users) def list_users( self, context, limit=None, marker=None, include_marker=False): return self.admin.list_users( context, limit=limit, marker=marker, include_marker=include_marker) def delete_user(self, context, user): with EndNotification(context): self.admin.delete_user(context, user) def get_user(self, context, username, hostname): return self.admin.get_user(context, username, hostname) def change_passwords(self, context, users): with EndNotification(context): self.admin.change_passwords(context, users) def update_attributes(self, context, username, hostname, user_attrs): with EndNotification(context): self.admin.update_attributes( context, username, hostname, user_attrs) def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): self.app.install(context, packages) LOG.debug("Waiting for database first boot.") if (self.app.status.wait_for_real_status_to_change_to( trove_instance.ServiceStatuses.RUNNING, CONF.state_change_wait_time, False)): LOG.debug("Stopping database prior to initial configuration.") self.app.stop_db() if device_path: device = volume.VolumeDevice(device_path) device.format() if os.path.exists(mount_point): device.migrate_data(mount_point) device.mount(mount_point) self.configuration_manager.save_configuration(config_contents) self.app.apply_initial_guestagent_configuration() os_admin = models.PostgreSQLUser(self.app.ADMIN_USER) if backup_info: backup.restore(context, backup_info, '/tmp') self.app.set_current_admin_user(os_admin) if snapshot: LOG.info("Found snapshot info: %s", str(snapshot)) self.attach_replica(context, snapshot, snapshot['config']) self.app.start_db() if not backup_info: self.app.secure(context) self._admin = PgSqlAdmin(os_admin) if not cluster_config and self.is_root_enabled(context): self.status.report_root(context) def create_backup(self, context, backup_info): with EndNotification(context): self.app.enable_backups() backup.backup(context, backup_info) def 
backup_required_for_replication(self, context): return self.replication.backup_required_for_replication() def attach_replica(self, context, replica_info, slave_config): self.replication.enable_as_slave(self.app, replica_info, None) def detach_replica(self, context, for_failover=False): replica_info = self.replication.detach_slave(self.app, for_failover) return replica_info def enable_as_master(self, context, replica_source_config): self.app.enable_backups() self.replication.enable_as_master(self.app, None) def make_read_only(self, context, read_only): """There seems to be no way to flag this at the database level in PostgreSQL at the moment -- see discussion here: http://www.postgresql.org/message-id/flat/CA+TgmobWQJ-GCa_tWUc4=80A 1RJ2_+Rq3w_MqaVguk_q018dqw@mail.gmail.com#CA+TgmobWQJ-GCa_tWUc4=80A1RJ 2_+Rq3w_MqaVguk_q018dqw@mail.gmail.com """ pass def get_replica_context(self, context): LOG.debug("Getting replica context.") return self.replication.get_replica_context(self.app) def get_latest_txn_id(self, context): if self.app.pg_is_in_recovery(): lsn = self.app.pg_last_xlog_replay_location() else: lsn = self.app.pg_current_xlog_location() LOG.info("Last xlog location found: %s", lsn) return lsn def get_last_txn(self, context): master_host = self.app.pg_primary_host() repl_offset = self.get_latest_txn_id(context) return master_host, repl_offset def wait_for_txn(self, context, txn): if not self.app.pg_is_in_recovery(): raise RuntimeError(_("Attempting to wait for a txn on a server " "not in recovery mode!")) def _wait_for_txn(): lsn = self.app.pg_last_xlog_replay_location() LOG.info("Last xlog location found: %s", lsn) return lsn >= txn try: utils.poll_until(_wait_for_txn, time_out=120) except exception.PollTimeOut: raise RuntimeError(_("Timeout occurred waiting for xlog " "offset to change to '%s'.") % txn) def cleanup_source_on_replica_detach(self, context, replica_info): LOG.debug("Calling cleanup_source_on_replica_detach") self.replication.cleanup_source_on_replica_detach(self.app, replica_info) def demote_replication_master(self, context): LOG.debug("Calling demote_replication_master") self.replication.demote_master(self.app) def get_replication_snapshot(self, context, snapshot_info, replica_source_config=None): LOG.debug("Getting replication snapshot.") self.app.enable_backups() self.replication.enable_as_master(self.app, None) snapshot_id, log_position = ( self.replication.snapshot_for_replication(context, self.app, None, snapshot_info)) mount_point = CONF.get(self.manager).mount_point volume_stats = self.get_filesystem_stats(context, mount_point) replication_snapshot = { 'dataset': { 'datastore_manager': self.manager, 'dataset_size': volume_stats.get('used', 0.0), 'volume_size': volume_stats.get('total', 0.0), 'snapshot_id': snapshot_id }, 'replication_strategy': self.replication_strategy, 'master': self.replication.get_master_ref(self.app, snapshot_info), 'log_position': log_position } return replication_snapshot ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/postgresql/pgsql_query.py0000644000175000017500000001253500000000000031275 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # Copyright (c) 2016 Tesora, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
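# ---------------------------------------------------------------------
# Illustrative aside (hypothetical helper, not in the original source):
# Manager.wait_for_txn() above compares xlog locations as plain strings.
# PostgreSQL reports LSNs in "XXXXXXXX/YYYYYYYY" hex form, so a numeric
# comparison is safer when the hex parts differ in length:
def lsn_to_int(lsn):
    """Convert an 'XXXXXXXX/YYYYYYYY' LSN string to an integer."""
    high, low = lsn.split('/')
    return (int(high, 16) << 32) + int(low, 16)

# String comparison would order '0/9000000' after '0/10000000', while
#   lsn_to_int('0/9000000') < lsn_to_int('0/10000000')   # -> True
# ---------------------------------------------------------------------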
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class DatabaseQuery(object): @classmethod def list(cls, ignore=()): """Query to list all databases.""" statement = ( "SELECT datname, pg_encoding_to_char(encoding), " "datcollate FROM pg_database " "WHERE datistemplate = false" ) for name in ignore: statement += " AND datname != '{name}'".format(name=name) return statement @classmethod def create(cls, name, encoding=None, collation=None): """Query to create a database.""" statement = "CREATE DATABASE \"{name}\"".format(name=name) if encoding is not None: statement += " ENCODING = '{encoding}'".format( encoding=encoding, ) if collation is not None: statement += " LC_COLLATE = '{collation}'".format( collation=collation, ) return statement @classmethod def drop(cls, name): """Query to drop a database.""" return "DROP DATABASE IF EXISTS \"{name}\"".format(name=name) class UserQuery(object): @classmethod def list(cls, ignore=()): """Query to list all users.""" statement = ( "SELECT usename, datname, pg_encoding_to_char(encoding), " "datcollate FROM pg_catalog.pg_user " "LEFT JOIN pg_catalog.pg_database " "ON CONCAT(usename, '=CTc/os_admin') = ANY(datacl::text[]) " "WHERE (datistemplate ISNULL OR datistemplate = false)") if ignore: for name in ignore: statement += " AND usename != '{name}'".format(name=name) return statement @classmethod def list_root(cls, ignore=()): """Query to list all superuser accounts.""" statement = ( "SELECT usename FROM pg_catalog.pg_user WHERE usesuper = true" ) for name in ignore: statement += " AND usename != '{name}'".format(name=name) return statement @classmethod def get(cls, name): """Query to get a single user.""" return cls.list() + " AND usename = '{name}'".format(name=name) @classmethod def create(cls, name, password, encrypt_password=None, *options): """Query to create a user with a password.""" create_clause = "CREATE USER \"{name}\"".format(name=name) with_clause = cls._build_with_clause( password, encrypt_password, *options) return ' '.join([create_clause, with_clause]) @classmethod def _build_with_clause(cls, password, encrypt_password=None, *options): tokens = ['WITH'] if password: # Do not specify the encryption option if 'encrypt_password' # is None. PostgreSQL will use the configuration default. if encrypt_password is True: tokens.append('ENCRYPTED') elif encrypt_password is False: tokens.append('UNENCRYPTED') tokens.append('PASSWORD') tokens.append("'{password}'".format(password=password)) if options: tokens.extend(options) if len(tokens) > 1: return ' '.join(tokens) return '' @classmethod def update_password(cls, name, password, encrypt_password=None): """Query to update the password for a user.""" return cls.alter_user(name, password, encrypt_password) @classmethod def alter_user(cls, name, password, encrypt_password=None, *options): """Query to alter a user.""" alter_clause = "ALTER USER \"{name}\"".format(name=name) with_clause = cls._build_with_clause( password, encrypt_password, *options) return ''.join([alter_clause, with_clause]) @classmethod def update_name(cls, old, new): """Query to update the name of a user. 
This statement also results in an automatic permission transfer to the new username. """ return "ALTER USER \"{old}\" RENAME TO \"{new}\"".format( old=old, new=new, ) @classmethod def drop(cls, name): """Query to drop a user.""" return "DROP USER \"{name}\"".format(name=name) class AccessQuery(object): @classmethod def grant(cls, user, database): """Query to grant user access to a database.""" return "GRANT ALL ON DATABASE \"{database}\" TO \"{user}\"".format( database=database, user=user, ) @classmethod def revoke(cls, user, database): """Query to revoke user access to a database.""" return "REVOKE ALL ON DATABASE \"{database}\" FROM \"{user}\"".format( database=database, user=user, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/postgresql/service.py0000644000175000017500000011445500000000000030366 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # Copyright (c) 2016 Tesora, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import OrderedDict import os import re from oslo_log import log as logging import psycopg2 from trove.common import cfg from trove.common.db.postgresql import models from trove.common import exception from trove.common.i18n import _ from trove.common import instance from trove.common.stream_codecs import PropertiesCodec from trove.common import utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import OneFileOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.experimental.postgresql import pgsql_query from trove.guestagent.datastore import service from trove.guestagent import pkg LOG = logging.getLogger(__name__) CONF = cfg.CONF BACKUP_CFG_OVERRIDE = 'PgBaseBackupConfig' DEBUG_MODE_OVERRIDE = 'DebugLevelOverride' class PgSqlApp(object): OS = operating_system.get_os() LISTEN_ADDRESSES = ['*'] # Listen on all available IP (v4/v6) interfaces. ADMIN_USER = 'os_admin' # Trove's administrative user. 
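    # Illustrative aside (hypothetical helper, not part of this class):
    # the PropertiesCodec wired up in __init__ below maps Python values
    # to postgresql.conf syntax -- True/False become on/off and None
    # becomes ''. A minimal standalone rendering of that convention:
    #
    #   def render_pg_setting(key, value):
    #       """Render one postgresql.conf line, e.g. 'autovacuum = on'."""
    #       if value is True:
    #           value = 'on'
    #       elif value is False:
    #           value = 'off'
    #       elif value is None:
    #           value = "''"
    #       return '%s = %s' % (key, value)
    #
    #   render_pg_setting('logging_collector', True)
    #   # -> 'logging_collector = on'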
def __init__(self): super(PgSqlApp, self).__init__() self._current_admin_user = None self.status = PgSqlAppStatus(self.pgsql_extra_bin_dir) revision_dir = guestagent_utils.build_file_path( os.path.dirname(self.pgsql_config), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self.configuration_manager = ConfigurationManager( self.pgsql_config, self.pgsql_owner, self.pgsql_owner, PropertiesCodec( delimiter='=', string_mappings={'on': True, 'off': False, "''": None}), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) @property def service_candidates(self): return ['postgresql'] @property def pgsql_owner(self): return 'postgres' @property def default_superuser_name(self): return "postgres" @property def pgsql_base_data_dir(self): return '/var/lib/postgresql/' @property def pgsql_pid_file(self): return guestagent_utils.build_file_path(self.pgsql_run_dir, 'postgresql.pid') @property def pgsql_run_dir(self): return '/var/run/postgresql/' @property def pgsql_extra_bin_dir(self): """Redhat and Ubuntu packages for PgSql do not place 'extra' important binaries in /usr/bin, but rather in a directory like /usr/pgsql-9.4/bin in the case of PostgreSQL 9.4 for RHEL/CentOS """ return { operating_system.DEBIAN: '/usr/lib/postgresql/%s/bin/', operating_system.REDHAT: '/usr/pgsql-%s/bin/', operating_system.SUSE: '/usr/bin/' }[self.OS] % self.pg_version[1] @property def pgsql_config(self): return self._find_config_file('postgresql.conf') @property def pgsql_hba_config(self): return self._find_config_file('pg_hba.conf') @property def pgsql_ident_config(self): return self._find_config_file('pg_ident.conf') def _find_config_file(self, name_pattern): version_base = guestagent_utils.build_file_path(self.pgsql_config_dir, self.pg_version[1]) return sorted(operating_system.list_files_in_directory( version_base, recursive=True, pattern=name_pattern, as_root=True), key=len)[0] @property def pgsql_config_dir(self): return { operating_system.DEBIAN: '/etc/postgresql/', operating_system.REDHAT: '/var/lib/postgresql/', operating_system.SUSE: '/var/lib/pgsql/' }[self.OS] @property def pgsql_log_dir(self): return "/var/log/postgresql/" def build_admin(self): return PgSqlAdmin(self.get_current_admin_user()) def update_overrides(self, context, overrides, remove=False): if remove: self.configuration_manager.remove_user_override() elif overrides: self.configuration_manager.apply_user_override(overrides) def set_current_admin_user(self, user): self._current_admin_user = user def get_current_admin_user(self): if self._current_admin_user is not None: return self._current_admin_user if self.status.is_installed: return models.PostgreSQLUser(self.ADMIN_USER) return models.PostgreSQLUser(self.default_superuser_name) def apply_overrides(self, context, overrides): self.reload_configuration() def reload_configuration(self): """Send a signal to the server, causing configuration files to be reloaded by all server processes. Active queries or connections to the database will not be interrupted. NOTE: Do not use the 'SET' command as it only affects the current session. """ self.build_admin().psql( "SELECT pg_reload_conf()") def reset_configuration(self, context, configuration): """Reset the PgSql configuration to the one given. 
""" config_contents = configuration['config_contents'] self.configuration_manager.save_configuration(config_contents) def start_db_with_conf_changes(self, context, config_contents): """Starts the PgSql instance with a new configuration.""" if self.status.is_running: raise RuntimeError(_("The service is still running.")) self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration() self.start_db() def apply_initial_guestagent_configuration(self): """Update guestagent-controlled configuration properties. """ LOG.debug("Applying initial guestagent configuration.") file_locations = { 'data_directory': self._quote(self.pgsql_data_dir), 'hba_file': self._quote(self.pgsql_hba_config), 'ident_file': self._quote(self.pgsql_ident_config), 'external_pid_file': self._quote(self.pgsql_pid_file), 'unix_socket_directories': self._quote(self.pgsql_run_dir), 'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)), 'port': cfg.get_configuration_property('postgresql_port')} self.configuration_manager.apply_system_override(file_locations) self._apply_access_rules() @staticmethod def _quote(value): return "'%s'" % value def _apply_access_rules(self): LOG.debug("Applying database access rules.") # Connections to all resources are granted. # # Local access from administrative users is implicitly trusted. # # Remote access from the Trove's account is always rejected as # it is not needed and could be used by malicious users to hijack the # instance. # # Connections from other accounts always require a double-MD5-hashed # password. # # Make the rules readable only by the Postgres service. # # NOTE: The order of entries is important. # The first failure to authenticate stops the lookup. # That is why the 'local' connections validate first. # The OrderedDict is necessary to guarantee the iteration order. 
local_admins = ','.join([self.default_superuser_name, self.ADMIN_USER]) remote_admins = self.ADMIN_USER access_rules = OrderedDict( [('local', [['all', local_admins, None, 'trust'], ['replication', local_admins, None, 'trust'], ['all', 'all', None, 'md5']]), ('host', [['all', local_admins, '127.0.0.1/32', 'trust'], ['all', local_admins, '::1/128', 'trust'], ['all', local_admins, 'localhost', 'trust'], ['all', remote_admins, '0.0.0.0/0', 'reject'], ['all', remote_admins, '::/0', 'reject'], ['all', 'all', '0.0.0.0/0', 'md5'], ['all', 'all', '::/0', 'md5']]) ]) operating_system.write_file(self.pgsql_hba_config, access_rules, PropertiesCodec( string_mappings={'\t': None}), as_root=True) operating_system.chown(self.pgsql_hba_config, self.pgsql_owner, self.pgsql_owner, as_root=True) operating_system.chmod(self.pgsql_hba_config, FileMode.SET_USR_RO, as_root=True) def disable_backups(self): """Reverse overrides applied by PgBaseBackup strategy""" if not self.configuration_manager.has_system_override( BACKUP_CFG_OVERRIDE): return LOG.info("Removing configuration changes for backups") self.configuration_manager.remove_system_override(BACKUP_CFG_OVERRIDE) self.remove_wal_archive_dir() self.restart() def enable_backups(self): """Apply necessary changes to config to enable WAL-based backups if we are using the PgBaseBackup strategy """ LOG.info("Checking if we need to apply changes to WAL config") if 'PgBaseBackup' not in self.backup_strategy: return if self.configuration_manager.has_system_override(BACKUP_CFG_OVERRIDE): return LOG.info("Applying changes to WAL config for use by base backups") wal_arch_loc = self.wal_archive_location if not os.path.isdir(wal_arch_loc): raise RuntimeError(_("Cannot enable backup as WAL dir '%s' does " "not exist.") % wal_arch_loc) arch_cmd = "'test ! -f {wal_arch}/%f && cp %p {wal_arch}/%f'".format( wal_arch=wal_arch_loc ) # Only support pg version > 9.6, wal_level set to replica, and # remove parameter "checkpoint_segments". opts = { 'wal_level': 'replica', 'archive_mode': 'on', 'max_wal_senders': 8, 'wal_log_hints': 'on', 'wal_keep_segments': 8, 'archive_command': arch_cmd } self.configuration_manager.apply_system_override( opts, BACKUP_CFG_OVERRIDE) self.restart() def disable_debugging(self, level=1): """Disable debug-level logging in postgres""" self.configuration_manager.remove_system_override(DEBUG_MODE_OVERRIDE) def enable_debugging(self, level=1): """Enable debug-level logging in postgres""" opt = {'log_min_messages': 'DEBUG%s' % level} self.configuration_manager.apply_system_override(opt, DEBUG_MODE_OVERRIDE) def install(self, context, packages): """Install one or more packages that postgresql needs to run. The packages parameter is a string representing the package names that should be given to the system's package manager. 
""" LOG.debug( "{guest_id}: Beginning PgSql package installation.".format( guest_id=CONF.guest_id ) ) self.recreate_wal_archive_dir() packager = pkg.Package() if not packager.pkg_is_installed(packages): try: LOG.info( "{guest_id}: Installing ({packages}).".format( guest_id=CONF.guest_id, packages=packages, ) ) packager.pkg_install(packages, {}, 1000) except (pkg.PkgAdminLockError, pkg.PkgPermissionError, pkg.PkgPackageStateError, pkg.PkgNotFoundError, pkg.PkgTimeout, pkg.PkgScriptletError, pkg.PkgDownloadError, pkg.PkgSignError, pkg.PkgBrokenError): LOG.exception( "{guest_id}: There was a package manager error while " "trying to install ({packages}).".format( guest_id=CONF.guest_id, packages=packages, ) ) raise except Exception: LOG.exception( "{guest_id}: The package manager encountered an unknown " "error while trying to install ({packages}).".format( guest_id=CONF.guest_id, packages=packages, ) ) raise else: self.start_db() LOG.debug( "{guest_id}: Completed package installation.".format( guest_id=CONF.guest_id, ) ) @property def pgsql_recovery_config(self): return os.path.join(self.pgsql_data_dir, "recovery.conf") @property def pgsql_data_dir(self): return os.path.dirname(self.pg_version[0]) @property def pg_version(self): """Find the database version file stored in the data directory. :returns: A tuple with the path to the version file (in the root of the data directory) and the version string. """ version_files = operating_system.list_files_in_directory( self.pgsql_base_data_dir, recursive=True, pattern='PG_VERSION', as_root=True) version_file = sorted(version_files, key=len)[0] version = operating_system.read_file(version_file, as_root=True) return version_file, version.strip() def restart(self): self.status.restart_db_service( self.service_candidates, CONF.state_change_wait_time) def start_db(self, enable_on_boot=True, update_db=False): self.status.start_db_service( self.service_candidates, CONF.state_change_wait_time, enable_on_boot=enable_on_boot, update_db=update_db) def stop_db(self, do_not_start_on_reboot=False, update_db=False): self.status.stop_db_service( self.service_candidates, CONF.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def secure(self, context): """Create an administrative user for Trove. Force password encryption. 
Also disable the built-in superuser """ password = utils.generate_random_password() os_admin_db = models.PostgreSQLSchema(self.ADMIN_USER) os_admin = models.PostgreSQLUser(self.ADMIN_USER, password) os_admin.databases.append(os_admin_db.serialize()) postgres = models.PostgreSQLUser(self.default_superuser_name) admin = PgSqlAdmin(postgres) admin._create_database(context, os_admin_db) admin._create_admin_user(context, os_admin, encrypt_password=True) PgSqlAdmin(os_admin).alter_user(context, postgres, None, 'NOSUPERUSER', 'NOLOGIN') self.set_current_admin_user(os_admin) def pg_current_xlog_location(self): """Wrapper for pg_current_xlog_location() Cannot be used against a running slave """ version = int(self.pg_version[1]) if version < 10: query = "SELECT pg_current_xlog_location()" else: query = "SELECT pg_current_wal_lsn()" r = self.build_admin().query(query) return r[0][0] def pg_last_xlog_replay_location(self): """Wrapper for pg_last_xlog_replay_location() For use on standby servers """ version = int(self.pg_version[1]) if version < 10: query = "SELECT pg_last_xlog_replay_location()" else: query = "SELECT pg_last_wal_replay_lsn()" r = self.build_admin().query(query) return r[0][0] def pg_is_in_recovery(self): """Wrapper for pg_is_in_recovery() for detecting a server in standby mode """ r = self.build_admin().query("SELECT pg_is_in_recovery()") return r[0][0] def pg_primary_host(self): """There seems to be no way to programmatically determine this on a hot standby, so grab what we have written to the recovery file """ r = operating_system.read_file(self.pgsql_recovery_config, as_root=True) regexp = re.compile(r"host=(\d+.\d+.\d+.\d+) ") m = regexp.search(r) return m.group(1) def recreate_wal_archive_dir(self): wal_archive_dir = self.wal_archive_location operating_system.remove(wal_archive_dir, force=True, recursive=True, as_root=True) operating_system.create_directory(wal_archive_dir, user=self.pgsql_owner, group=self.pgsql_owner, force=True, as_root=True) def remove_wal_archive_dir(self): wal_archive_dir = self.wal_archive_location operating_system.remove(wal_archive_dir, force=True, recursive=True, as_root=True) def is_root_enabled(self, context): """Return True if there is a superuser account enabled. """ results = self.build_admin().query( pgsql_query.UserQuery.list_root(), timeout=30, ) # There should be only one superuser (Trove's administrative account). return len(results) > 1 or (results[0][0] != self.ADMIN_USER) def enable_root(self, context, root_password=None): """Create a superuser user or reset the superuser password. The default PostgreSQL administration account is 'postgres'. This account always exists and cannot be removed. Its attributes and access can however be altered. Clients can connect from the localhost or remotely via TCP/IP: Local clients (e.g. psql) can connect from a preset *system* account called 'postgres'. This system account has no password and is *locked* by default, so that it can be used by *local* users only. It should *never* be enabled (or its password set)!!! That would just open up a new attack vector on the system account. Remote clients should use a build-in *database* account of the same name. It's password can be changed using the "ALTER USER" statement. Access to this account is disabled by Trove exposed only once the superuser access is requested. Trove itself creates its own administrative account. 
{"_name": "postgres", "_password": ""} """ user = self.build_root_user(root_password) self.build_admin().alter_user( context, user, None, *PgSqlAdmin.ADMIN_OPTIONS) return user.serialize() def build_root_user(self, password=None): return models.PostgreSQLUser.root(password=password) def pg_start_backup(self, backup_label): r = self.build_admin().query( "SELECT pg_start_backup('%s', true)" % backup_label) return r[0][0] def pg_xlogfile_name(self, start_segment): version = int(self.pg_version[1]) if version < 10: query = "SELECT pg_xlogfile_name('%s')" else: query = "SELECT pg_walfile_name('%s')" r = self.build_admin().query(query % start_segment) return r[0][0] def pg_stop_backup(self): r = self.build_admin().query("SELECT pg_stop_backup()") return r[0][0] def disable_root(self, context): """Generate a new random password for the public superuser account. Do not disable its access rights. Once enabled the account should stay that way. """ self.enable_root(context) def enable_root_with_password(self, context, root_password=None): return self.enable_root(context, root_password) @property def wal_archive_location(self): return cfg.get_configuration_property('wal_archive_location') @property def backup_strategy(self): return cfg.get_configuration_property('backup_strategy') def save_files_pre_upgrade(self, mount_point): LOG.debug('Saving files pre-upgrade.') mnt_etc_dir = os.path.join(mount_point, 'save_etc') if self.OS not in [operating_system.REDHAT]: # No need to store the config files away for Redhat because # they are already stored in the data volume. operating_system.remove(mnt_etc_dir, force=True, as_root=True) operating_system.copy(self.pgsql_config_dir, mnt_etc_dir, preserve=True, recursive=True, as_root=True) return {'save_etc': mnt_etc_dir} def restore_files_post_upgrade(self, upgrade_info): LOG.debug('Restoring files post-upgrade.') if self.OS not in [operating_system.REDHAT]: # No need to restore the config files for Redhat because # they are already in the data volume. operating_system.copy('%s/.' % upgrade_info['save_etc'], self.pgsql_config_dir, preserve=True, recursive=True, force=True, as_root=True) operating_system.remove(upgrade_info['save_etc'], force=True, as_root=True) self.configuration_manager.refresh_cache() self.status.set_ready() class PgSqlAppStatus(service.BaseDbStatus): HOST = 'localhost' def __init__(self, tools_dir): super(PgSqlAppStatus, self).__init__() self._cmd = guestagent_utils.build_file_path(tools_dir, 'pg_isready') def _get_actual_db_status(self): try: utils.execute_with_timeout( self._cmd, '-h', self.HOST, log_output_on_error=True) return instance.ServiceStatuses.RUNNING except exception.ProcessExecutionError: return instance.ServiceStatuses.SHUTDOWN except utils.Timeout: return instance.ServiceStatuses.BLOCKED except Exception: LOG.exception("Error getting Postgres status.") return instance.ServiceStatuses.CRASHED return instance.ServiceStatuses.SHUTDOWN class PgSqlAdmin(object): # Default set of options of an administrative account. ADMIN_OPTIONS = ( 'SUPERUSER', 'CREATEDB', 'CREATEROLE', 'INHERIT', 'REPLICATION', 'BYPASSRLS', 'LOGIN' ) def __init__(self, user): port = cfg.get_configuration_property('postgresql_port') self.__connection = PostgresLocalhostConnection(user.name, port=port) def grant_access(self, context, username, hostname, databases): """Give a user permission to use a given database. The username and hostname parameters are strings. 
The databases parameter is a list of strings representing the names of the databases to grant permission on. """ for database in databases: LOG.info( "{guest_id}: Granting user ({user}) access to database " "({database}).".format( guest_id=CONF.guest_id, user=username, database=database,) ) self.psql( pgsql_query.AccessQuery.grant( user=username, database=database, ), timeout=30, ) def revoke_access(self, context, username, hostname, database): """Revoke a user's permission to use a given database. The username and hostname parameters are strings. The database parameter is a string representing the name of the database. """ LOG.info( "{guest_id}: Revoking user ({user}) access to database" "({database}).".format( guest_id=CONF.guest_id, user=username, database=database,) ) self.psql( pgsql_query.AccessQuery.revoke( user=username, database=database, ), timeout=30, ) def list_access(self, context, username, hostname): """List database for which the given user as access. Return a list of serialized Postgres databases. """ user = self._find_user(context, username) if user is not None: return user.databases raise exception.UserNotFound(uuid=username) def create_database(self, context, databases): """Create the list of specified databases. The databases parameter is a list of serialized Postgres databases. """ for database in databases: self._create_database( context, models.PostgreSQLSchema.deserialize(database)) def _create_database(self, context, database): """Create a database. :param database: Database to be created. :type database: PostgreSQLSchema """ LOG.info( "{guest_id}: Creating database {name}.".format( guest_id=CONF.guest_id, name=database.name, ) ) self.psql( pgsql_query.DatabaseQuery.create( name=database.name, encoding=database.character_set, collation=database.collate, ), timeout=30, ) def delete_database(self, context, database): """Delete the specified database. """ self._drop_database( models.PostgreSQLSchema.deserialize(database)) def _drop_database(self, database): """Drop a given Postgres database. :param database: Database to be dropped. :type database: PostgreSQLSchema """ LOG.info( "{guest_id}: Dropping database {name}.".format( guest_id=CONF.guest_id, name=database.name, ) ) self.psql( pgsql_query.DatabaseQuery.drop(name=database.name), timeout=30, ) def list_databases(self, context, limit=None, marker=None, include_marker=False): """List all databases on the instance. Return a paginated list of serialized Postgres databases. """ return guestagent_utils.serialize_list( self._get_databases(), limit=limit, marker=marker, include_marker=include_marker) def _get_databases(self): """Return all non-system Postgres databases on the instance.""" results = self.query( pgsql_query.DatabaseQuery.list(ignore=self.ignore_dbs), timeout=30, ) return [models.PostgreSQLSchema( row[0].strip(), character_set=row[1], collate=row[2]) for row in results] def create_user(self, context, users): """Create users and grant privileges for the specified databases. The users parameter is a list of serialized Postgres users. """ for user in users: self._create_user( context, models.PostgreSQLUser.deserialize(user), None) def _create_user(self, context, user, encrypt_password=None, *options): """Create a user and grant privileges for the specified databases. :param user: User to be created. :type user: PostgreSQLUser :param encrypt_password: Store passwords encrypted if True. Fallback to configured default behavior if None. :type encrypt_password: boolean :param options: Other user options. 
:type options: list """ LOG.info( "{guest_id}: Creating user {user} {with_clause}." .format( guest_id=CONF.guest_id, user=user.name, with_clause=pgsql_query.UserQuery._build_with_clause( '', encrypt_password, *options ), ) ) self.psql( pgsql_query.UserQuery.create( user.name, user.password, encrypt_password, *options ), timeout=30, ) self._grant_access( context, user.name, [models.PostgreSQLSchema.deserialize(db) for db in user.databases]) def _create_admin_user(self, context, user, encrypt_password=None): self._create_user(context, user, encrypt_password, *self.ADMIN_OPTIONS) def _grant_access(self, context, username, databases): self.grant_access( context, username, None, [db.name for db in databases], ) def list_users( self, context, limit=None, marker=None, include_marker=False): """List all users on the instance along with their access permissions. Return a paginated list of serialized Postgres users. """ return guestagent_utils.serialize_list( self._get_users(context), limit=limit, marker=marker, include_marker=include_marker) def _get_users(self, context): """Return all non-system Postgres users on the instance.""" results = self.query( pgsql_query.UserQuery.list(ignore=self.ignore_users), timeout=30, ) names = set([row[0].strip() for row in results]) return [self._build_user(context, name, results) for name in names] def _build_user(self, context, username, acl=None): """Build a model representation of a Postgres user. Include all databases it has access to. """ user = models.PostgreSQLUser(username) if acl: dbs = [models.PostgreSQLSchema(row[1].strip(), character_set=row[2], collate=row[3]) for row in acl if row[0] == username and row[1] is not None] for d in dbs: user.databases.append(d.serialize()) return user def delete_user(self, context, user): """Delete the specified user. """ self._drop_user( context, models.PostgreSQLUser.deserialize(user)) def _drop_user(self, context, user): """Drop a given Postgres user. :param user: User to be dropped. :type user: PostgreSQLUser """ # Postgresql requires that you revoke grants before dropping the user databases = list(self.list_access(context, user.name, None)) for db in databases: db_schema = models.PostgreSQLSchema.deserialize(db) self.revoke_access(context, user.name, None, db_schema.name) LOG.info( "{guest_id}: Dropping user {name}.".format( guest_id=CONF.guest_id, name=user.name, ) ) self.psql( pgsql_query.UserQuery.drop(name=user.name), timeout=30, ) def get_user(self, context, username, hostname): """Return a serialized representation of a user with a given name. """ user = self._find_user(context, username) return user.serialize() if user is not None else None def _find_user(self, context, username): """Lookup a user with a given username. Return a new Postgres user instance or None if no match is found. """ results = self.query( pgsql_query.UserQuery.get(name=username), timeout=30, ) if results: return self._build_user(context, username, results) return None def user_exists(self, username): """Return whether a given user exists on the instance.""" results = self.query( pgsql_query.UserQuery.get(name=username), timeout=30, ) return bool(results) def change_passwords(self, context, users): """Change the passwords of one or more existing users. The users parameter is a list of serialized Postgres users. """ for user in users: self.alter_user( context, models.PostgreSQLUser.deserialize(user), None) def alter_user(self, context, user, encrypt_password=None, *options): """Change the password and options of an existing users. 
:param user: User to be altered. :type user: PostgreSQLUser :param encrypt_password: Store passwords encrypted if True. Fallback to configured default behavior if None. :type encrypt_password: boolean :param options: Other user options. :type options: list """ LOG.info( "{guest_id}: Altering user {user} {with_clause}." .format( guest_id=CONF.guest_id, user=user.name, with_clause=pgsql_query.UserQuery._build_with_clause( '', encrypt_password, *options ), ) ) self.psql( pgsql_query.UserQuery.alter_user( user.name, user.password, encrypt_password, *options), timeout=30, ) def update_attributes(self, context, username, hostname, user_attrs): """Change the attributes of one existing user. The username and hostname parameters are strings. The user_attrs parameter is a dictionary in the following form: {"password": "", "name": ""} Each key/value pair in user_attrs is optional. """ user = self._build_user(context, username) new_username = user_attrs.get('name') new_password = user_attrs.get('password') if new_username is not None: self._rename_user(context, user, new_username) # Make sure we can retrieve the renamed user. user = self._find_user(context, new_username) if user is None: raise exception.TroveError(_( "Renamed user %s could not be found on the instance.") % new_username) if new_password is not None: user.password = new_password self.alter_user(context, user) def _rename_user(self, context, user, new_username): """Rename a given Postgres user and transfer all access to the new name. :param user: User to be renamed. :type user: PostgreSQLUser """ LOG.info( "{guest_id}: Changing username for {old} to {new}.".format( guest_id=CONF.guest_id, old=user.name, new=new_username, ) ) # PostgreSQL handles the permission transfer itself. self.psql( pgsql_query.UserQuery.update_name( old=user.name, new=new_username, ), timeout=30, ) def psql(self, statement, timeout=30): """Execute a non-returning statement (usually DDL); Turn autocommit ON (this is necessary for statements that cannot run within an implicit transaction, like CREATE DATABASE). """ return self.__connection.execute(statement) def query(self, query, timeout=30): """Execute a query and return the result set. """ return self.__connection.query(query) @property def ignore_users(self): return cfg.get_ignored_users() @property def ignore_dbs(self): return cfg.get_ignored_dbs() class PostgresConnection(object): def __init__(self, **connection_args): self._connection_args = connection_args def execute(self, statement, identifiers=None, data_values=None): """Execute a non-returning statement. """ self._execute_stmt(statement, identifiers, data_values, False, autocommit=True) def query(self, query, identifiers=None, data_values=None): """Execute a query and return the result set. 
""" return self._execute_stmt(query, identifiers, data_values, True) def _execute_stmt(self, statement, identifiers, data_values, fetch, autocommit=False): if statement: with psycopg2.connect(**self._connection_args) as connection: connection.autocommit = autocommit with connection.cursor() as cursor: cursor.execute( self._bind(statement, identifiers), data_values) if fetch: return cursor.fetchall() else: raise exception.UnprocessableEntity(_("Invalid SQL statement: %s") % statement) def _bind(self, statement, identifiers): if identifiers: return statement.format(*identifiers) return statement class PostgresLocalhostConnection(PostgresConnection): HOST = 'localhost' def __init__(self, user, password=None, port=5432): super(PostgresLocalhostConnection, self).__init__( user=user, password=password, host=self.HOST, port=port) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/pxc/0000755000175000017500000000000000000000000024731 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/pxc/__init__.py0000644000175000017500000000000000000000000027030 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/pxc/manager.py0000644000175000017500000000211600000000000026715 0ustar00coreycorey00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.guestagent.datastore.experimental.pxc import service as pxc_service from trove.guestagent.datastore.galera_common import manager from trove.guestagent.datastore.mysql_common import service as mysql_service class Manager(manager.GaleraManager): def __init__(self): super(Manager, self).__init__(pxc_service.PXCApp, mysql_service.BaseMySqlAppStatus, pxc_service.PXCAdmin) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/pxc/service.py0000644000175000017500000000373300000000000026751 0ustar00coreycorey00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from trove.guestagent.datastore.galera_common import service as galera_service from trove.guestagent.datastore.mysql_common import service as mysql_service class PXCApp(galera_service.GaleraApp): def __init__(self, status): super(PXCApp, self).__init__( status, mysql_service.BaseLocalSqlClient, mysql_service.BaseKeepAliveConnection) @property def mysql_service(self): result = super(PXCApp, self).mysql_service if result['type'] == 'sysvinit': result['cmd_bootstrap_galera_cluster'] = ( "sudo service %s bootstrap-pxc" % result['service']) elif result['type'] == 'systemd': result['cmd_bootstrap_galera_cluster'] = ( "sudo systemctl start %s@bootstrap.service" % result['service']) return result @property def cluster_configuration(self): return self.configuration_manager.get_value('mysqld') class PXCRootAccess(mysql_service.BaseMySqlRootAccess): def __init__(self): super(PXCRootAccess, self).__init__( mysql_service.BaseLocalSqlClient, PXCApp(mysql_service.BaseMySqlAppStatus.get())) class PXCAdmin(mysql_service.BaseMySqlAdmin): def __init__(self): super(PXCAdmin, self).__init__( mysql_service.BaseLocalSqlClient, PXCRootAccess(), PXCApp) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/redis/0000755000175000017500000000000000000000000025245 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/redis/__init__.py0000644000175000017500000000000000000000000027344 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/redis/manager.py0000644000175000017500000003201500000000000027232 0ustar00coreycorey00000000000000# Copyright (c) 2013 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.notification import EndNotification from trove.common import utils from trove.guestagent import backup from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.redis import service, system from trove.guestagent.datastore import manager from trove.guestagent import guest_log from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): """ This is the Redis manager class. 
It is dynamically loaded based off of the service_type of the trove instance """ GUEST_LOG_DEFS_REDIS_LABEL = 'redis' def __init__(self): super(Manager, self).__init__('redis') self._app = service.RedisApp() @property def status(self): return self._app.status @property def configuration_manager(self): return self._app.configuration_manager def get_datastore_log_defs(self): logfile = self._app.get_logfile() if not logfile: return {} return { self.GUEST_LOG_DEFS_REDIS_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.SYS, self.GUEST_LOG_USER_LABEL: system.REDIS_OWNER, self.GUEST_LOG_FILE_LABEL: logfile } } def _perform_restore(self, backup_info, context, restore_location, app): """Perform a restore on this instance.""" LOG.info("Restoring database from backup %s.", backup_info['id']) try: backup.restore(context, backup_info, restore_location) except Exception: LOG.exception("Error performing restore from backup %s.", backup_info['id']) app.status.set_status(rd_instance.ServiceStatuses.FAILED) raise LOG.info("Restored database successfully.") def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" if device_path: device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() device.mount(mount_point) operating_system.chown(mount_point, 'redis', 'redis', as_root=True) LOG.debug('Mounted the volume.') self._app.install_if_needed(packages) LOG.info('Writing redis configuration.') if cluster_config: config_contents = (config_contents + "\n" + "cluster-enabled yes\n" + "cluster-config-file cluster.conf\n") self._app.configuration_manager.save_configuration(config_contents) self._app.apply_initial_guestagent_configuration() if backup_info: persistence_dir = self._app.get_working_dir() self._perform_restore(backup_info, context, persistence_dir, self._app) else: # If we're not restoring, we have to force a restart of the # server manually so that the configuration stuff takes effect self._app.restart() if snapshot: self.attach_replica(context, snapshot, snapshot['config']) def pre_upgrade(self, context): mount_point = self._app.get_working_dir() save_etc_dir = "%s/etc" % mount_point home_save = "%s/trove_user" % mount_point self._app.status.begin_restart() self._app.stop_db() operating_system.copy("%s/." % system.REDIS_CONF_DIR, save_etc_dir, preserve=True, as_root=True) operating_system.copy("%s/." % os.path.expanduser('~'), home_save, preserve=True, as_root=True) self.unmount_volume(context, mount_point=mount_point) return { 'mount_point': mount_point, 'save_etc_dir': save_etc_dir, 'home_save': home_save } def post_upgrade(self, context, upgrade_info): self._app.stop_db() if 'device' in upgrade_info: self.mount_volume(context, mount_point=upgrade_info['mount_point'], device_path=upgrade_info['device'], write_to_fstab=True) operating_system.chown(path=upgrade_info['mount_point'], user=system.REDIS_OWNER, group=system.REDIS_OWNER, recursive=True, as_root=True) self._restore_home_directory(upgrade_info['home_save']) self._restore_directory(upgrade_info['save_etc_dir'], system.REDIS_CONF_DIR) self._app = service.RedisApp() self._app.start_db() self._app.status.end_restart() def restart(self, context): """ Restart this redis instance. This method is called when the guest agent gets a restart message from the taskmanager. 
""" LOG.debug("Restart called.") self._app.restart() def start_db_with_conf_changes(self, context, config_contents): """ Start this redis instance with new conf changes. """ LOG.debug("Start DB with conf changes called.") self._app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): """ Stop this redis instance. This method is called when the guest agent gets a stop message from the taskmanager. """ LOG.debug("Stop DB called.") self._app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def create_backup(self, context, backup_info): """Create a backup of the database.""" LOG.debug("Creating backup.") with EndNotification(context): backup.backup(context, backup_info) def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") if remove: self._app.remove_overrides() else: self._app.update_overrides(context, overrides, remove) def apply_overrides(self, context, overrides): LOG.debug("Applying overrides.") self._app.apply_overrides(self._app.admin, overrides) def backup_required_for_replication(self, context): return self.replication.backup_required_for_replication() def get_replication_snapshot(self, context, snapshot_info, replica_source_config=None): LOG.debug("Getting replication snapshot.") self.replication.enable_as_master(self._app, replica_source_config) snapshot_id, log_position = self.replication.snapshot_for_replication( context, self._app, None, snapshot_info) volume_stats = self.get_filesystem_stats(context, None) replication_snapshot = { 'dataset': { 'datastore_manager': self.manager, 'dataset_size': volume_stats.get('used', 0.0), 'volume_size': volume_stats.get('total', 0.0), 'snapshot_id': snapshot_id }, 'replication_strategy': self.replication_strategy, 'master': self.replication.get_master_ref(self._app, snapshot_info), 'log_position': log_position } return replication_snapshot def enable_as_master(self, context, replica_source_config): LOG.debug("Calling enable_as_master.") self.replication.enable_as_master(self._app, replica_source_config) def detach_replica(self, context, for_failover=False): LOG.debug("Detaching replica.") replica_info = self.replication.detach_slave(self._app, for_failover) return replica_info def get_replica_context(self, context): LOG.debug("Getting replica context.") replica_info = self.replication.get_replica_context(self._app) return replica_info def _validate_slave_for_replication(self, context, replica_info): if replica_info['replication_strategy'] != self.replication_strategy: raise exception.IncompatibleReplicationStrategy( replica_info.update({ 'guest_strategy': self.replication_strategy })) def attach_replica(self, context, replica_info, slave_config): LOG.debug("Attaching replica.") try: if 'replication_strategy' in replica_info: self._validate_slave_for_replication(context, replica_info) self.replication.enable_as_slave(self._app, replica_info, slave_config) except Exception: LOG.exception("Error enabling replication.") raise def make_read_only(self, context, read_only): LOG.debug("Executing make_read_only(%s)", read_only) self._app.make_read_only(read_only) def _get_repl_info(self): return self._app.admin.get_info('replication') def _get_master_host(self): slave_info = self._get_repl_info() return slave_info and slave_info['master_host'] or None def _get_repl_offset(self): repl_info = self._get_repl_info() LOG.debug("Got repl info: %s", repl_info) offset_key = '%s_repl_offset' % repl_info['role'] offset = repl_info[offset_key] LOG.debug("Found offset 
%(offset)s for key %(key)s.", {'offset': offset, 'key': offset_key}) return int(offset) def get_last_txn(self, context): master_host = self._get_master_host() repl_offset = self._get_repl_offset() return master_host, repl_offset def get_latest_txn_id(self, context): LOG.info("Retrieving latest repl offset.") return self._get_repl_offset() def wait_for_txn(self, context, txn): LOG.info("Waiting on repl offset '%s'.", txn) def _wait_for_txn(): current_offset = self._get_repl_offset() LOG.debug("Current offset: %s.", current_offset) return current_offset >= txn try: utils.poll_until(_wait_for_txn, time_out=120) except exception.PollTimeOut: raise RuntimeError(_("Timeout occurred waiting for Redis repl " "offset to change to '%s'.") % txn) def cleanup_source_on_replica_detach(self, context, replica_info): LOG.debug("Cleaning up the source on the detach of a replica.") self.replication.cleanup_source_on_replica_detach(self._app, replica_info) def demote_replication_master(self, context): LOG.debug("Demoting replica source.") self.replication.demote_master(self._app) def cluster_meet(self, context, ip, port): LOG.debug("Executing cluster_meet to join node to cluster.") self._app.cluster_meet(ip, port) def get_node_ip(self, context): LOG.debug("Retrieving cluster node ip address.") return self._app.get_node_ip() def get_node_id_for_removal(self, context): LOG.debug("Validating removal of node from cluster.") return self._app.get_node_id_for_removal() def remove_nodes(self, context, node_ids): LOG.debug("Removing nodes from cluster.") self._app.remove_nodes(node_ids) def cluster_addslots(self, context, first_slot, last_slot): LOG.debug("Executing cluster_addslots to assign hash slots %s-%s.", first_slot, last_slot) self._app.cluster_addslots(first_slot, last_slot) def enable_root(self, context): LOG.debug("Enabling authentication.") return self._app.enable_root() def enable_root_with_password(self, context, root_password=None): LOG.debug("Enabling authentication with password.") return self._app.enable_root(root_password) def disable_root(self, context): LOG.debug("Disabling authentication.") return self._app.disable_root() def get_root_password(self, context): LOG.debug("Getting auth password.") return self._app.get_auth_password() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/redis/service.py0000644000175000017500000005342700000000000027272 0ustar00coreycorey00000000000000# Copyright (c) 2013 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
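# ---------------------------------------------------------------------
# Illustrative aside (mirrors RedisApp._join_lists defined later in this
# module): repeated redis.conf directives are flattened into a single
# CONFIG SET argument, so two "save" lines become
# CONFIG SET save "900 1 300 10".
def join_lists(items, sep=' '):
    """Flatten a (possibly nested) list of strings into one string."""
    if isinstance(items, list):
        return sep.join(
            sep.join(e) if isinstance(e, list) else e for e in items)
    return items

# join_lists([['900', '1'], ['300', '10']])  # -> '900 1 300 10'
# ---------------------------------------------------------------------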
import os import redis from redis.exceptions import BusyLoadingError, ConnectionError from oslo_log import log as logging from trove.common import cfg from trove.common.db.redis.models import RedisRootUser from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.stream_codecs import PropertiesCodec, StringConverter from trove.common import utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import OneFileOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.redis import system from trove.guestagent.datastore import service from trove.guestagent import pkg LOG = logging.getLogger(__name__) TIME_OUT = 1200 CONF = cfg.CONF CLUSTER_CFG = 'clustering' SYS_OVERRIDES_AUTH = 'auth_password' packager = pkg.Package() class RedisAppStatus(service.BaseDbStatus): """ Handles all of the status updating for the redis guest agent. """ def __init__(self, client): super(RedisAppStatus, self).__init__() self.__client = client def set_client(self, client): self.__client = client def _get_actual_db_status(self): try: if self.__client.ping(): return rd_instance.ServiceStatuses.RUNNING except ConnectionError: return rd_instance.ServiceStatuses.SHUTDOWN except BusyLoadingError: return rd_instance.ServiceStatuses.BLOCKED except Exception: LOG.exception("Error getting Redis status.") return rd_instance.ServiceStatuses.CRASHED def cleanup_stalled_db_services(self): utils.execute_with_timeout('pkill', '-9', 'redis-server', run_as_root=True, root_helper='sudo') class RedisApp(object): """ Handles installation and configuration of redis on a trove instance. """ def __init__(self, state_change_wait_time=None): """ Sets default status and state_change_wait_time """ if state_change_wait_time: self.state_change_wait_time = state_change_wait_time else: self.state_change_wait_time = CONF.state_change_wait_time revision_dir = guestagent_utils.build_file_path( os.path.dirname(system.REDIS_CONFIG), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) config_value_mappings = {'yes': True, 'no': False, "''": None} self._value_converter = StringConverter(config_value_mappings) self.configuration_manager = ConfigurationManager( system.REDIS_CONFIG, system.REDIS_OWNER, system.REDIS_OWNER, PropertiesCodec( unpack_singletons=False, string_mappings=config_value_mappings ), requires_root=True, override_strategy=OneFileOverrideStrategy(revision_dir)) self.admin = self._build_admin_client() self.status = RedisAppStatus(self.admin) def _build_admin_client(self): password = self.get_configuration_property('requirepass') socket = self.get_configuration_property('unixsocket') cmd = self.get_config_command_name() return RedisAdmin(password=password, unix_socket_path=socket, config_cmd=cmd) def _refresh_admin_client(self): self.admin = self._build_admin_client() self.status.set_client(self.admin) return self.admin def install_if_needed(self, packages): """ Install redis if needed do nothing if it is already installed. """ LOG.info('Preparing Guest as Redis Server.') if not packager.pkg_is_installed(packages): LOG.info('Installing Redis.') self._install_redis(packages) LOG.info('Redis installed completely.') def _install_redis(self, packages): """ Install the redis server. 
""" LOG.debug('Installing redis server.') LOG.debug("Creating %s.", system.REDIS_CONF_DIR) operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True) pkg_opts = {} packager.pkg_install(packages, pkg_opts, TIME_OUT) self.start_db() LOG.debug('Finished installing redis server.') def stop_db(self, update_db=False, do_not_start_on_reboot=False): self.status.stop_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, disable_on_boot=do_not_start_on_reboot, update_db=update_db) def restart(self): self.status.restart_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time) def update_overrides(self, context, overrides, remove=False): if overrides: self.configuration_manager.apply_user_override(overrides) # apply requirepass at runtime # TODO(zhaochao): updating 'requirepass' here will be removed # in the future releases, Redis only use enable_root/disable_root # to set this parameter. if 'requirepass' in overrides: self.admin.config_set('requirepass', overrides['requirepass']) self._refresh_admin_client() def apply_overrides(self, client, overrides): """Use the 'CONFIG SET' command to apply configuration at runtime. Commands that appear multiple times have values separated by a white space. For instance, the following two 'save' directives from the configuration file... save 900 1 save 300 10 ... would be applied in a single command as: CONFIG SET save "900 1 300 10" Note that the 'CONFIG' command has been renamed to prevent users from using it to bypass configuration groups. """ for prop_name, prop_args in overrides.items(): args_string = self._join_lists( self._value_converter.to_strings(prop_args), ' ') client.config_set(prop_name, args_string) # NOTE(zhaochao): requirepass applied in update_overrides is # only kept for back compatibility. Now requirepass is set # via enable_root/disable_root, Redis admin client should be # refreshed here. if prop_name == "requirepass": client = self._refresh_admin_client() def _join_lists(self, items, sep): """Join list items (including items from sub-lists) into a string. Non-list inputs are returned unchanged. _join_lists('1234', ' ') = "1234" _join_lists(['1','2','3','4'], ' ') = "1 2 3 4" _join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4" """ if isinstance(items, list): return sep.join([sep.join(e) if isinstance(e, list) else e for e in items]) return items def remove_overrides(self): self.configuration_manager.remove_user_override() def make_read_only(self, read_only): # Redis has no mechanism to make an instance read-only at present pass def start_db_with_conf_changes(self, config_contents): LOG.info('Starting redis with conf changes.') if self.status.is_running: format = 'Cannot start_db_with_conf_changes because status is %s.' LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info("Initiating config.") self.configuration_manager.save_configuration(config_contents) # The configuration template has to be updated with # guestagent-controlled settings. self.apply_initial_guestagent_configuration() self.start_db(True) def start_db(self, update_db=False): self.status.start_db_service( system.SERVICE_CANDIDATES, self.state_change_wait_time, enable_on_boot=True, update_db=update_db) def apply_initial_guestagent_configuration(self): """Update guestagent-controlled configuration properties. """ # Hide the 'CONFIG' command from end users by mangling its name. 
self.admin.set_config_command_name(self._mangle_config_command_name()) self.configuration_manager.apply_system_override( {'daemonize': 'yes', 'protected-mode': 'no', 'supervised': 'systemd', 'pidfile': system.REDIS_PID_FILE, 'logfile': system.REDIS_LOG_FILE, 'dir': system.REDIS_DATA_DIR}) def get_config_command_name(self): """Get current name of the 'CONFIG' command. """ renamed_cmds = self.configuration_manager.get_value('rename-command') if renamed_cmds: for name_pair in renamed_cmds: if name_pair[0] == 'CONFIG': return name_pair[1] return None def _mangle_config_command_name(self): """Hide the 'CONFIG' command from the clients by renaming it to a random string known only to the guestagent. Return the mangled name. """ mangled = utils.generate_random_password() self._rename_command('CONFIG', mangled) return mangled def _rename_command(self, old_name, new_name): """It is possible to completely disable a command by renaming it to an empty string. """ self.configuration_manager.apply_system_override( {'rename-command': [old_name, new_name]}) def get_logfile(self): """Specify the log file name. Also the empty string can be used to force Redis to log on the standard output. Note that if you use standard output for logging but daemonize, logs will be sent to /dev/null """ return self.get_configuration_property('logfile') def get_db_filename(self): """The filename where to dump the DB. """ return self.get_configuration_property('dbfilename') def get_working_dir(self): """The DB will be written inside this directory, with the filename specified the 'dbfilename' configuration directive. The Append Only File will also be created inside this directory. """ return self.get_configuration_property('dir') def get_persistence_filepath(self): """Returns the full path to the persistence file.""" return guestagent_utils.build_file_path( self.get_working_dir(), self.get_db_filename()) def get_port(self): """Port for this instance or default if not set.""" return self.get_configuration_property('port', system.REDIS_PORT) def get_auth_password(self): """Client authentication password for this instance or None if not set. """ return self.get_configuration_property('requirepass') def is_appendonly_enabled(self): """True if the Append Only File (AOF) persistence mode is enabled. """ return self.get_configuration_property('appendonly', False) def get_append_file_name(self): """The name of the append only file (AOF). """ return self.get_configuration_property('appendfilename') def is_cluster_enabled(self): """Only nodes that are started as cluster nodes can be part of a Redis Cluster. """ return self.get_configuration_property('cluster-enabled', False) def enable_cluster(self): """In order to start a Redis instance as a cluster node enable the cluster support """ self.configuration_manager.apply_system_override( {'cluster-enabled': 'yes'}, CLUSTER_CFG) def get_cluster_config_filename(self): """Cluster node configuration file. """ return self.get_configuration_property('cluster-config-file') def set_cluster_config_filename(self, name): """Make sure that instances running in the same system do not have overlapping cluster configuration file names. """ self.configuration_manager.apply_system_override( {'cluster-config-file': name}, CLUSTER_CFG) def get_cluster_node_timeout(self): """Cluster node timeout is the amount of milliseconds a node must be unreachable for it to be considered in failure state. 
""" return self.get_configuration_property('cluster-node-timeout') def get_configuration_property(self, name, default=None): """Return the value of a Redis configuration property. Returns a single value for single-argument properties or a list otherwise. """ return utils.unpack_singleton( self.configuration_manager.get_value(name, default)) def cluster_meet(self, ip, port): try: utils.execute_with_timeout('redis-cli', 'cluster', 'meet', ip, port) except exception.ProcessExecutionError: LOG.exception('Error joining node to cluster at %s.', ip) raise def cluster_addslots(self, first_slot, last_slot): try: group_size = 200 # Create list of slots represented in strings # eg. ['10', '11', '12', '13'] slots = list(map(str, range(first_slot, last_slot + 1))) while slots: cmd = (['redis-cli', 'cluster', 'addslots'] + slots[0:group_size]) out, err = utils.execute_with_timeout(*cmd, run_as_root=True, root_helper='sudo') if 'OK' not in out: raise RuntimeError(_('Error executing addslots: %s') % out) del slots[0:group_size] except exception.ProcessExecutionError: LOG.exception('Error adding slots %(first_slot)s-%(last_slot)s' ' to cluster.', {'first_slot': first_slot, 'last_slot': last_slot}) raise def _get_node_info(self): try: out, _ = utils.execute_with_timeout('redis-cli', '--csv', 'cluster', 'nodes') return [line.split(' ') for line in out.splitlines()] except exception.ProcessExecutionError: LOG.exception('Error getting node info.') raise def _get_node_details(self): for node_details in self._get_node_info(): if 'myself' in node_details[2]: return node_details raise exception.TroveError(_("Unable to determine node details")) def get_node_ip(self): """Returns [ip, port] where both values are strings""" return self._get_node_details()[1].split(':') def get_node_id_for_removal(self): node_details = self._get_node_details() node_id = node_details[0] my_ip = node_details[1].split(':')[0] try: slots, _ = utils.execute_with_timeout('redis-cli', '--csv', 'cluster', 'slots') return node_id if my_ip not in slots else None except exception.ProcessExecutionError: LOG.exception('Error validating node to for removal.') raise def remove_nodes(self, node_ids): try: for node_id in node_ids: utils.execute_with_timeout('redis-cli', 'cluster', 'forget', node_id) except exception.ProcessExecutionError: LOG.exception('Error removing node from cluster.') raise def enable_root(self, password=None): if not password: password = utils.generate_random_password() redis_password = RedisRootUser(password=password) try: self.configuration_manager.apply_system_override( {'requirepass': password, 'masterauth': password}, change_id=SYS_OVERRIDES_AUTH) self.apply_overrides( self.admin, {'requirepass': password, 'masterauth': password}) except exception.TroveError: LOG.exception('Error enabling authentication for instance.') raise return redis_password.serialize() def disable_root(self): try: self.configuration_manager.remove_system_override( change_id=SYS_OVERRIDES_AUTH) self.apply_overrides(self.admin, {'requirepass': '', 'masterauth': ''}) except exception.TroveError: LOG.exception('Error disabling authentication for instance.') raise class RedisAdmin(object): """Handles administrative tasks on the Redis database. 
""" DEFAULT_CONFIG_CMD = 'CONFIG' def __init__(self, password=None, unix_socket_path=None, config_cmd=None): self.__client = redis.StrictRedis( password=password, unix_socket_path=unix_socket_path) self.__config_cmd_name = config_cmd or self.DEFAULT_CONFIG_CMD def set_config_command_name(self, name): """Set name of the 'CONFIG' command or None for default. """ self.__config_cmd_name = name or self.DEFAULT_CONFIG_CMD def ping(self): """Ping the Redis server and return True if a response is received. """ return self.__client.ping() def get_info(self, section=None): return self.__client.info(section=section) def persist_data(self): save_cmd = 'SAVE' last_save = self.__client.lastsave() LOG.debug("Starting Redis data persist") save_ok = True try: save_ok = self.__client.bgsave() except redis.exceptions.ResponseError as re: # If an auto-save is in progress just use it, since it must have # just happened if "Background save already in progress" in str(re): LOG.info("Waiting for existing background save to finish") else: raise if save_ok: save_cmd = 'BGSAVE' def _timestamp_changed(): return last_save != self.__client.lastsave() try: utils.poll_until(_timestamp_changed, sleep_time=2, time_out=TIME_OUT) except exception.PollTimeOut: raise RuntimeError(_("Timeout occurred waiting for Redis " "persist (%s) to complete.") % save_cmd) # If the background save fails for any reason, try doing a foreground # one. This blocks client connections, so we don't want it to be # the default. elif not self.__client.save(): raise exception.BackupCreationError(_("Could not persist " "Redis data (%s)") % save_cmd) LOG.debug("Redis data persist (%s) completed", save_cmd) def set_master(self, host=None, port=None): self.__client.slaveof(host, port) def config_set(self, name, value): response = self.execute( '%s %s' % (self.__config_cmd_name, 'SET'), name, value) if not self._is_ok_response(response): raise exception.UnprocessableEntity( _("Could not set configuration property '%(name)s' to " "'%(value)s'.") % {'name': name, 'value': value}) def _is_ok_response(self, response): """Return True if a given Redis response is 'OK'. """ return response and redis.client.bool_ok(response) def execute(self, cmd_name, *cmd_args, **options): """Execute a command and return a parsed response. 
""" try: return self.__client.execute_command(cmd_name, *cmd_args, **options) except Exception as e: LOG.exception(e) raise exception.TroveError( _("Redis command '%(cmd_name)s %(cmd_args)s' failed.") % {'cmd_name': cmd_name, 'cmd_args': ' '.join(cmd_args)}) def wait_until(self, key, wait_value, section=None, timeout=None): """Polls redis until the specified 'key' changes to 'wait_value'.""" timeout = timeout or CONF.usage_timeout LOG.debug("Waiting for Redis '%(key)s' to be: %(value)s.", {'key': key, 'value': wait_value}) def _check_info(): redis_info = self.get_info(section) if key in redis_info: current_value = redis_info[key] LOG.debug("Found '%(value)s' for field %(key)s.", {'value': current_value, 'key': key}) else: LOG.error('Output from Redis command: %s', redis_info) raise RuntimeError(_("Field %(field)s not found " "(Section: '%(sec)s').") % ({'field': key, 'sec': section})) return current_value == wait_value try: utils.poll_until(_check_info, time_out=timeout) except exception.PollTimeOut: raise RuntimeError(_("Timeout occurred waiting for Redis field " "'%(field)s' to change to '%(val)s'.") % {'field': key, 'val': wait_value}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/redis/system.py0000644000175000017500000000227400000000000027150 0ustar00coreycorey00000000000000# Copyright (c) 2013 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Determines operating system version and OS dependent commands. """ from trove.guestagent.common.operating_system import get_os REDIS_OWNER = 'redis' REDIS_CONFIG = '/etc/redis/redis.conf' REDIS_PID_FILE = '/var/run/redis/redis-server.pid' REDIS_LOG_FILE = '/var/log/redis/server.log' REDIS_CONF_DIR = '/etc/redis' REDIS_DATA_DIR = '/var/lib/redis' REDIS_PORT = '6379' REDIS_INIT = '/etc/init/redis-server.conf' REDIS_PACKAGE = '' SERVICE_CANDIDATES = ['redis-server', 'redis'] OS = get_os() if OS == 'redhat': REDIS_CONFIG = '/etc/redis.conf' REDIS_PACKAGE = 'redis' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/vertica/0000755000175000017500000000000000000000000025574 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/vertica/__init__.py0000644000175000017500000000000000000000000027673 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/vertica/manager.py0000644000175000017500000001366400000000000027572 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from oslo_log import log as logging from trove.common.i18n import _ from trove.common import instance as rd_ins from trove.guestagent.datastore.experimental.vertica.service import ( VerticaAppStatus) from trove.guestagent.datastore.experimental.vertica.service import VerticaApp from trove.guestagent.datastore import manager from trove.guestagent import volume LOG = logging.getLogger(__name__) class Manager(manager.Manager): def __init__(self): self.appStatus = VerticaAppStatus() self.app = VerticaApp(self.appStatus) super(Manager, self).__init__('vertica') @property def status(self): return self.appStatus @property def configuration_manager(self): return self.app.configuration_manager def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" if device_path: device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(mount_point): # rsync any existing data device.migrate_data(mount_point) # mount the volume device.mount(mount_point) LOG.debug("Mounted the volume.") self.app.install_if_needed(packages) self.app.prepare_for_install_vertica() if cluster_config is None: self.app.install_vertica() self.app.create_db() self.app.add_udls() if config_contents: self.app.configuration_manager.save_configuration( config_contents) elif cluster_config['instance_type'] not in ["member", "master"]: raise RuntimeError(_("Bad cluster configuration: instance type " "given as %s.") % cluster_config['instance_type']) def restart(self, context): LOG.debug("Restarting the database.") self.app.restart() LOG.debug("Restarted the database.") def stop_db(self, context, do_not_start_on_reboot=False): LOG.debug("Stopping the database.") self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) LOG.debug("Stopped the database.") def enable_root(self, context): LOG.debug("Enabling root.") return self.app.enable_root() def enable_root_with_password(self, context, root_password=None): LOG.debug("Enabling root.") return self.app.enable_root(root_password) def is_root_enabled(self, context): LOG.debug("Checking if root is enabled.") return self.app.is_root_enabled() def start_db_with_conf_changes(self, context, config_contents): LOG.debug("Starting with configuration changes.") self.app.start_db_with_conf_changes(config_contents) def get_public_keys(self, context, user): LOG.debug("Retrieving public keys for %s.", user) return self.app.get_public_keys(user) def authorize_public_keys(self, context, user, public_keys): LOG.debug("Authorizing public keys for %s.", user) return self.app.authorize_public_keys(user, public_keys) def install_cluster(self, context, members): try: LOG.debug("Installing cluster on members: %s.", members) self.app.install_cluster(members) self.app.add_udls() LOG.debug("install_cluster call has 
finished.") except Exception: LOG.exception('Cluster installation failed.') self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED) raise def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") if remove: self.app.remove_overrides() else: self.app.update_overrides(context, overrides, remove) def apply_overrides(self, context, overrides): if overrides: LOG.debug("Applying overrides: " + str(overrides)) self.app.apply_overrides(overrides) def grow_cluster(self, context, members): try: LOG.debug("Growing cluster to members: %s.", members) self.app.grow_cluster(members) LOG.debug("grow_cluster call has finished.") except Exception: LOG.exception('Cluster grow failed.') self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED) raise def shrink_cluster(self, context, members): try: LOG.debug("Shrinking cluster members: %s.", members) self.app.shrink_cluster(members) LOG.debug("shrink_cluster call has finished.") except Exception: LOG.exception('Cluster shrink failed.') self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED) raise def mark_design_ksafe(self, context, k): try: LOG.debug("Setting vertica k-safety to %s.", k) self.app.mark_design_ksafe(k) except Exception: LOG.exception('K-safety setting failed.') self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/vertica/service.py0000644000175000017500000006635500000000000027625 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import tempfile from oslo_log import log as logging from oslo_utils import netutils from six.moves import configparser from trove.common import cfg from trove.common.db import models from trove.common import exception from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.stream_codecs import PropertiesCodec from trove.common import utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.experimental.vertica import system from trove.guestagent.datastore import service from trove.guestagent import pkg from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF packager = pkg.Package() DB_NAME = 'db_srvr' MOUNT_POINT = CONF.vertica.mount_point # We will use a fake configuration file for the options managed through # configuration groups that we apply directly with ALTER DB ... SET ... 
FAKE_CFG = os.path.join(MOUNT_POINT, "vertica.cfg.fake") class VerticaAppStatus(service.BaseDbStatus): def _get_actual_db_status(self): """Get the status of dbaas and report it back.""" try: out, err = system.shell_execute(system.STATUS_ACTIVE_DB, system.VERTICA_ADMIN) if out.strip() == DB_NAME: # UP status is confirmed LOG.info("Service Status is RUNNING.") return rd_instance.ServiceStatuses.RUNNING else: LOG.info("Service Status is SHUTDOWN.") return rd_instance.ServiceStatuses.SHUTDOWN except exception.ProcessExecutionError: LOG.exception("Failed to get database status.") return rd_instance.ServiceStatuses.CRASHED class VerticaApp(object): """Prepares DBaaS on a Guest container.""" def __init__(self, status): self.state_change_wait_time = CONF.state_change_wait_time self.status = status revision_dir = \ guestagent_utils.build_file_path( os.path.join(MOUNT_POINT, os.path.dirname(system.VERTICA_ADMIN)), ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) if not operating_system.exists(FAKE_CFG): operating_system.write_file(FAKE_CFG, '', as_root=True) operating_system.chown(FAKE_CFG, system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP, as_root=True) operating_system.chmod(FAKE_CFG, FileMode.ADD_GRP_RX_OTH_RX(), as_root=True) self.configuration_manager = \ ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP, PropertiesCodec(delimiter='='), requires_root=True, override_strategy=ImportOverrideStrategy( revision_dir, "cnf")) def update_overrides(self, context, overrides, remove=False): if overrides: self.apply_overrides(overrides) def remove_overrides(self): config = self.configuration_manager.get_user_override() self._reset_config(config) self.configuration_manager.remove_user_override() def apply_overrides(self, overrides): self.configuration_manager.apply_user_override(overrides) self._apply_config(overrides) def _reset_config(self, config): try: db_password = self._get_database_password() for k, v in config.items(): alter_db_cmd = system.ALTER_DB_RESET_CFG % (DB_NAME, str(k)) out, err = system.exec_vsql_command(db_password, alter_db_cmd) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to remove config %s") % k) except Exception: LOG.exception("Vertica configuration remove failed.") raise RuntimeError(_("Vertica configuration remove failed.")) LOG.info("Vertica configuration reset completed.") def _apply_config(self, config): try: db_password = self._get_database_password() for k, v in config.items(): alter_db_cmd = system.ALTER_DB_CFG % (DB_NAME, str(k), str(v)) out, err = system.exec_vsql_command(db_password, alter_db_cmd) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to apply config %s") % k) except Exception: LOG.exception("Vertica configuration apply failed") raise RuntimeError(_("Vertica configuration apply failed")) LOG.info("Vertica config apply completed.") def _enable_db_on_boot(self): try: command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c", (system.SET_RESTART_POLICY % (DB_NAME, "always"))] subprocess.Popen(command) command = ["sudo", "su", "-", "root", "-c", (system.VERTICA_AGENT_SERVICE_COMMAND % "enable")] subprocess.Popen(command) except Exception: LOG.exception("Failed to enable database on boot.") raise RuntimeError(_("Could not enable database on boot.")) def _disable_db_on_boot(self): try: command = (system.SET_RESTART_POLICY % (DB_NAME, "never")) system.shell_execute(command, system.VERTICA_ADMIN) command = 
(system.VERTICA_AGENT_SERVICE_COMMAND % "disable") system.shell_execute(command) except exception.ProcessExecutionError: LOG.exception("Failed to disable database on boot.") raise RuntimeError(_("Could not disable database on boot.")) def stop_db(self, update_db=False, do_not_start_on_reboot=False): """Stop the database.""" LOG.info("Stopping Vertica.") if do_not_start_on_reboot: self._disable_db_on_boot() try: # Stop vertica-agent service command = (system.VERTICA_AGENT_SERVICE_COMMAND % "stop") system.shell_execute(command) # Using Vertica adminTools to stop db. db_password = self._get_database_password() stop_db_command = (system.STOP_DB % (DB_NAME, db_password)) out, err = system.shell_execute(system.STATUS_ACTIVE_DB, system.VERTICA_ADMIN) if out.strip() == DB_NAME: system.shell_execute(stop_db_command, system.VERTICA_ADMIN) if not self.status._is_restarting: if not self.status.wait_for_real_status_to_change_to( rd_instance.ServiceStatuses.SHUTDOWN, self.state_change_wait_time, update_db): LOG.error("Could not stop Vertica.") self.status.end_restart() raise RuntimeError(_("Could not stop Vertica!")) LOG.debug("Database stopped.") else: LOG.debug("Database is not running.") except exception.ProcessExecutionError: LOG.exception("Failed to stop database.") raise RuntimeError(_("Could not stop database.")) def start_db(self, update_db=False): """Start the database.""" LOG.info("Starting Vertica.") try: self._enable_db_on_boot() # Start vertica-agent service command = ["sudo", "su", "-", "root", "-c", (system.VERTICA_AGENT_SERVICE_COMMAND % "start")] subprocess.Popen(command) # Using Vertica adminTools to start db. db_password = self._get_database_password() start_db_command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c", (system.START_DB % (DB_NAME, db_password))] subprocess.Popen(start_db_command) if not self.status._is_restarting: self.status.end_restart() LOG.debug("Database started.") except Exception as e: raise RuntimeError(_("Could not start Vertica due to %s") % e) def start_db_with_conf_changes(self, config_contents): """ Currently all that this method does is to start Vertica. This method needs to be implemented to enable volume resize on guestagent side. """ LOG.info("Starting Vertica with configuration changes.") if self.status.is_running: format = 'Cannot start_db_with_conf_changes because status is %s.' 
LOG.debug(format, self.status) raise RuntimeError(format % self.status) LOG.info("Initiating config.") self.configuration_manager.save_configuration(config_contents) self.start_db(True) def restart(self): """Restart the database.""" try: self.status.begin_restart() self.stop_db() self.start_db() finally: self.status.end_restart() def add_db_to_node(self, members=netutils.get_my_ipv4()): """Add db to host with admintools""" LOG.info("Calling admintools to add DB to host") try: # Create db after install db_password = self._get_database_password() create_db_command = (system.ADD_DB_TO_NODE % (members, DB_NAME, db_password)) system.shell_execute(create_db_command, "dbadmin") except exception.ProcessExecutionError: # Give vertica some time to get the node up, won't be available # by the time adminTools -t db_add_node completes LOG.info("adminTools failed as expected - wait for node") self.wait_for_node_status() LOG.info("Vertica add db to host completed.") def remove_db_from_node(self, members=netutils.get_my_ipv4()): """Remove db from node with admintools""" LOG.info("Removing db from node") try: # Create db after install db_password = self._get_database_password() create_db_command = (system.REMOVE_DB_FROM_NODE % (members, DB_NAME, db_password)) system.shell_execute(create_db_command, "dbadmin") except exception.ProcessExecutionError: # Give vertica some time to get the node up, won't be available # by the time adminTools -t db_add_node completes LOG.info("adminTools failed as expected - wait for node") # Give vertica some time to take the node down - it won't be available # by the time adminTools -t db_add_node completes self.wait_for_node_status() LOG.info("Vertica remove host from db completed.") def create_db(self, members=netutils.get_my_ipv4()): """Prepare the guest machine with a Vertica db creation.""" LOG.info("Creating database on Vertica host.") try: # Create db after install db_password = self._get_database_password() create_db_command = (system.CREATE_DB % (members, DB_NAME, MOUNT_POINT, MOUNT_POINT, db_password)) system.shell_execute(create_db_command, system.VERTICA_ADMIN) except Exception: LOG.exception("Vertica database create failed.") raise RuntimeError(_("Vertica database create failed.")) LOG.info("Vertica database create completed.") def install_vertica(self, members=netutils.get_my_ipv4()): """Prepare the guest machine with a Vertica db creation.""" LOG.info("Installing Vertica Server.") try: # Create db after install install_vertica_cmd = (system.INSTALL_VERTICA % (members, MOUNT_POINT)) system.shell_execute(install_vertica_cmd) except exception.ProcessExecutionError: LOG.exception("install_vertica failed.") raise RuntimeError(_("install_vertica failed.")) self._generate_database_password() LOG.info("install_vertica completed.") def update_vertica(self, command, members=netutils.get_my_ipv4()): LOG.info("Calling update_vertica with command %s", command) try: update_vertica_cmd = (system.UPDATE_VERTICA % (command, members, MOUNT_POINT)) system.shell_execute(update_vertica_cmd) except exception.ProcessExecutionError: LOG.exception("update_vertica failed.") raise RuntimeError(_("update_vertica failed.")) # self._generate_database_password() LOG.info("update_vertica completed.") def add_udls(self): """Load the user defined load libraries into the database.""" LOG.info("Adding configured user defined load libraries.") password = self._get_database_password() loaded_udls = [] for lib in system.UDL_LIBS: func_name = lib['func_name'] lib_name = lib['lib_name'] language = 
lib['language'] factory = lib['factory'] path = lib['path'] if os.path.isfile(path): LOG.debug("Adding the %(func)s library as %(lib)s.", {'func': func_name, 'lib': lib_name}) out, err = system.exec_vsql_command( password, system.CREATE_LIBRARY % (lib_name, path) ) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to create library %s.") % lib_name) out, err = system.exec_vsql_command( password, system.CREATE_SOURCE % (func_name, language, factory, lib_name) ) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to create source %s.") % func_name) loaded_udls.append(func_name) else: LOG.warning("Skipping %(func)s as path %(path)s not " "found.", {"func": func_name, "path": path}) LOG.info("The following UDL functions are available for use: %s", loaded_udls) def _generate_database_password(self): """Generate and write the password to vertica.cnf file.""" config = configparser.ConfigParser() config.add_section('credentials') config.set('credentials', 'dbadmin_password', utils.generate_random_password()) self.write_config(config) def write_config(self, config, unlink_function=os.unlink, temp_function=tempfile.NamedTemporaryFile): """Write the configuration contents to vertica.cnf file.""" LOG.debug('Defining config holder at %s.', system.VERTICA_CONF) tempfile = temp_function('w', delete=False) try: config.write(tempfile) tempfile.close() command = (("install -o root -g root -m 644 %(source)s %(target)s" ) % {'source': tempfile.name, 'target': system.VERTICA_CONF}) system.shell_execute(command) unlink_function(tempfile.name) except Exception: unlink_function(tempfile.name) raise def read_config(self): """Reads and returns the Vertica config.""" try: config = configparser.ConfigParser() config.read(system.VERTICA_CONF) return config except Exception: LOG.exception("Failed to read config %s.", system.VERTICA_CONF) raise RuntimeError def _get_database_password(self): """Read the password from vertica.cnf file and return it.""" return self.read_config().get('credentials', 'dbadmin_password') def install_if_needed(self, packages): """Install Vertica package if needed.""" LOG.info("Preparing Guest as Vertica Server.") if not packager.pkg_is_installed(packages): LOG.debug("Installing Vertica Package.") packager.pkg_install(packages, None, system.INSTALL_TIMEOUT) def _set_readahead_for_disks(self): """This method sets readhead size for disks as needed by Vertica.""" device = volume.VolumeDevice(CONF.device_path) device.set_readahead_size(CONF.vertica.readahead_size) LOG.debug("Set readhead size as required by Vertica.") def prepare_for_install_vertica(self): """This method executes preparatory methods before executing install_vertica. 
""" command = ("VERT_DBA_USR=%s VERT_DBA_HOME=/home/dbadmin " "VERT_DBA_GRP=%s /opt/vertica/oss/python/bin/python" " -m vertica.local_coerce" % (system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP)) try: self._set_readahead_for_disks() system.shell_execute(command) except exception.ProcessExecutionError: LOG.exception("Failed to prepare for install_vertica.") raise def mark_design_ksafe(self, k): """Wrapper for mark_design_ksafe function for setting k-safety """ LOG.info("Setting Vertica k-safety to %s", str(k)) out, err = system.exec_vsql_command(self._get_database_password(), system.MARK_DESIGN_KSAFE % k) # Only fail if we get an ERROR as opposed to a warning complaining # about setting k = 0 if "ERROR" in err: LOG.error(err) raise RuntimeError(_("Failed to set k-safety level %s.") % k) def _create_user(self, username, password, role=None): """Creates a user, granting and enabling the given role for it.""" LOG.info("Creating user in Vertica database.") out, err = system.exec_vsql_command(self._get_database_password(), system.CREATE_USER % (username, password)) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to create user %s.") % username) if role: self._grant_role(username, role) def _grant_role(self, username, role): """Grants a role to the user on the schema.""" out, err = system.exec_vsql_command(self._get_database_password(), system.GRANT_TO_USER % (role, username)) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to grant role %(r)s to user " "%(u)s.") % {'r': role, 'u': username}) out, err = system.exec_vsql_command(self._get_database_password(), system.ENABLE_FOR_USER % (username, role)) if err: LOG.warning(err) def enable_root(self, root_password=None): """Resets the root password.""" LOG.info("Enabling root.") user = models.DatastoreUser.root(password=root_password) if not self.is_root_enabled(): self._create_user(user.name, user.password, 'pseudosuperuser') else: LOG.debug("Updating %s password.", user.name) try: out, err = system.exec_vsql_command( self._get_database_password(), system.ALTER_USER_PASSWORD % (user.name, user.password)) if err: if err.is_warning(): LOG.warning(err) else: LOG.error(err) raise RuntimeError(_("Failed to update %s " "password.") % user.name) except exception.ProcessExecutionError: LOG.error("Failed to update %s password.", user.name) raise RuntimeError(_("Failed to update %s password.") % user.name) return user.serialize() def is_root_enabled(self): """Return True if root access is enabled else False.""" LOG.debug("Checking is root enabled.") try: out, err = system.shell_execute(system.USER_EXISTS % (self._get_database_password(), 'root'), system.VERTICA_ADMIN) if err: LOG.error(err) raise RuntimeError(_("Failed to query for root user.")) except exception.ProcessExecutionError: raise RuntimeError(_("Failed to query for root user.")) return out.rstrip() == "1" def get_public_keys(self, user): """Generates key (if not found), and sends public key for user.""" LOG.debug("Public keys requested for user: %s.", user) user_home_directory = os.path.expanduser('~' + user) public_key_file_name = user_home_directory + '/.ssh/id_rsa.pub' try: key_generate_command = (system.SSH_KEY_GEN % user_home_directory) system.shell_execute(key_generate_command, user) except exception.ProcessExecutionError: LOG.debug("Cannot generate key.") try: read_key_cmd = ("cat %(file)s" % {'file': public_key_file_name}) out, err = system.shell_execute(read_key_cmd) except 
exception.ProcessExecutionError: LOG.exception("Cannot read public key.") raise return out.strip() def authorize_public_keys(self, user, public_keys): """Adds public key to authorized_keys for user.""" LOG.debug("public keys to be added for user: %s.", user) user_home_directory = os.path.expanduser('~' + user) authorized_file_name = user_home_directory + '/.ssh/authorized_keys' try: read_key_cmd = ("cat %(file)s" % {'file': authorized_file_name}) out, err = system.shell_execute(read_key_cmd) public_keys.append(out.strip()) except exception.ProcessExecutionError: LOG.debug("Cannot read authorized_keys.") all_keys = '\n'.join(public_keys) + "\n" try: with tempfile.NamedTemporaryFile("w", delete=False) as tempkeyfile: tempkeyfile.write(all_keys) copy_key_cmd = (("install -o %(user)s -m 600 %(source)s %(target)s" ) % {'user': user, 'source': tempkeyfile.name, 'target': authorized_file_name}) system.shell_execute(copy_key_cmd) os.remove(tempkeyfile.name) except exception.ProcessExecutionError: LOG.exception("Cannot install public keys.") os.remove(tempkeyfile.name) raise def _export_conf_to_members(self, members): """This method exports conf files to other members.""" try: for member in members: COPY_CMD = (system.SEND_CONF_TO_SERVER % (system.VERTICA_CONF, member, system.VERTICA_CONF)) system.shell_execute(COPY_CMD) except exception.ProcessExecutionError: LOG.exception("Cannot export configuration.") raise def install_cluster(self, members): """Installs & configures cluster.""" cluster_members = ','.join(members) LOG.debug("Installing cluster with members: %s.", cluster_members) self.install_vertica(cluster_members) self._export_conf_to_members(members) LOG.debug("Creating database with members: %s.", cluster_members) self.create_db(cluster_members) LOG.debug("Cluster configured on members: %s.", cluster_members) def grow_cluster(self, members): """Adds nodes to cluster.""" cluster_members = ','.join(members) LOG.debug("Growing cluster with members: %s.", cluster_members) self.update_vertica("--add-hosts", cluster_members) self._export_conf_to_members(members) LOG.debug("Creating database with members: %s.", cluster_members) self.add_db_to_node(cluster_members) LOG.debug("Cluster configured on members: %s.", cluster_members) def shrink_cluster(self, members): """Removes nodes from cluster.""" cluster_members = ','.join(members) LOG.debug("Shrinking cluster with members: %s.", cluster_members) self.remove_db_from_node(cluster_members) self.update_vertica("--remove-hosts", cluster_members) def wait_for_node_status(self, status='UP'): """Wait until all nodes are the same status""" # select node_state from nodes where node_state <> 'UP' def _wait_for_node_status(): out, err = system.exec_vsql_command(self._get_database_password(), system.NODE_STATUS % status) LOG.debug("Polled vertica node states: %s", out) if err: LOG.error(err) raise RuntimeError(_("Failed to query for root user.")) return "0 rows" in out try: utils.poll_until(_wait_for_node_status, time_out=600, sleep_time=15) except exception.PollTimeOut: raise RuntimeError(_("Timed out waiting for cluster to " "change to status %s") % status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/experimental/vertica/system.py0000644000175000017500000001245300000000000027477 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from trove.common.i18n import _ from trove.common import utils ALTER_DB_CFG = "ALTER DATABASE %s SET %s = %s" ALTER_DB_RESET_CFG = "ALTER DATABASE %s CLEAR %s" ALTER_USER_PASSWORD = "ALTER USER %s IDENTIFIED BY '%s'" ADD_DB_TO_NODE = ("/opt/vertica/bin/adminTools -t db_add_node -a" " %s -d %s -p '%s'") REMOVE_DB_FROM_NODE = ("/opt/vertica/bin/adminTools -t db_remove_node -s" " %s -d %s -i -p '%s'") CREATE_DB = ("echo yes | /opt/vertica/bin/adminTools -t create_db -s" " %s -d %s -c %s -D %s -p '%s'") CREATE_USER = "CREATE USER %s IDENTIFIED BY '%s'" ENABLE_FOR_USER = "ALTER USER %s DEFAULT ROLE %s" GRANT_TO_USER = "GRANT %s to %s" INSTALL_VERTICA = ("/opt/vertica/sbin/install_vertica -s %s" " -d %s -X -N -S default -r" " /vertica.deb -L CE -Y --no-system-checks" " --ignore-aws-instance-type" " --ignore-install-config") MARK_DESIGN_KSAFE = "SELECT MARK_DESIGN_KSAFE(%s)" NODE_STATUS = "SELECT node_state FROM nodes where node_state <> '%s'" STOP_DB = "/opt/vertica/bin/adminTools -t stop_db -F -d %s -p '%s'" START_DB = "/opt/vertica/bin/adminTools -t start_db -d %s -p '%s'" STATUS_ACTIVE_DB = "/opt/vertica/bin/adminTools -t show_active_db" STATUS_DB_DOWN = "/opt/vertica/bin/adminTools -t db_status -s DOWN" SET_RESTART_POLICY = ("/opt/vertica/bin/adminTools -t set_restart_policy " "-d %s -p '%s'") SEND_CONF_TO_SERVER = ("rsync -v -e 'ssh -o " "UserKnownHostsFile=/dev/null -o " "StrictHostKeyChecking=no' --perms --owner --group " "%s %s:%s") SSH_KEY_GEN = "ssh-keygen -f %s/.ssh/id_rsa -t rsa -N ''" UPDATE_VERTICA = ("/opt/vertica/sbin/update_vertica %s %s " " -d %s -X -N -S default -r" " /vertica.deb -L CE -Y --no-system-checks" " --ignore-aws-instance-type" " --ignore-install-config") UPDATE_REMOVE = ("/opt/vertica/sbin/update_vertica --remove-hosts %s " " -d %s -X -N -S default -r" " /vertica.deb -L CE -Y --no-system-checks" " --ignore-aws-instance-type" " --ignore-install-config") UPDATE_ADD = ("/opt/vertica/sbin/update_vertica --add-hosts %s " " -d %s -X -N -S default -r" " /vertica.deb -L CE -Y --no-system-checks" " --ignore-aws-instance-type" " --ignore-install-config") USER_EXISTS = ("/opt/vertica/bin/vsql -w '%s' -c " "\"select 1 from users where user_name = '%s'\" " "| grep row | awk '{print $1}' | cut -c2-") VERTICA_ADMIN = "dbadmin" VERTICA_ADMIN_GRP = "verticadba" VERTICA_AGENT_SERVICE_COMMAND = "service vertica_agent %s" VERTICA_CONF = "/etc/vertica.cnf" INSTALL_TIMEOUT = 1000 CREATE_LIBRARY = "CREATE LIBRARY %s AS '%s'" CREATE_SOURCE = "CREATE SOURCE %s AS LANGUAGE '%s' NAME '%s' LIBRARY %s" UDL_LIBS = [ { 'func_name': "curl", 'lib_name': "curllib", 'language': "C++", 'factory': "CurlSourceFactory", 'path': "/opt/vertica/sdk/examples/build/cURLLib.so" }, ] def shell_execute(command, command_executor="root"): # This method encapsulates utils.execute for 2 purpose: # 1. Helps in safe testing. # 2. Helps in executing commands as other user, using their environment. 
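    # Illustrative example (not part of the original module): running the
    # active-db check as the database administrator expands to roughly
    #
    #   sudo su - dbadmin -c "/opt/vertica/bin/adminTools -t show_active_db"
    #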
    # Note: This method uses su because using sudo -i -u
    # does not work with the vertica installer
    # and it has problems while executing remote commands.
    return utils.execute("sudo", "su", "-", command_executor,
                         "-c", "%s" % command)


class VSqlError(object):
    def __init__(self, stderr):
        """Parse the stderr part of the VSql output.

        stderr looks like: "ERROR 3117: Division by zero"

        :param stderr: string from executing statement via vsql
        """
        parse = re.match(r"^(ERROR|WARNING) (\d+): (.+)$", stderr)
        if not parse:
            raise ValueError(_("VSql stderr %(msg)s not recognized.") %
                             {'msg': stderr})
        self.type = parse.group(1)
        self.code = int(parse.group(2))
        self.msg = parse.group(3)

    def is_warning(self):
        return bool(self.type == "WARNING")

    def __str__(self):
        return "Vertica %s (%s): %s" % (self.type, self.code, self.msg)


def exec_vsql_command(dbadmin_password, command):
    """Executes a VSQL command with the given dbadmin password."""
    out, err = shell_execute("/opt/vertica/bin/vsql -w \'%s\' -c \"%s\"" %
                             (dbadmin_password, command),
                             VERTICA_ADMIN)
    if err:
        err = VSqlError(err)
    return out, err
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106
trove-12.1.0.dev92/trove/guestagent/datastore/galera_common/0000755000175000017500000000000000000000000024245 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/guestagent/datastore/galera_common/__init__.py0000644000175000017500000000000000000000000026344 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/guestagent/datastore/galera_common/manager.py0000644000175000017500000000622400000000000026235 0ustar00coreycorey00000000000000
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
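# ---------------------------------------------------------------------------
# Illustrative usage sketch for the VSqlError parser defined at the end of
# the vertica system module above (not part of the original tree); the
# warning text is hypothetical.
#
#   err = VSqlError("WARNING 4486: Cannot set K-safety to 0")
#   err.type          # 'WARNING'
#   err.code          # 4486
#   err.is_warning()  # True
#   str(err)          # "Vertica WARNING (4486): Cannot set K-safety to 0"
# ---------------------------------------------------------------------------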
# from oslo_log import log as logging from trove.common import instance as rd_instance from trove.guestagent.datastore.mysql_common import manager LOG = logging.getLogger(__name__) class GaleraManager(manager.MySqlManager): def __init__(self, mysql_app, mysql_app_status, mysql_admin, manager_name='galera'): super(GaleraManager, self).__init__( mysql_app, mysql_app_status, mysql_admin, manager_name) self._mysql_app = mysql_app self._mysql_app_status = mysql_app_status self._mysql_admin = mysql_admin self.volume_do_not_start_on_reboot = False def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): self.volume_do_not_start_on_reboot = True super(GaleraManager, self).do_prepare( context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) def install_cluster(self, context, replication_user, cluster_configuration, bootstrap): app = self.mysql_app(self.mysql_app_status.get()) try: app.install_cluster( replication_user, cluster_configuration, bootstrap) LOG.debug("install_cluster call has finished.") except Exception: LOG.exception('Cluster installation failed.') app.status.set_status( rd_instance.ServiceStatuses.FAILED) raise def reset_admin_password(self, context, admin_password): LOG.debug("Storing the admin password on the instance.") app = self.mysql_app(self.mysql_app_status.get()) app.reset_admin_password(admin_password) def get_cluster_context(self, context): LOG.debug("Getting the cluster context.") app = self.mysql_app(self.mysql_app_status.get()) return app.get_cluster_context() def write_cluster_configuration_overrides(self, context, cluster_configuration): LOG.debug("Apply the updated cluster configuration.") app = self.mysql_app(self.mysql_app_status.get()) app.write_cluster_configuration_overrides(cluster_configuration) def enable_root_with_password(self, context, root_password=None): return self.mysql_admin().enable_root(root_password) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/galera_common/service.py0000644000175000017500000000701000000000000026255 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
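# ---------------------------------------------------------------------------
# Illustrative example (not part of the original tree): the replication_user
# argument that GaleraManager.install_cluster() above forwards to the app is
# a plain dict; the credentials shown are hypothetical.
#
#   replication_user = {'name': 'sstuser', 'password': 'sstpassword'}
#
# GaleraApp._grant_cluster_replication_privilege() in the service module
# below reads exactly these two keys when granting REPLICATION CLIENT,
# RELOAD and LOCK TABLES.
# ---------------------------------------------------------------------------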
# import abc from oslo_log import log as logging from sqlalchemy.sql.expression import text from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import sql_query from trove.guestagent.datastore.mysql_common import service LOG = logging.getLogger(__name__) CONF = service.CONF class GaleraApp(service.BaseMySqlApp): def __init__(self, status, local_sql_client, keep_alive_connection_cls): super(GaleraApp, self).__init__(status, local_sql_client, keep_alive_connection_cls) def _grant_cluster_replication_privilege(self, replication_user): LOG.info("Granting Replication Slave privilege.") with self.local_sql_client(self.get_engine()) as client: perms = ['REPLICATION CLIENT', 'RELOAD', 'LOCK TABLES'] g = sql_query.Grant(permissions=perms, user=replication_user['name'], clear=replication_user['password']) t = text(str(g)) client.execute(t) def _bootstrap_cluster(self, timeout=120): LOG.info("Bootstraping cluster.") try: utils.execute_with_timeout( self.mysql_service['cmd_bootstrap_galera_cluster'], shell=True, timeout=timeout) except KeyError: LOG.exception("Error bootstrapping cluster.") raise RuntimeError(_("Service is not discovered.")) def write_cluster_configuration_overrides(self, cluster_configuration): self.configuration_manager.apply_system_override( cluster_configuration, 'cluster') def install_cluster(self, replication_user, cluster_configuration, bootstrap=False): LOG.info("Installing cluster configuration.") self._grant_cluster_replication_privilege(replication_user) self.stop_db() self.write_cluster_configuration_overrides(cluster_configuration) self.wipe_ib_logfiles() LOG.debug("bootstrap the instance? : %s", bootstrap) # Have to wait to sync up the joiner instances with the donor instance. if bootstrap: self._bootstrap_cluster(timeout=CONF.restore_usage_timeout) else: self.start_mysql(timeout=CONF.restore_usage_timeout) @abc.abstractproperty def cluster_configuration(self): """ Returns the cluster section from the configuration manager. """ def get_cluster_context(self): auth = self.cluster_configuration.get( "wsrep_sst_auth").replace('"', '') cluster_name = self.cluster_configuration.get("wsrep_cluster_name") return { 'replication_user': { 'name': auth.split(":")[0], 'password': auth.split(":")[1], }, 'cluster_name': cluster_name, 'admin_password': self.get_auth_password() } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/datastore/manager.py0000644000175000017500000011623100000000000023432 0ustar00coreycorey00000000000000# Copyright 2015 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
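# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): how
# GaleraApp.get_cluster_context() above unpacks the wsrep_sst_auth setting;
# the values are hypothetical.
#
#   auth = '"sstuser:sstpassword"'.replace('"', '')
#   name, password = auth.split(':')[0], auth.split(':')[1]
#   # -> name == 'sstuser', password == 'sstpassword'
# ---------------------------------------------------------------------------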
# import abc import operator import os from oslo_config import cfg as oslo_cfg from oslo_log import log as logging from oslo_service import periodic_task from oslo_utils import encodeutils from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import instance from trove.common.notification import EndNotification from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent import dbaas from trove.guestagent import guest_log from trove.guestagent.module import driver_manager from trove.guestagent.module import module_manager from trove.guestagent.strategies import replication as repl_strategy from trove.guestagent import volume LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager(periodic_task.PeriodicTasks): """This is the base class for all datastore managers. Over time, common functionality should be pulled back here from the existing managers. """ GUEST_LOG_TYPE_LABEL = 'type' GUEST_LOG_USER_LABEL = 'user' GUEST_LOG_FILE_LABEL = 'file' GUEST_LOG_SECTION_LABEL = 'section' GUEST_LOG_ENABLE_LABEL = 'enable' GUEST_LOG_DISABLE_LABEL = 'disable' GUEST_LOG_RESTART_LABEL = 'restart' GUEST_LOG_BASE_DIR = '/var/log/trove' GUEST_LOG_DATASTORE_DIRNAME = 'datastore' GUEST_LOG_DEFS_GUEST_LABEL = 'guest' GUEST_LOG_DEFS_GENERAL_LABEL = 'general' GUEST_LOG_DEFS_ERROR_LABEL = 'error' GUEST_LOG_DEFS_SLOW_QUERY_LABEL = 'slow_query' MODULE_APPLY_TO_ALL = module_manager.ModuleManager.MODULE_APPLY_TO_ALL def __init__(self, manager_name): super(Manager, self).__init__(CONF) # Manager properties self.__manager_name = manager_name self.__manager = None self.__prepare_error = False # Guest log self._guest_log_context = None self._guest_log_loaded_context = None self._guest_log_cache = None self._guest_log_defs = None # Module self.module_driver_manager = driver_manager.ModuleDriverManager() @property def manager_name(self): """This returns the passed-in name of the manager.""" return self.__manager_name @property def manager(self): """This returns the name of the manager.""" if not self.__manager: self.__manager = CONF.datastore_manager or self.__manager_name return self.__manager @property def prepare_error(self): return self.__prepare_error @prepare_error.setter def prepare_error(self, prepare_error): self.__prepare_error = prepare_error @property def replication(self): """If the datastore supports replication, return an instance of the strategy. """ try: return repl_strategy.get_instance(self.manager) except Exception as ex: LOG.warning("Cannot get replication instance for '%(manager)s': " "%(msg)s", {'manager': self.manager, 'msg': str(ex)}) return None @property def replication_strategy(self): """If the datastore supports replication, return the strategy.""" try: return repl_strategy.get_strategy(self.manager) except Exception as ex: LOG.debug("Cannot get replication strategy for '%(manager)s': " "%(msg)s", {'manager': self.manager, 'msg': str(ex)}) return None @abc.abstractproperty def status(self): """This should return an instance of a status class that has been inherited from datastore.service.BaseDbStatus. Each datastore must implement this property. """ return None @property def configuration_manager(self): """If the datastore supports the new-style configuration manager, it should override this to return it. 
""" return None def get_datastore_log_defs(self): """Any datastore-specific log files should be overridden in this dict by the corresponding Manager class. Format of a dict entry: 'name_of_log': {self.GUEST_LOG_TYPE_LABEL: Specified by the Enum in guest_log.LogType, self.GUEST_LOG_USER_LABEL: User that owns the file, self.GUEST_LOG_FILE_LABEL: Path on filesystem where the log resides, self.GUEST_LOG_SECTION_LABEL: Section where to put config (if ini style) self.GUEST_LOG_ENABLE_LABEL: { Dict of config_group settings to enable log}, self.GUEST_LOG_DISABLE_LABEL: { Dict of config_group settings to disable log}, See guestagent_log_defs for an example. """ return {} @property def guestagent_log_defs(self): """These are log files that should be available on every Trove instance. By definition, these should be of type LogType.SYS """ log_dir = CONF.get('log_dir', '/var/log/trove/') log_file = CONF.get('log_file', 'trove-guestagent.log') guestagent_log = guestagent_utils.build_file_path(log_dir, log_file) return { self.GUEST_LOG_DEFS_GUEST_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.SYS, self.GUEST_LOG_USER_LABEL: None, self.GUEST_LOG_FILE_LABEL: guestagent_log, }, } def get_guest_log_defs(self): """Return all the guest log defs.""" if not self._guest_log_defs: self._guest_log_defs = dict(self.get_datastore_log_defs()) self._guest_log_defs.update(self.guestagent_log_defs) return self._guest_log_defs @property def guest_log_context(self): return self._guest_log_context @guest_log_context.setter def guest_log_context(self, context): self._guest_log_context = context def get_guest_log_cache(self): """Make sure the guest_log_cache is loaded and return it.""" self._refresh_guest_log_cache() return self._guest_log_cache def _refresh_guest_log_cache(self): if self._guest_log_cache: # Replace the context if it's changed if self._guest_log_loaded_context != self.guest_log_context: for log_name in self._guest_log_cache.keys(): self._guest_log_cache[log_name].context = ( self.guest_log_context) else: # Load the initial cache self._guest_log_cache = {} if self.guest_log_context: gl_defs = self.get_guest_log_defs() try: exposed_logs = CONF.get(self.manager).get( 'guest_log_exposed_logs') except oslo_cfg.NoSuchOptError: exposed_logs = '' LOG.debug("Available log defs: %s", ",".join(gl_defs.keys())) exposed_logs = exposed_logs.lower().replace(',', ' ').split() LOG.debug("Exposing log defs: %s", ",".join(exposed_logs)) expose_all = 'all' in exposed_logs for log_name in gl_defs.keys(): gl_def = gl_defs[log_name] exposed = expose_all or log_name in exposed_logs LOG.debug("Building guest log '%(name)s' from def: %(def)s" " (exposed: %(exposed)s)", {'name': log_name, 'def': gl_def, 'exposed': exposed}) self._guest_log_cache[log_name] = guest_log.GuestLog( self.guest_log_context, log_name, gl_def[self.GUEST_LOG_TYPE_LABEL], gl_def[self.GUEST_LOG_USER_LABEL], gl_def[self.GUEST_LOG_FILE_LABEL], exposed) self._guest_log_loaded_context = self.guest_log_context def get_service_status(self): return self.status._get_actual_db_status() @periodic_task.periodic_task def update_status(self, context): """Update the status of the trove instance.""" if not self.status.is_installed or self.status._is_restarting: LOG.info("Database service is not installed or is in restart " "mode, skip status check") return LOG.debug("Starting to check database service status") status = self.get_service_status() self.status.set_status(status) def rpc_ping(self, context): LOG.debug("Responding to RPC ping.") return True ################# # 
Instance related
    #################

    def prepare(self, context, packages, databases, memory_mb, users,
                device_path=None, mount_point=None, backup_info=None,
                config_contents=None, root_password=None, overrides=None,
                cluster_config=None, snapshot=None, modules=None):
        """Set up datastore on a Guest Instance."""
        with EndNotification(context, instance_id=CONF.guest_id):
            self._prepare(context, packages, databases, memory_mb, users,
                          device_path, mount_point, backup_info,
                          config_contents, root_password, overrides,
                          cluster_config, snapshot, modules)

    def _prepare(self, context, packages, databases, memory_mb, users,
                 device_path, mount_point, backup_info,
                 config_contents, root_password, overrides,
                 cluster_config, snapshot, modules):
        LOG.info("Starting datastore prepare for '%s'.", self.manager)
        self.status.begin_install()
        post_processing = True if cluster_config else False
        try:
            # Since all module handling is common, don't pass it down to the
            # individual 'do_prepare' methods.
            self.do_prepare(context, packages, databases, memory_mb,
                            users, device_path, mount_point, backup_info,
                            config_contents, root_password, overrides,
                            cluster_config, snapshot)
            if overrides:
                LOG.info("Applying user-specified configuration "
                         "(called from 'prepare').")
                self.apply_overrides_on_prepare(context, overrides)
        except Exception as ex:
            self.prepare_error = True
            LOG.exception("An error occurred preparing datastore: %s",
                          encodeutils.exception_to_unicode(ex))
            raise
        finally:
            LOG.info("Ending datastore prepare for '%s'.", self.manager)
            self.status.end_install(error_occurred=self.prepare_error,
                                    post_processing=post_processing)
        # At this point the critical 'prepare' work is done and the instance
        # is now in the correct 'ACTIVE', 'INSTANCE_READY' or 'ERROR' state.
        # Of course, if an error has occurred, none of the code that follows
        # will run.
        LOG.info("Completed setup of '%s' datastore successfully.",
                 self.manager)

        # The following block performs additional instance initialization.
        # Failures will be recorded, but won't stop the provisioning
        # or change the instance state.
        try:
            if modules:
                LOG.info("Applying modules (called from 'prepare').")
                self.module_apply(context, modules)
                LOG.info('Module apply completed.')
        except Exception as ex:
            LOG.exception("An error occurred applying modules: "
                          "%s", str(ex))

        # The following block performs single-instance initialization.
        # Failures will be recorded, but won't stop the provisioning
        # or change the instance state.
        if not cluster_config:
            try:
                if databases:
                    LOG.info("Creating databases (called from 'prepare').")
                    self.create_database(context, databases)
                    LOG.info('Databases created successfully.')
            except Exception as ex:
                LOG.exception("An error occurred creating databases: "
                              "%s", str(ex))
            try:
                if users:
                    LOG.info("Creating users (called from 'prepare').")
                    self.create_user(context, users)
                    LOG.info('Users created successfully.')
            except Exception as ex:
                LOG.exception("An error occurred creating users: "
                              "%s", str(ex))

            # We only enable root automatically if we are not restoring a
            # backup, which may already have root enabled, in which case we
            # keep it unchanged.
if root_password and not backup_info: try: LOG.info("Enabling root user (with password).") self.enable_root_on_prepare(context, root_password) LOG.info('Root enabled successfully.') except Exception as ex: LOG.exception("An error occurred enabling root user: " "%s", str(ex)) try: LOG.info("Calling post_prepare for '%s' datastore.", self.manager) self.post_prepare(context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) LOG.info("Post prepare for '%s' datastore completed.", self.manager) except Exception as ex: LOG.exception("An error occurred in post prepare: %s", str(ex)) raise def apply_overrides_on_prepare(self, context, overrides): self.update_overrides(context, overrides) self.restart(context) def enable_root_on_prepare(self, context, root_password): self.enable_root_with_password(context, root_password) @abc.abstractmethod def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare when the Trove instance first comes online. 'Prepare' is the first rpc message passed from the task manager. do_prepare handles all the base configuration of the instance and is where the actual work is done. Once this method completes, the datastore is considered either 'ready' for use (or for final connections to other datastores) or in an 'error' state, and the status is changed accordingly. Each datastore must implement this method. """ pass def post_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called after prepare has completed successfully. Processing done here should be limited to things that will not affect the actual 'running' status of the datastore (for example, creating databases and users, although these are now handled automatically). Any exceptions are caught, logged and rethrown, however no status changes are made and the end-user will not be informed of the error. """ LOG.info('No post_prepare work has been defined.') pass def pre_upgrade(self, context): """Prepares the guest for upgrade, returning a dict to be passed to post_upgrade """ return {} def post_upgrade(self, context, upgrade_info): """Recovers the guest after the image is upgraded using information from the pre_upgrade step """ pass def _restore_directory(self, restore_dir, target_dir, owner=None): restore_path = os.path.join(restore_dir, ".") operating_system.copy(restore_path, target_dir, preserve=True, as_root=True) if owner is not None: operating_system.chown(path=target_dir, user=owner, group=owner, recursive=True, as_root=True) def _restore_home_directory(self, saved_home_dir): home_dir = os.path.expanduser("~") home_owner = operating_system.get_current_user() self._restore_directory(restore_dir=saved_home_dir, target_dir=home_dir, owner=home_owner) ################# # Service related ################# @abc.abstractmethod def restart(self, context): """Restart the database service.""" pass ##################### # File System related ##################### def get_filesystem_stats(self, context, fs_path): """Gets the filesystem stats for the path given.""" # TODO(peterstac) - note that fs_path is not used in this method. 
mount_point = CONF.get(self.manager).mount_point LOG.debug("Getting file system stats for '%s'", mount_point) return dbaas.get_filesystem_volume_stats(mount_point) def mount_volume(self, context, device_path=None, mount_point=None, write_to_fstab=False): LOG.debug("Mounting the device %(path)s at the mount point " "%(mount_point)s.", {'path': device_path, 'mount_point': mount_point}) device = volume.VolumeDevice(device_path) device.mount(mount_point, write_to_fstab=write_to_fstab) def unmount_volume(self, context, device_path=None, mount_point=None): LOG.debug("Unmounting the device %(path)s from the mount point " "%(mount_point)s.", {'path': device_path, 'mount_point': mount_point}) device = volume.VolumeDevice(device_path) device.unmount(mount_point) def resize_fs(self, context, device_path=None, mount_point=None): LOG.debug("Resizing the filesystem at %s.", mount_point) device = volume.VolumeDevice(device_path) device.resize_fs(mount_point) ############### # Configuration ############### def reset_configuration(self, context, configuration): """The default implementation should be sufficient if a configuration_manager is provided. Even if one is not, this method needs to be implemented to allow the rollback of flavor-resize on the guestagent side. """ LOG.debug("Resetting configuration.") if self.configuration_manager: config_contents = configuration['config_contents'] self.configuration_manager.save_configuration(config_contents) ################# # Cluster related ################# def cluster_complete(self, context): LOG.debug("Cluster creation complete, starting status checks.") self.status.end_install() ############# # Log related ############# def guest_log_list(self, context): LOG.info("Getting list of guest logs.") self.guest_log_context = context gl_cache = self.get_guest_log_cache() result = filter(None, [gl_cache[log_name].show() if gl_cache[log_name].exposed else None for log_name in gl_cache.keys()]) LOG.info("Returning list of logs: %s", result) return result def guest_log_action(self, context, log_name, enable, disable, publish, discard): if enable and disable: raise exception.BadRequest("Cannot enable and disable log '%s'." % log_name) # Enable if we are publishing, unless told to disable if publish and not disable: enable = True LOG.info("Processing guest log '%(log)s' " "(enable=%(en)s, disable=%(dis)s, " "publish=%(pub)s, discard=%(disc)s).", {'log': log_name, 'en': enable, 'dis': disable, 'pub': publish, 'disc': discard}) self.guest_log_context = context gl_cache = self.get_guest_log_cache() if log_name in gl_cache: if ((gl_cache[log_name].type == guest_log.LogType.SYS) and not publish): if enable or disable: if enable: action_text = "enable" else: action_text = "disable" raise exception.BadRequest("Cannot %s a SYSTEM log ('%s')." % (action_text, log_name)) if gl_cache[log_name].type == guest_log.LogType.USER: requires_change = ( (gl_cache[log_name].enabled and disable) or (not gl_cache[log_name].enabled and enable)) if requires_change: restart_required = self.guest_log_enable( context, log_name, disable) if restart_required: self.set_guest_log_status( guest_log.LogStatus.Restart_Required, log_name) gl_cache[log_name].enabled = enable log_details = gl_cache[log_name].show() if discard: log_details = gl_cache[log_name].discard_log() if publish: log_details = gl_cache[log_name].publish_log() LOG.info("Details for log '%(log)s': %(det)s", {'log': log_name, 'det': log_details}) return log_details raise exception.NotFound("Log '%s' is not defined." 
% log_name) def guest_log_enable(self, context, log_name, disable): """This method can be overridden by datastore implementations to facilitate enabling and disabling USER type logs. If the logs can be enabled with simple configuration group changes, however, the code here will probably suffice. Must return whether the datastore needs to be restarted in order for the logging to begin. """ restart_required = False verb = ("Disabling" if disable else "Enabling") if self.configuration_manager: LOG.debug("%(verb)s log '%(log)s'", {'verb': verb, 'log': log_name}) gl_def = self.get_guest_log_defs()[log_name] enable_cfg_label = "%s_%s_log" % (self.GUEST_LOG_ENABLE_LABEL, log_name) disable_cfg_label = "%s_%s_log" % (self.GUEST_LOG_DISABLE_LABEL, log_name) restart_required = gl_def.get(self.GUEST_LOG_RESTART_LABEL, restart_required) if disable: self._apply_log_overrides( context, enable_cfg_label, disable_cfg_label, gl_def.get(self.GUEST_LOG_DISABLE_LABEL), gl_def.get(self.GUEST_LOG_SECTION_LABEL), restart_required) else: self._apply_log_overrides( context, disable_cfg_label, enable_cfg_label, gl_def.get(self.GUEST_LOG_ENABLE_LABEL), gl_def.get(self.GUEST_LOG_SECTION_LABEL), restart_required) else: log_fmt = ("%(verb)s log '%(log)s' not supported - " "no configuration manager defined!") exc_fmt = _("%(verb)s log '%(log)s' not supported - " "no configuration manager defined!") msg_content = {'verb': verb, 'log': log_name} LOG.error(log_fmt, msg_content) raise exception.GuestError( original_message=(exc_fmt % msg_content)) return restart_required def _apply_log_overrides(self, context, remove_label, apply_label, cfg_values, section_label, restart_required): self.configuration_manager.remove_system_override( change_id=remove_label) if cfg_values: config_man_values = cfg_values if section_label: config_man_values = {section_label: cfg_values} self.configuration_manager.apply_system_override( config_man_values, change_id=apply_label, pre_user=True) if restart_required: self.status.set_status(instance.ServiceStatuses.RESTART_REQUIRED) else: self.apply_overrides(context, cfg_values) def set_guest_log_status(self, status, log_name=None): """Sets the status of log_name to 'status' - if log_name is not provided, sets the status on all logs. """ gl_cache = self.get_guest_log_cache() names = [log_name] if not log_name or log_name not in gl_cache: names = gl_cache.keys() for name in names: # If we're already in restart mode and we're asked to set the # status to restart, assume enable/disable has been flipped # without a restart and set the status to restart done if (gl_cache[name].status == guest_log.LogStatus.Restart_Required and status == guest_log.LogStatus.Restart_Required): gl_cache[name].status = guest_log.LogStatus.Restart_Completed else: gl_cache[name].status = status def build_log_file_name(self, log_name, owner, datastore_dir=None): """Build a log file name based on the log_name and make sure the directories exist and are accessible by owner. 
""" if datastore_dir is None: base_dir = self.GUEST_LOG_BASE_DIR if not operating_system.exists(base_dir, is_directory=True): operating_system.create_directory( base_dir, user=owner, group=owner, force=True, as_root=True) datastore_dir = guestagent_utils.build_file_path( base_dir, self.GUEST_LOG_DATASTORE_DIRNAME) if not operating_system.exists(datastore_dir, is_directory=True): operating_system.create_directory( datastore_dir, user=owner, group=owner, force=True, as_root=True) log_file_name = guestagent_utils.build_file_path( datastore_dir, '%s-%s.log' % (self.manager, log_name)) return self.validate_log_file(log_file_name, owner) def validate_log_file(self, log_file, owner): """Make sure the log file exists and is accessible by owner. """ if not operating_system.exists(log_file, as_root=True): operating_system.write_file(log_file, '', as_root=True) operating_system.chown(log_file, user=owner, group=owner, as_root=True) operating_system.chmod(log_file, FileMode.ADD_USR_RW_GRP_RW_OTH_R, as_root=True) LOG.debug("Set log file '%s' as readable", log_file) return log_file ################ # Module related ################ def module_list(self, context, include_contents=False): LOG.info("Getting list of modules.") results = module_manager.ModuleManager.read_module_results( is_admin=context.is_admin, include_contents=include_contents) LOG.info("Returning list of modules: %s", results) return results def module_apply(self, context, modules=None): LOG.info("Applying modules.") results = [] modules = [data['module'] for data in modules] try: # make sure the modules are applied in the correct order modules.sort(key=operator.itemgetter('apply_order')) modules.sort(key=operator.itemgetter('priority_apply'), reverse=True) except KeyError: # If we don't have ordering info then maybe we're running # a version of the module feature before ordering was # introduced. In that case, since we don't have any # way to order the modules we should just continue. 
pass for module in modules: id = module.get('id', None) module_type = module.get('type', None) name = module.get('name', None) tenant = module.get('tenant', self.MODULE_APPLY_TO_ALL) datastore = module.get('datastore', self.MODULE_APPLY_TO_ALL) ds_version = module.get('datastore_version', self.MODULE_APPLY_TO_ALL) contents = module.get('contents', None) md5 = module.get('md5', None) auto_apply = module.get('auto_apply', True) visible = module.get('visible', True) is_admin = module.get('is_admin', None) if is_admin is None: # fall back to the old method of checking for an admin option is_admin = (tenant == self.MODULE_APPLY_TO_ALL or not visible or auto_apply) if not name: raise AttributeError(_("Module name not specified")) if not contents: raise AttributeError(_("Module contents not specified")) driver = self.module_driver_manager.get_driver(module_type) if not driver: raise exception.ModuleTypeNotFound( _("No driver implemented for module type '%s'") % module_type) if (datastore and datastore != self.MODULE_APPLY_TO_ALL and datastore != CONF.datastore_manager): reason = (_("Module not valid for datastore %s") % CONF.datastore_manager) raise exception.ModuleInvalid(reason=reason) result = module_manager.ModuleManager.apply_module( driver, module_type, name, tenant, datastore, ds_version, contents, id, md5, auto_apply, visible, is_admin) results.append(result) LOG.info("Returning list of modules: %s", results) return results def module_remove(self, context, module=None): LOG.info("Removing module.") module = module['module'] id = module.get('id', None) module_type = module.get('type', None) name = module.get('name', None) datastore = module.get('datastore', None) ds_version = module.get('datastore_version', None) if not name: raise AttributeError(_("Module name not specified")) driver = self.module_driver_manager.get_driver(module_type) if not driver: raise exception.ModuleTypeNotFound( _("No driver implemented for module type '%s'") % module_type) module_manager.ModuleManager.remove_module( driver, module_type, id, name, datastore, ds_version) LOG.info("Deleted module: %s", name) ############### # Not Supported ############### def change_passwords(self, context, users): LOG.debug("Changing passwords.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='change_passwords', datastore=self.manager) def get_root_password(self, context): LOG.debug("Getting root password.") raise exception.DatastoreOperationNotSupported( operation='get_root_password', datastore=self.manager) def enable_root(self, context): LOG.debug("Enabling root.") raise exception.DatastoreOperationNotSupported( operation='enable_root', datastore=self.manager) def enable_root_with_password(self, context, root_password=None): LOG.debug("Enabling root with password.") raise exception.DatastoreOperationNotSupported( operation='enable_root_with_password', datastore=self.manager) def disable_root(self, context): LOG.debug("Disabling root.") raise exception.DatastoreOperationNotSupported( operation='disable_root', datastore=self.manager) def is_root_enabled(self, context): LOG.debug("Checking if root was ever enabled.") raise exception.DatastoreOperationNotSupported( operation='is_root_enabled', datastore=self.manager) def create_backup(self, context, backup_info): LOG.debug("Creating backup.") raise exception.DatastoreOperationNotSupported( operation='create_backup', datastore=self.manager) def _perform_restore(self, backup_info, context, restore_location, app): LOG.debug("Performing 
restore.") raise exception.DatastoreOperationNotSupported( operation='_perform_restore', datastore=self.manager) def create_database(self, context, databases): LOG.debug("Creating databases.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='create_database', datastore=self.manager) def list_databases(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing databases.") raise exception.DatastoreOperationNotSupported( operation='list_databases', datastore=self.manager) def delete_database(self, context, database): LOG.debug("Deleting database.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='delete_database', datastore=self.manager) def create_user(self, context, users): LOG.debug("Creating users.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='create_user', datastore=self.manager) def list_users(self, context, limit=None, marker=None, include_marker=False): LOG.debug("Listing users.") raise exception.DatastoreOperationNotSupported( operation='list_users', datastore=self.manager) def delete_user(self, context, user): LOG.debug("Deleting user.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='delete_user', datastore=self.manager) def get_user(self, context, username, hostname): LOG.debug("Getting user.") raise exception.DatastoreOperationNotSupported( operation='get_user', datastore=self.manager) def update_attributes(self, context, username, hostname, user_attrs): LOG.debug("Updating user attributes.") with EndNotification(context): raise exception.DatastoreOperationNotSupported( operation='update_attributes', datastore=self.manager) def grant_access(self, context, username, hostname, databases): LOG.debug("Granting user access.") raise exception.DatastoreOperationNotSupported( operation='grant_access', datastore=self.manager) def revoke_access(self, context, username, hostname, database): LOG.debug("Revoking user access.") raise exception.DatastoreOperationNotSupported( operation='revoke_access', datastore=self.manager) def list_access(self, context, username, hostname): LOG.debug("Listing user access.") raise exception.DatastoreOperationNotSupported( operation='list_access', datastore=self.manager) def get_config_changes(self, cluster_config, mount_point=None): LOG.debug("Get configuration changes.") raise exception.DatastoreOperationNotSupported( operation='get_configuration_changes', datastore=self.manager) def update_overrides(self, context, overrides, remove=False): LOG.debug("Updating overrides.") raise exception.DatastoreOperationNotSupported( operation='update_overrides', datastore=self.manager) def apply_overrides(self, context, overrides): LOG.debug("Applying overrides.") raise exception.DatastoreOperationNotSupported( operation='apply_overrides', datastore=self.manager) def get_replication_snapshot(self, context, snapshot_info, replica_source_config=None): LOG.debug("Getting replication snapshot.") raise exception.DatastoreOperationNotSupported( operation='get_replication_snapshot', datastore=self.manager) def attach_replication_slave(self, context, snapshot, slave_config): LOG.debug("Attaching replication slave.") raise exception.DatastoreOperationNotSupported( operation='attach_replication_slave', datastore=self.manager) def detach_replica(self, context, for_failover=False): LOG.debug("Detaching replica.") raise exception.DatastoreOperationNotSupported( operation='detach_replica', 
datastore=self.manager)

    def get_replica_context(self, context):
        LOG.debug("Getting replica context.")
        raise exception.DatastoreOperationNotSupported(
            operation='get_replica_context', datastore=self.manager)

    def make_read_only(self, context, read_only):
        LOG.debug("Making datastore read-only.")
        raise exception.DatastoreOperationNotSupported(
            operation='make_read_only', datastore=self.manager)

    def enable_as_master(self, context, replica_source_config):
        LOG.debug("Enabling as master.")
        raise exception.DatastoreOperationNotSupported(
            operation='enable_as_master', datastore=self.manager)

    def get_txn_count(self, context):
        LOG.debug("Getting transaction count.")
        raise exception.DatastoreOperationNotSupported(
            operation='get_txn_count', datastore=self.manager)

    def get_latest_txn_id(self, context):
        LOG.debug("Getting latest transaction id.")
        raise exception.DatastoreOperationNotSupported(
            operation='get_latest_txn_id', datastore=self.manager)

    def wait_for_txn(self, context, txn):
        LOG.debug("Waiting for transaction.")
        raise exception.DatastoreOperationNotSupported(
            operation='wait_for_txn', datastore=self.manager)

    def demote_replication_master(self, context):
        LOG.debug("Demoting replication master.")
        raise exception.DatastoreOperationNotSupported(
            operation='demote_replication_master', datastore=self.manager)


trove-12.1.0.dev92/trove/guestagent/datastore/mysql/__init__.py
trove-12.1.0.dev92/trove/guestagent/datastore/mysql/manager.py

# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
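# NOTE(editor): get_txn_count() in mysql/service.py a little further below
# derives a transaction count from the server's @@global.gtid_executed
# value, which looks like "uuid1:1-5:11,uuid2:1". A standalone sketch of
# just that parsing step; the sample GTID value in the comment is
# illustrative only, and the arithmetic deliberately mirrors the source.

def count_executed_txns(gtid_executed):
    """Count transactions described by a gtid_executed set string."""
    txn_count = 0
    for uuid_set in gtid_executed.split(','):
        # Skip the leading server UUID; every remaining piece is either a
        # single transaction id or a "first-last" interval.
        for interval in uuid_set.split(':')[1:]:
            if '-' in interval:
                first, last = interval.split('-')
                txn_count += int(last) - int(first)
            else:
                txn_count += 1
    return txn_count


# Example (illustrative value):
# count_executed_txns("3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5")
# -> 4, matching the interval arithmetic used by get_txn_count().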
#

from oslo_utils import importutils

from trove.guestagent.datastore.mysql_common import manager


MYSQL_APP = "trove.guestagent.datastore.mysql.service.MySqlApp"
MYSQL_APP_STATUS = "trove.guestagent.datastore.mysql.service.MySqlAppStatus"
MYSQL_ADMIN = "trove.guestagent.datastore.mysql.service.MySqlAdmin"


class Manager(manager.MySqlManager):

    def __init__(self):
        mysql_app = importutils.import_class(MYSQL_APP)
        mysql_app_status = importutils.import_class(MYSQL_APP_STATUS)
        mysql_admin = importutils.import_class(MYSQL_ADMIN)

        super(Manager, self).__init__(mysql_app, mysql_app_status,
                                      mysql_admin)


trove-12.1.0.dev92/trove/guestagent/datastore/mysql/service.py

# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_log import log as logging

from trove.guestagent.datastore.mysql_common import service


LOG = logging.getLogger(__name__)
CONF = service.CONF


class KeepAliveConnection(service.BaseKeepAliveConnection):
    pass


class MySqlAppStatus(service.BaseMySqlAppStatus):
    pass


class LocalSqlClient(service.BaseLocalSqlClient):
    pass


class MySqlApp(service.BaseMySqlApp):

    def __init__(self, status):
        super(MySqlApp, self).__init__(status, LocalSqlClient,
                                       KeepAliveConnection)

    # DEPRECATED: Maintain for API Compatibility
    def get_txn_count(self):
        LOG.info("Retrieving txn count.")
        txn_count = 0
        with self.local_sql_client(self.get_engine()) as client:
            result = client.execute('SELECT @@global.gtid_executed').first()
            for uuid_set in result[0].split(','):
                for interval in uuid_set.split(':')[1:]:
                    if '-' in interval:
                        iparts = interval.split('-')
                        txn_count += int(iparts[1]) - int(iparts[0])
                    else:
                        txn_count += 1
            return txn_count

    def _get_slave_status(self):
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SHOW SLAVE STATUS').first()

    def _get_master_UUID(self):
        slave_status = self._get_slave_status()
        return slave_status and slave_status['Master_UUID'] or None

    def _get_gtid_executed(self):
        with self.local_sql_client(self.get_engine()) as client:
            return client.execute('SELECT @@global.gtid_executed').first()[0]

    def get_last_txn(self):
        master_UUID = self._get_master_UUID()
        last_txn_id = '0'
        gtid_executed = self._get_gtid_executed()
        for gtid_set in gtid_executed.split(','):
            uuid_set = gtid_set.split(':')
            if uuid_set[0] == master_UUID:
                last_txn_id = uuid_set[-1].split('-')[-1]
                break
        return master_UUID, int(last_txn_id)

    def get_latest_txn_id(self):
        LOG.info("Retrieving latest txn id.")
        return self._get_gtid_executed()

    def wait_for_txn(self, txn):
        LOG.info("Waiting on txn '%s'.", txn)
        with self.local_sql_client(self.get_engine()) as client:
            client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
                           % txn)


class MySqlRootAccess(service.BaseMySqlRootAccess):
    def __init__(self):
        super(MySqlRootAccess, self).__init__(
            LocalSqlClient, MySqlApp(MySqlAppStatus.get()))


class MySqlAdmin(service.BaseMySqlAdmin):

    def __init__(self):
        super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(),
                                         MySqlApp)


get_engine = MySqlApp.get_engine


trove-12.1.0.dev92/trove/guestagent/datastore/mysql_common/__init__.py
trove-12.1.0.dev92/trove/guestagent/datastore/mysql_common/manager.py

# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import os

from oslo_log import log as logging

from trove.common import cfg
from trove.common import configurations
from trove.common import exception
from trove.common import instance as rd_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
from trove.guestagent.common import operating_system
from trove.guestagent.datastore import manager
from trove.guestagent.datastore.mysql_common import service
from trove.guestagent import guest_log
from trove.guestagent import volume


LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class MySqlManager(manager.Manager):

    def __init__(self, mysql_app, mysql_app_status, mysql_admin,
                 manager_name='mysql'):
        super(MySqlManager, self).__init__(manager_name)
        self._mysql_app = mysql_app
        self._mysql_app_status = mysql_app_status
        self._mysql_admin = mysql_admin
        self.volume_do_not_start_on_reboot = False

    @property
    def mysql_app(self):
        return self._mysql_app

    @property
    def mysql_app_status(self):
        return self._mysql_app_status

    @property
    def mysql_admin(self):
        return self._mysql_admin

    @property
    def status(self):
        return self.mysql_app_status.get()

    @property
    def configuration_manager(self):
        return self.mysql_app(
            self.mysql_app_status.get()).configuration_manager

    def get_datastore_log_defs(self):
        owner = 'mysql'
        datastore_dir = self.mysql_app.get_data_dir()
        server_section = configurations.MySQLConfParser.SERVER_CONF_SECTION
        long_query_time = CONF.get(self.manager).get(
            'guest_log_long_query_time') / 1000
        general_log_file = self.build_log_file_name(
            self.GUEST_LOG_DEFS_GENERAL_LABEL, owner,
            datastore_dir=datastore_dir)
        error_log_file = self.validate_log_file('/var/log/mysqld.log', owner)
        slow_query_log_file = self.build_log_file_name(
            self.GUEST_LOG_DEFS_SLOW_QUERY_LABEL, owner,
            datastore_dir=datastore_dir)
return { self.GUEST_LOG_DEFS_GENERAL_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER, self.GUEST_LOG_USER_LABEL: owner, self.GUEST_LOG_FILE_LABEL: general_log_file, self.GUEST_LOG_SECTION_LABEL: server_section, self.GUEST_LOG_ENABLE_LABEL: { 'general_log': 'on', 'general_log_file': general_log_file, 'log_output': 'file', }, self.GUEST_LOG_DISABLE_LABEL: { 'general_log': 'off', }, }, self.GUEST_LOG_DEFS_SLOW_QUERY_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER, self.GUEST_LOG_USER_LABEL: owner, self.GUEST_LOG_FILE_LABEL: slow_query_log_file, self.GUEST_LOG_SECTION_LABEL: server_section, self.GUEST_LOG_ENABLE_LABEL: { 'slow_query_log': 'on', 'slow_query_log_file': slow_query_log_file, 'long_query_time': long_query_time, }, self.GUEST_LOG_DISABLE_LABEL: { 'slow_query_log': 'off', }, }, self.GUEST_LOG_DEFS_ERROR_LABEL: { self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.SYS, self.GUEST_LOG_USER_LABEL: owner, self.GUEST_LOG_FILE_LABEL: error_log_file, }, } def get_service_status(self): try: app = self.mysql_app(self.status) with service.BaseLocalSqlClient(app.get_engine()) as client: cmd = "SELECT 1;" client.execute(cmd) LOG.debug("Database service check: database query is responsive") return rd_instance.ServiceStatuses.HEALTHY except Exception as e: LOG.warning('Failed to query database, error: %s', str(e)) return super(MySqlManager, self).get_service_status() def change_passwords(self, context, users): with EndNotification(context): self.mysql_admin().change_passwords(users) def update_attributes(self, context, username, hostname, user_attrs): with EndNotification(context): self.mysql_admin().update_attributes( username, hostname, user_attrs) def reset_configuration(self, context, configuration): app = self.mysql_app(self.mysql_app_status.get()) app.reset_configuration(configuration) def create_database(self, context, databases): with EndNotification(context): return self.mysql_admin().create_database(databases) def create_user(self, context, users): with EndNotification(context): self.mysql_admin().create_user(users) def delete_database(self, context, database): with EndNotification(context): return self.mysql_admin().delete_database(database) def delete_user(self, context, user): with EndNotification(context): self.mysql_admin().delete_user(user) def get_user(self, context, username, hostname): return self.mysql_admin().get_user(username, hostname) def grant_access(self, context, username, hostname, databases): return self.mysql_admin().grant_access(username, hostname, databases) def revoke_access(self, context, username, hostname, database): return self.mysql_admin().revoke_access(username, hostname, database) def list_access(self, context, username, hostname): return self.mysql_admin().list_access(username, hostname) def list_databases(self, context, limit=None, marker=None, include_marker=False): return self.mysql_admin().list_databases(limit, marker, include_marker) def list_users(self, context, limit=None, marker=None, include_marker=False): return self.mysql_admin().list_users(limit, marker, include_marker) def enable_root(self, context): return self.mysql_admin().enable_root() def enable_root_with_password(self, context, root_password=None): return self.mysql_admin().enable_root(root_password) def is_root_enabled(self, context): return self.mysql_admin().is_root_enabled() def disable_root(self, context): return self.mysql_admin().disable_root() def _perform_restore(self, backup_info, context, restore_location, app): LOG.info("Restoring database from backup %s, 
backup_info: %s", backup_info['id'], backup_info) try: backup.restore(context, backup_info, restore_location) except Exception: LOG.exception("Error performing restore from backup %s.", backup_info['id']) app.status.set_status(rd_instance.ServiceStatuses.FAILED) raise LOG.info("Restored database successfully.") def do_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): """This is called from prepare in the base class.""" app = self.mysql_app(self.mysql_app_status.get()) app.install_if_needed(packages) if device_path: LOG.info('Prepare the storage for %s', device_path) app.stop_db( do_not_start_on_reboot=self.volume_do_not_start_on_reboot ) device = volume.VolumeDevice(device_path) # unmount if device is already mounted device.unmount_device(device_path) device.format() if os.path.exists(mount_point): # rsync existing data to a "data" sub-directory # on the new volume device.migrate_data(mount_point, target_subdir="data") # mount the volume device.mount(mount_point) operating_system.chown(mount_point, service.MYSQL_OWNER, service.MYSQL_OWNER, recursive=False, as_root=True) LOG.debug("Mounted the volume at %s", mount_point) # We need to temporarily update the default my.cnf so that # mysql will start after the volume is mounted. Later on it # will be changed based on the config template # (see MySqlApp.secure()) and restart. app.set_data_dir(mount_point + '/data') app.start_mysql() LOG.info('Finish to prepare the storage for %s', device_path) if backup_info: self._perform_restore(backup_info, context, mount_point + "/data", app) app.secure(config_contents) enable_root_on_restore = (backup_info and self.mysql_admin().is_root_enabled()) if enable_root_on_restore: app.secure_root(secure_remote_root=False) self.mysql_app_status.get().report_root(context) else: app.secure_root(secure_remote_root=True) if snapshot: self.attach_replica(context, snapshot, snapshot['config']) def pre_upgrade(self, context): app = self.mysql_app(self.mysql_app_status.get()) data_dir = app.get_data_dir() mount_point, _data = os.path.split(data_dir) save_dir = "%s/etc_mysql" % mount_point save_etc_dir = "%s/etc" % mount_point home_save = "%s/trove_user" % mount_point app.status.begin_restart() app.stop_db() if operating_system.exists("/etc/my.cnf", as_root=True): operating_system.create_directory(save_etc_dir, as_root=True) operating_system.copy("/etc/my.cnf", save_etc_dir, preserve=True, as_root=True) operating_system.copy("/etc/mysql/.", save_dir, preserve=True, as_root=True) operating_system.copy("%s/." % os.path.expanduser('~'), home_save, preserve=True, as_root=True) self.unmount_volume(context, mount_point=mount_point) return { 'mount_point': mount_point, 'save_dir': save_dir, 'save_etc_dir': save_etc_dir, 'home_save': home_save } def post_upgrade(self, context, upgrade_info): app = self.mysql_app(self.mysql_app_status.get()) app.stop_db() if 'device' in upgrade_info: self.mount_volume(context, mount_point=upgrade_info['mount_point'], device_path=upgrade_info['device'], write_to_fstab=True) operating_system.chown(path=upgrade_info['mount_point'], user=service.MYSQL_OWNER, group=service.MYSQL_OWNER, recursive=True, as_root=True) self._restore_home_directory(upgrade_info['home_save']) if operating_system.exists(upgrade_info['save_etc_dir'], is_directory=True, as_root=True): self._restore_directory(upgrade_info['save_etc_dir'], "/etc") self._restore_directory("%s/." 
% upgrade_info['save_dir'], "/etc/mysql") self.configuration_manager.refresh_cache() app.start_mysql() app.status.end_restart() def restart(self, context): app = self.mysql_app(self.mysql_app_status.get()) app.restart() def start_db_with_conf_changes(self, context, config_contents): app = self.mysql_app(self.mysql_app_status.get()) app.start_db_with_conf_changes(config_contents) def stop_db(self, context, do_not_start_on_reboot=False): app = self.mysql_app(self.mysql_app_status.get()) app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) def create_backup(self, context, backup_info): """ Entry point for initiating a backup for this guest agents db instance. The call currently blocks until the backup is complete or errors. If device_path is specified, it will be mounted based to a point specified in configuration. :param backup_info: a dictionary containing the db instance id of the backup task, location, type, and other data. """ with EndNotification(context): backup.backup(context, backup_info) def update_overrides(self, context, overrides, remove=False): app = self.mysql_app(self.mysql_app_status.get()) if remove: app.remove_overrides() app.update_overrides(overrides) def apply_overrides(self, context, overrides): LOG.debug("Applying overrides (%s).", overrides) app = self.mysql_app(self.mysql_app_status.get()) app.apply_overrides(overrides) def backup_required_for_replication(self, context): return self.replication.backup_required_for_replication() def get_replication_snapshot(self, context, snapshot_info, replica_source_config=None): LOG.info("Getting replication snapshot, snapshot_info: %s", snapshot_info) app = self.mysql_app(self.mysql_app_status.get()) self.replication.enable_as_master(app, replica_source_config) snapshot_id, log_position = self.replication.snapshot_for_replication( context, app, None, snapshot_info) volume_stats = self.get_filesystem_stats(context, None) replication_snapshot = { 'dataset': { 'datastore_manager': self.manager, 'dataset_size': volume_stats.get('used', 0.0), 'volume_size': volume_stats.get('total', 0.0), 'snapshot_id': snapshot_id }, 'replication_strategy': self.replication_strategy, 'master': self.replication.get_master_ref(app, snapshot_info), 'log_position': log_position } return replication_snapshot def enable_as_master(self, context, replica_source_config): LOG.debug("Calling enable_as_master.") app = self.mysql_app(self.mysql_app_status.get()) self.replication.enable_as_master(app, replica_source_config) # DEPRECATED: Maintain for API Compatibility def get_txn_count(self, context): LOG.debug("Calling get_txn_count") return self.mysql_app(self.mysql_app_status.get()).get_txn_count() def get_last_txn(self, context): LOG.debug("Calling get_last_txn") return self.mysql_app(self.mysql_app_status.get()).get_last_txn() def get_latest_txn_id(self, context): LOG.debug("Calling get_latest_txn_id.") return self.mysql_app(self.mysql_app_status.get()).get_latest_txn_id() def wait_for_txn(self, context, txn): LOG.debug("Calling wait_for_txn.") self.mysql_app(self.mysql_app_status.get()).wait_for_txn(txn) def detach_replica(self, context, for_failover=False): LOG.debug("Detaching replica.") app = self.mysql_app(self.mysql_app_status.get()) replica_info = self.replication.detach_slave(app, for_failover) return replica_info def get_replica_context(self, context): LOG.debug("Getting replica context.") app = self.mysql_app(self.mysql_app_status.get()) replica_info = self.replication.get_replica_context(app) return replica_info def 
_validate_slave_for_replication(self, context, replica_info):
        if replica_info['replication_strategy'] != self.replication_strategy:
            raise exception.IncompatibleReplicationStrategy(
                replica_info.update({
                    'guest_strategy': self.replication_strategy
                }))

        volume_stats = self.get_filesystem_stats(context, None)
        if (volume_stats.get('total', 0.0) <
                replica_info['dataset']['dataset_size']):
            raise exception.InsufficientSpaceForReplica(
                replica_info.update({
                    'slave_volume_size': volume_stats.get('total', 0.0)
                }))

    def attach_replica(self, context, replica_info, slave_config):
        LOG.info("Attaching replica.")
        app = self.mysql_app(self.mysql_app_status.get())
        try:
            if 'replication_strategy' in replica_info:
                self._validate_slave_for_replication(context, replica_info)
            self.replication.enable_as_slave(app, replica_info, slave_config)
        except Exception:
            LOG.exception("Error enabling replication.")
            app.status.set_status(rd_instance.ServiceStatuses.FAILED)
            raise

    def make_read_only(self, context, read_only):
        LOG.debug("Executing make_read_only(%s)", read_only)
        app = self.mysql_app(self.mysql_app_status.get())
        app.make_read_only(read_only)

    def cleanup_source_on_replica_detach(self, context, replica_info):
        LOG.debug("Cleaning up the source on the detach of a replica.")
        self.replication.cleanup_source_on_replica_detach(self.mysql_admin(),
                                                          replica_info)

    def demote_replication_master(self, context):
        LOG.debug("Demoting replication master.")
        app = self.mysql_app(self.mysql_app_status.get())
        self.replication.demote_master(app)


trove-12.1.0.dev92/trove/guestagent/datastore/mysql_common/service.py

# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
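# NOTE(editor): load_mysqld_options() below shells out to
# "mysqld --print-defaults" and folds the second line of output into a
# dict of option lists. A standalone sketch of just that parsing step,
# fed with canned output instead of a live mysqld binary; the sample
# banner and option strings are illustrative only.

from collections import defaultdict


def parse_print_defaults(output):
    """Parse `mysqld --print-defaults` output into {option: [values]}."""
    # The first line is a human-readable banner; the option string that
    # matters is on the second line.
    arglist = output.split("\n")[1].split()
    args = defaultdict(list)
    for item in arglist:
        if "=" in item:
            key, value = item.split("=", 1)
            args[key.lstrip("--")].append(value)
        else:
            # Flag-style options carry no value.
            args[item.lstrip("--")].append(None)
    return args


# Example with made-up output:
# parse_print_defaults(
#     "mysqld would have been started with:\n"
#     "--user=mysql --pid-file=/var/run/mysqld/mysqld.pid --skip-name-resolve")
# -> {'user': ['mysql'], 'pid-file': ['/var/run/mysqld/mysqld.pid'],
#     'skip-name-resolve': [None]}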
# import abc from collections import defaultdict import os import re import six import uuid from oslo_log import log as logging from oslo_utils import encodeutils from pymysql import err as pymysql_err from six.moves import urllib import sqlalchemy from sqlalchemy import exc from sqlalchemy import interfaces from sqlalchemy.sql.expression import text from trove.common import cfg from trove.common.configurations import MySQLConfParser from trove.common.db.mysql import models from trove.common import exception from trove.common.exception import PollTimeOut from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.stream_codecs import IniCodec from trove.common import utils from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common import sql_query from trove.guestagent.datastore import service from trove.guestagent import pkg ADMIN_USER_NAME = "os_admin" CONNECTION_STR_FORMAT = ("mysql+pymysql://%s:%s@localhost/?" "unix_socket=/var/run/mysqld/mysqld.sock") LOG = logging.getLogger(__name__) FLUSH = text(sql_query.FLUSH) ENGINE = None DATADIR = None PREPARING = False UUID = False TMP_MYCNF = "/tmp/my.cnf.tmp" MYSQL_BASE_DIR = "/var/lib/mysql" CONF = cfg.CONF INCLUDE_MARKER_OPERATORS = { True: ">=", False: ">" } OS_NAME = operating_system.get_os() MYSQL_CONFIG = {operating_system.REDHAT: "/etc/my.cnf", operating_system.DEBIAN: "/etc/mysql/my.cnf", operating_system.SUSE: "/etc/my.cnf"}[OS_NAME] MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"] MYSQL_OWNER = 'mysql' CNF_EXT = 'cnf' CNF_INCLUDE_DIR = '/etc/mysql/conf.d' CNF_MASTER = 'master-replication' CNF_SLAVE = 'slave-replication' # Create a package impl packager = pkg.Package() def clear_expired_password(): """ Some mysql installations generate random root password and save it in /root/.mysql_secret, this password is expired and should be changed by client that supports expired passwords. 
""" LOG.debug("Removing expired password.") secret_file = "/root/.mysql_secret" try: out, err = utils.execute("cat", secret_file, run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: LOG.warning("/root/.mysql_secret does not exist.") else: m = re.match('# The random password set for the root user at .*: (.*)', out) if m: try: out, err = utils.execute("mysqladmin", "-p%s" % m.group(1), "password", "", run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: LOG.exception("Cannot change mysql password.") return operating_system.remove(secret_file, force=True, as_root=True) LOG.debug("Expired password removed.") # The root user password will be changed in app.secure_root() later on LOG.debug('Initializae the root password to empty') try: utils.execute("mysqladmin", "--user=root", "password", "", run_as_root=True, root_helper="sudo") except Exception: LOG.exception("Failed to initializae the root password") def load_mysqld_options(): # find mysqld bin for bin in MYSQL_BIN_CANDIDATES: if os.path.isfile(bin): mysqld_bin = bin break else: return {} try: out, err = utils.execute(mysqld_bin, "--print-defaults", run_as_root=True, root_helper="sudo") arglist = re.split("\n", out)[1].split() args = defaultdict(list) for item in arglist: if "=" in item: key, value = item.split("=", 1) args[key.lstrip("--")].append(value) else: args[item.lstrip("--")].append(None) return args except exception.ProcessExecutionError: return {} class BaseMySqlAppStatus(service.BaseDbStatus): @classmethod def get(cls): if not cls._instance: cls._instance = BaseMySqlAppStatus() return cls._instance def _get_actual_db_status(self): """Check database service status. The checks which don't need service app can be put here. """ try: out, _ = utils.execute_with_timeout( "/bin/ps", "-C", "mysqld", "h", log_output_on_error=True ) pid = out.split()[0] LOG.debug('Database service check: service PID exists: %s', pid) return rd_instance.ServiceStatuses.RUNNING except exception.ProcessExecutionError: LOG.warning("Database service check: Failed to get database " "service status by ps, fall back to check PID file.") mysql_args = load_mysqld_options() pid_file = mysql_args.get('pid_file', ['/var/run/mysqld/mysqld.pid'])[0] if os.path.exists(pid_file): LOG.info("Database service check: MySQL Service Status is " "CRASHED.") return rd_instance.ServiceStatuses.CRASHED else: LOG.info("Database service check: MySQL Service Status is " "SHUTDOWN.") return rd_instance.ServiceStatuses.SHUTDOWN class BaseLocalSqlClient(object): """A sqlalchemy wrapper to manage transactions.""" def __init__(self, engine, use_flush=True): self.engine = engine self.use_flush = use_flush def __enter__(self): self.conn = self.engine.connect() self.trans = self.conn.begin() return self.conn def __exit__(self, type, value, traceback): if self.trans: if type is not None: # An error occurred self.trans.rollback() else: if self.use_flush: self.conn.execute(FLUSH) self.trans.commit() self.conn.close() def execute(self, t, **kwargs): try: return self.conn.execute(t, kwargs) except Exception: self.trans.rollback() self.trans = None raise @six.add_metaclass(abc.ABCMeta) class BaseMySqlAdmin(object): """Handles administrative tasks on the MySQL database.""" def __init__(self, local_sql_client, mysql_root_access, mysql_app): self._local_sql_client = local_sql_client self._mysql_root_access = mysql_root_access self._mysql_app = mysql_app(local_sql_client) @property def local_sql_client(self): return self._local_sql_client @property def 
mysql_root_access(self): return self._mysql_root_access @property def mysql_app(self): return self._mysql_app def _associate_dbs(self, user): """Internal. Given a MySQLUser, populate its databases attribute.""" LOG.debug("Associating dbs to user %(name)s at %(host)s.", {'name': user.name, 'host': user.host}) with self.local_sql_client(self.mysql_app.get_engine()) as client: q = sql_query.Query() q.columns = ["grantee", "table_schema"] q.tables = ["information_schema.SCHEMA_PRIVILEGES"] q.group = ["grantee", "table_schema"] q.where = ["privilege_type != 'USAGE'"] t = text(str(q)) db_result = client.execute(t) for db in db_result: LOG.debug("\t db: %s.", db) if db['grantee'] == "'%s'@'%s'" % (user.name, user.host): user.databases = db['table_schema'] def change_passwords(self, users): """Change the passwords of one or more existing users.""" LOG.debug("Changing the password of some users.") with self.local_sql_client(self.mysql_app.get_engine()) as client: for item in users: LOG.debug("Changing password for user %s.", item) user_dict = {'_name': item['name'], '_host': item['host'], '_password': item['password']} user = models.MySQLUser.deserialize(user_dict) LOG.debug("\tDeserialized: %s.", user.__dict__) uu = sql_query.SetPassword(user.name, host=user.host, new_password=user.password) t = text(str(uu)) client.execute(t) def update_attributes(self, username, hostname, user_attrs): """Change the attributes of an existing user.""" LOG.debug("Changing user attributes for user %s.", username) user = self._get_user(username, hostname) new_name = user_attrs.get('name') new_host = user_attrs.get('host') new_password = user_attrs.get('password') if new_name or new_host or new_password: with self.local_sql_client(self.mysql_app.get_engine()) as client: if new_password is not None: uu = sql_query.SetPassword(user.name, host=user.host, new_password=new_password) t = text(str(uu)) client.execute(t) if new_name or new_host: uu = sql_query.RenameUser(user.name, host=user.host, new_user=new_name, new_host=new_host) t = text(str(uu)) client.execute(t) def create_database(self, databases): """Create the list of specified databases.""" with self.local_sql_client(self.mysql_app.get_engine()) as client: for item in databases: mydb = models.MySQLSchema.deserialize(item) mydb.check_create() cd = sql_query.CreateDatabase(mydb.name, mydb.character_set, mydb.collate) t = text(str(cd)) LOG.debug('Creating database, command: %s', str(cd)) client.execute(t) def create_user(self, users): """Create users and grant them privileges for the specified databases. 
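
        A minimal usage sketch (illustrative only; the underscore-prefixed
        field names are an assumption inferred from how ``change_passwords``
        above builds dicts for ``models.MySQLUser.deserialize``)::

            users = [{'_name': 'jdoe', '_host': '%', '_password': 's3cret',
                      '_databases': [{'_name': 'reports',
                                      '_character_set': 'utf8',
                                      '_collate': 'utf8_general_ci'}]}]
            admin.create_user(users)  # 'admin' is a concrete BaseMySqlAdmin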
""" with self.local_sql_client(self.mysql_app.get_engine()) as client: for item in users: user = models.MySQLUser.deserialize(item) user.check_create() cu = sql_query.CreateUser(user.name, host=user.host, clear=user.password) t = text(str(cu)) client.execute(t, **cu.keyArgs) for database in user.databases: mydb = models.MySQLSchema.deserialize(database) g = sql_query.Grant(permissions='ALL', database=mydb.name, user=user.name, host=user.host) t = text(str(g)) LOG.debug('Creating user, command: %s', str(g)) client.execute(t) def delete_database(self, database): """Delete the specified database.""" with self.local_sql_client(self.mysql_app.get_engine()) as client: mydb = models.MySQLSchema.deserialize(database) mydb.check_delete() dd = sql_query.DropDatabase(mydb.name) t = text(str(dd)) client.execute(t) def delete_user(self, user): """Delete the specified user.""" mysql_user = models.MySQLUser.deserialize(user) mysql_user.check_delete() self.delete_user_by_name(mysql_user.name, mysql_user.host) def delete_user_by_name(self, name, host='%'): with self.local_sql_client(self.mysql_app.get_engine()) as client: du = sql_query.DropUser(name, host=host) t = text(str(du)) LOG.debug("delete_user_by_name: %s", t) client.execute(t) def get_user(self, username, hostname): user = self._get_user(username, hostname) if not user: return None return user.serialize() def _get_user(self, username, hostname): """Return a single user matching the criteria.""" user = None try: # Could possibly throw a ValueError here. user = models.MySQLUser(name=username) user.check_reserved() except ValueError as ve: LOG.exception("Error Getting user information") err_msg = encodeutils.exception_to_unicode(ve) raise exception.BadRequest(_("Username %(user)s is not valid" ": %(reason)s") % {'user': username, 'reason': err_msg} ) with self.local_sql_client(self.mysql_app.get_engine()) as client: q = sql_query.Query() q.columns = ['User', 'Host'] q.tables = ['mysql.user'] q.where = ["Host != 'localhost'", "User = '%s'" % username, "Host = '%s'" % hostname] q.order = ['User', 'Host'] t = text(str(q)) result = client.execute(t).fetchall() LOG.debug("Getting user information %s.", result) if len(result) != 1: return None found_user = result[0] user.host = found_user['Host'] self._associate_dbs(user) return user def grant_access(self, username, hostname, databases): """Grant a user permission to use a given database.""" user = self._get_user(username, hostname) mydb = None # cache the model as we just want name validation with self.local_sql_client(self.mysql_app.get_engine()) as client: for database in databases: try: if mydb: mydb.name = database else: mydb = models.MySQLSchema(name=database) mydb.check_reserved() except ValueError: LOG.exception("Error granting access") raise exception.BadRequest(_( "Grant access to %s is not allowed") % database) g = sql_query.Grant(permissions='ALL', database=mydb.name, user=user.name, host=user.host, hashed=user.password) t = text(str(g)) client.execute(t) def is_root_enabled(self): """Return True if root access is enabled; False otherwise.""" LOG.debug("Class type of mysql_root_access is %s ", self.mysql_root_access) return self.mysql_root_access.is_root_enabled() def enable_root(self, root_password=None): """Enable the root user global access and/or reset the root password. 
""" return self.mysql_root_access.enable_root(root_password) def disable_root(self): """Disable the root user global access """ return self.mysql_root_access.disable_root() def list_databases(self, limit=None, marker=None, include_marker=False): """List databases the user created on this mysql instance.""" LOG.debug("---Listing Databases---") ignored_database_names = "'%s'" % "', '".join(cfg.get_ignored_dbs()) LOG.debug("The following database names are on ignore list and will " "be omitted from the listing: %s", ignored_database_names) databases = [] with self.local_sql_client(self.mysql_app.get_engine()) as client: # If you have an external volume mounted at /var/lib/mysql # the lost+found directory will show up in mysql as a database # which will create errors if you try to do any database ops # on it. So we remove it here if it exists. q = sql_query.Query() q.columns = [ 'schema_name as name', 'default_character_set_name as charset', 'default_collation_name as collation', ] q.tables = ['information_schema.schemata'] q.where = ["schema_name NOT IN (" + ignored_database_names + ")"] q.order = ['schema_name ASC'] if limit: q.limit = limit + 1 if marker: q.where.append("schema_name %s '%s'" % (INCLUDE_MARKER_OPERATORS[include_marker], marker)) t = text(str(q)) database_names = client.execute(t) next_marker = None LOG.debug("database_names = %r.", database_names) for count, database in enumerate(database_names): if limit is not None and count >= limit: break LOG.debug("database = %s.", str(database)) mysql_db = models.MySQLSchema(name=database[0], character_set=database[1], collate=database[2]) next_marker = mysql_db.name databases.append(mysql_db.serialize()) LOG.debug("databases = %s", str(databases)) if limit is not None and database_names.rowcount <= limit: next_marker = None return databases, next_marker def list_users(self, limit=None, marker=None, include_marker=False): """List users that have access to the database.""" ''' SELECT User, Host, Marker FROM (SELECT User, Host, CONCAT(User, '@', Host) as Marker FROM mysql.user ORDER BY 1, 2) as innerquery WHERE Marker > :marker ORDER BY Marker LIMIT :limit; ''' LOG.debug("---Listing Users---") ignored_user_names = "'%s'" % "', '".join(cfg.get_ignored_users()) LOG.debug("The following user names are on ignore list and will " "be omitted from the listing: %s", ignored_user_names) users = [] with self.local_sql_client(self.mysql_app.get_engine()) as client: iq = sql_query.Query() # Inner query. iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"] iq.tables = ['mysql.user'] iq.order = ['User', 'Host'] innerquery = str(iq).rstrip(';') oq = sql_query.Query() # Outer query. 
oq.columns = ['User', 'Host', 'Marker'] oq.tables = ['(%s) as innerquery' % innerquery] oq.where = [ "Host != 'localhost'", "User NOT IN (" + ignored_user_names + ")"] oq.order = ['Marker'] if marker: oq.where.append("Marker %s '%s'" % (INCLUDE_MARKER_OPERATORS[include_marker], marker)) if limit: oq.limit = limit + 1 t = text(str(oq)) result = client.execute(t) next_marker = None LOG.debug("result = %s", str(result)) for count, row in enumerate(result): if limit is not None and count >= limit: break LOG.debug("user = %s", str(row)) mysql_user = models.MySQLUser(name=row['User'], host=row['Host']) mysql_user.check_reserved() self._associate_dbs(mysql_user) next_marker = row['Marker'] users.append(mysql_user.serialize()) if limit is not None and result.rowcount <= limit: next_marker = None LOG.debug("users = %s", str(users)) return users, next_marker def revoke_access(self, username, hostname, database): """Revoke a user's permission to use a given database.""" user = self._get_user(username, hostname) with self.local_sql_client(self.mysql_app.get_engine()) as client: r = sql_query.Revoke(database=database, user=user.name, host=user.host) t = text(str(r)) client.execute(t) def list_access(self, username, hostname): """Show all the databases to which the user has more than USAGE granted. """ user = self._get_user(username, hostname) return user.databases class BaseKeepAliveConnection(interfaces.PoolListener): """ A connection pool listener that ensures live connections are returned from the connection pool at checkout. This alleviates the problem of MySQL connections timing out. """ def checkout(self, dbapi_con, con_record, con_proxy): """Event triggered when a connection is checked out from the pool.""" try: try: dbapi_con.ping(False) except TypeError: dbapi_con.ping() except dbapi_con.OperationalError as ex: if ex.args[0] in (2006, 2013, 2014, 2045, 2055): raise exc.DisconnectionError() else: raise # MariaDB seems to timeout the client in a different # way than MySQL and PXC except pymysql_err.InternalError as ex: if "Packet sequence number wrong" in str(ex): raise exc.DisconnectionError() elif 'Connection was killed' in str(ex): raise exc.DisconnectionError() else: raise @six.add_metaclass(abc.ABCMeta) class BaseMySqlApp(object): """Prepares DBaaS on a Guest container.""" TIME_OUT = 1000 CFG_CODEC = IniCodec() @property def local_sql_client(self): return self._local_sql_client @property def keep_alive_connection_cls(self): return self._keep_alive_connection_cls @property def service_candidates(self): return ["mysql", "mysqld", "mysql-server"] @property def mysql_service(self): service_candidates = self.service_candidates return operating_system.service_discovery(service_candidates) configuration_manager = ConfigurationManager( MYSQL_CONFIG, MYSQL_OWNER, MYSQL_OWNER, CFG_CODEC, requires_root=True, override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT)) def get_engine(self): """Create the default engine with the updated admin user. If admin user not created yet, use root instead. 
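
        Illustrative call pattern (a sketch, not part of the original
        module; ``app`` stands for any concrete ``BaseMySqlApp``)::

            engine = app.get_engine()   # cached in the module-level ENGINE
            with app.local_sql_client(engine) as client:
                client.execute(text('SELECT 1'))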
""" global ENGINE if ENGINE: return ENGINE user = ADMIN_USER_NAME password = "" try: password = self.get_auth_password() except exception.UnprocessableEntity: # os_admin user not created yet user = 'root' ENGINE = sqlalchemy.create_engine( CONNECTION_STR_FORMAT % (user, urllib.parse.quote(password.strip())), pool_recycle=120, echo=CONF.sql_query_logging, listeners=[self.keep_alive_connection_cls()]) return ENGINE @classmethod def get_auth_password(cls): auth_config = operating_system.read_file( cls.get_client_auth_file(), codec=cls.CFG_CODEC) return auth_config['client']['password'] @classmethod def get_data_dir(cls): return cls.configuration_manager.get_value( MySQLConfParser.SERVER_CONF_SECTION).get('datadir') @classmethod def set_data_dir(cls, value): cls.configuration_manager.apply_system_override( {MySQLConfParser.SERVER_CONF_SECTION: {'datadir': value}}) @classmethod def get_client_auth_file(cls): return guestagent_utils.build_file_path("~", ".my.cnf") def __init__(self, status, local_sql_client, keep_alive_connection_cls): """By default login with root no password for initial setup.""" self.state_change_wait_time = CONF.state_change_wait_time self.status = status self._local_sql_client = local_sql_client self._keep_alive_connection_cls = keep_alive_connection_cls def _create_admin_user(self, client, password): """ Create a os_admin user with a random password with all privileges similar to the root user. """ LOG.debug("Creating Trove admin user '%s'.", ADMIN_USER_NAME) host = "localhost" try: cu = sql_query.CreateUser(ADMIN_USER_NAME, host=host, clear=password) t = text(str(cu)) client.execute(t, **cu.keyArgs) except (exc.OperationalError, exc.InternalError) as err: # Ignore, user is already created, just reset the password # (user will already exist in a restore from backup) LOG.debug(err) uu = sql_query.SetPassword(ADMIN_USER_NAME, host=host, new_password=password) t = text(str(uu)) client.execute(t) g = sql_query.Grant(permissions='ALL', user=ADMIN_USER_NAME, host=host, grant_option=True) t = text(str(g)) client.execute(t) LOG.debug("Trove admin user '%s' created.", ADMIN_USER_NAME) @staticmethod def _generate_root_password(client): """Generate, set, and preserve a random password for root@localhost when invoking mysqladmin to determine the execution status of the mysql service. """ localhost = "localhost" new_password = utils.generate_random_password() uu = sql_query.SetPassword( models.MySQLUser.root_username, host=localhost, new_password=new_password) t = text(str(uu)) client.execute(t) # Save the password to root's private .my.cnf file root_sect = {'client': {'user': 'root', 'password': new_password, 'host': localhost}} operating_system.write_file('/root/.my.cnf', root_sect, codec=IniCodec(), as_root=True) def install_if_needed(self, packages): """Prepare the guest machine with a secure mysql server installation. 
""" LOG.info("Preparing Guest as MySQL Server.") if not packager.pkg_is_installed(packages): LOG.debug("Installing MySQL server.") self._clear_mysql_config() # set blank password on pkg configuration stage pkg_opts = {'root_password': '', 'root_password_again': ''} packager.pkg_install(packages, pkg_opts, self.TIME_OUT) self._create_mysql_confd_dir() LOG.info("Finished installing MySQL server.") self.start_mysql() def secure(self, config_contents): LOG.debug("Securing MySQL now.") clear_expired_password() LOG.debug("Generating admin password.") admin_password = utils.generate_random_password() # By default, MySQL does not require a password at all for connecting # as root engine = sqlalchemy.create_engine( CONNECTION_STR_FORMAT % ('root', ''), echo=True) with self.local_sql_client(engine, use_flush=False) as client: self._create_admin_user(client, admin_password) LOG.debug("Switching to the '%s' user now.", ADMIN_USER_NAME) engine = sqlalchemy.create_engine( CONNECTION_STR_FORMAT % (ADMIN_USER_NAME, urllib.parse.quote(admin_password)), echo=True) with self.local_sql_client(engine) as client: self._remove_anonymous_user(client) self.stop_db() self._reset_configuration(config_contents, admin_password) self.start_mysql() LOG.debug("MySQL secure complete.") def _reset_configuration(self, configuration, admin_password=None): if not admin_password: # Take the current admin password from the base configuration file # if not given. admin_password = self.get_auth_password() self.configuration_manager.save_configuration(configuration) self._save_authentication_properties(admin_password) self.wipe_ib_logfiles() def _save_authentication_properties(self, admin_password): # Use localhost to connect with mysql using unix socket instead of ip # and port. client_sect = {'client': {'user': ADMIN_USER_NAME, 'password': admin_password, 'host': 'localhost'}} operating_system.write_file(self.get_client_auth_file(), client_sect, codec=self.CFG_CODEC) def secure_root(self, secure_remote_root=True): with self.local_sql_client(self.get_engine()) as client: LOG.info("Preserving root access from restore.") self._generate_root_password(client) if secure_remote_root: self._remove_remote_root_access(client) def _clear_mysql_config(self): """Clear old configs, which can be incompatible with new version.""" LOG.debug("Clearing old MySQL config.") random_uuid = str(uuid.uuid4()) configs = ["/etc/my.cnf", "/etc/mysql/conf.d", "/etc/mysql/my.cnf"] for config in configs: try: old_conf_backup = "%s_%s" % (config, random_uuid) operating_system.move(config, old_conf_backup, as_root=True) LOG.debug("%(cfg)s saved to %(saved_cfg)s_%(uuid)s.", {'cfg': config, 'saved_cfg': config, 'uuid': random_uuid}) except exception.ProcessExecutionError: pass def _create_mysql_confd_dir(self): LOG.debug("Creating %s.", CNF_INCLUDE_DIR) operating_system.create_directory(CNF_INCLUDE_DIR, as_root=True) def _enable_mysql_on_boot(self): LOG.debug("Enabling MySQL on boot.") try: utils.execute_with_timeout(self.mysql_service['cmd_enable'], shell=True) except KeyError: LOG.exception("Error enabling MySQL start on boot.") raise RuntimeError(_("Service is not discovered.")) def _disable_mysql_on_boot(self): try: utils.execute_with_timeout(self.mysql_service['cmd_disable'], shell=True) except KeyError: LOG.exception("Error disabling MySQL start on boot.") raise RuntimeError(_("Service is not discovered.")) def stop_db(self, update_db=False, do_not_start_on_reboot=False): LOG.info("Stopping MySQL.") if do_not_start_on_reboot: self._disable_mysql_on_boot() 
try: utils.execute_with_timeout(self.mysql_service['cmd_stop'], shell=True) except KeyError: LOG.exception("Error stopping MySQL.") raise RuntimeError(_("Service is not discovered.")) if not self.status.wait_for_real_status_to_change_to( rd_instance.ServiceStatuses.SHUTDOWN, self.state_change_wait_time, update_db): LOG.error("Could not stop MySQL.") self.status.end_restart() raise RuntimeError(_("Could not stop MySQL!")) def _remove_anonymous_user(self, client): LOG.debug("Removing anonymous user.") t = text(sql_query.REMOVE_ANON) client.execute(t) LOG.debug("Anonymous user removed.") def _remove_remote_root_access(self, client): LOG.debug("Removing root access.") t = text(sql_query.REMOVE_ROOT) client.execute(t) LOG.debug("Root access removed.") def restart(self): try: self.status.begin_restart() self.stop_db() self.start_mysql() finally: self.status.end_restart() def update_overrides(self, overrides): self._apply_user_overrides(overrides) def _apply_user_overrides(self, overrides): # All user-defined values go to the server section of the configuration # file. if overrides: self.configuration_manager.apply_user_override( {MySQLConfParser.SERVER_CONF_SECTION: overrides}) def apply_overrides(self, overrides): LOG.debug("Applying overrides to MySQL.") with self.local_sql_client(self.get_engine()) as client: LOG.debug("Updating override values in running MySQL.") for k, v in overrides.items(): byte_value = guestagent_utils.to_bytes(v) q = sql_query.SetServerVariable(key=k, value=byte_value) t = text(str(q)) try: client.execute(t) except exc.OperationalError: output = {'key': k, 'value': byte_value} LOG.exception("Unable to set %(key)s with value " "%(value)s.", output) def make_read_only(self, read_only): with self.local_sql_client(self.get_engine()) as client: q = "set global read_only = %s" % read_only client.execute(text(str(q))) def wipe_ib_logfiles(self): """Destroys the iblogfiles. If for some reason the selected log size in the conf changes from the current size of the files MySQL will fail to start, so we delete the files to be safe. """ LOG.info("Wiping ib_logfiles.") for index in range(2): try: # On restarts, sometimes these are wiped. So it can be a race # to have MySQL start up before it's restarted and these have # to be deleted. That's why its ok if they aren't found and # that is why we use the "force" option to "remove". 
operating_system.remove("%s/ib_logfile%d" % (self.get_data_dir(), index), force=True, as_root=True) except exception.ProcessExecutionError: LOG.exception("Could not delete logfile.") raise def remove_overrides(self): self.configuration_manager.remove_user_override() def _remove_replication_overrides(self, cnf_file): LOG.info("Removing replication configuration file.") if os.path.exists(cnf_file): operating_system.remove(cnf_file, as_root=True) def exists_replication_source_overrides(self): return self.configuration_manager.has_system_override(CNF_MASTER) def write_replication_source_overrides(self, overrideValues): self.configuration_manager.apply_system_override(overrideValues, CNF_MASTER) def write_replication_replica_overrides(self, overrideValues): self.configuration_manager.apply_system_override(overrideValues, CNF_SLAVE) def remove_replication_source_overrides(self): self.configuration_manager.remove_system_override(CNF_MASTER) def remove_replication_replica_overrides(self): self.configuration_manager.remove_system_override(CNF_SLAVE) def grant_replication_privilege(self, replication_user): LOG.info("Granting Replication Slave privilege.") LOG.debug("grant_replication_privilege: %s", replication_user) with self.local_sql_client(self.get_engine()) as client: g = sql_query.Grant(permissions=['REPLICATION SLAVE'], user=replication_user['name'], clear=replication_user['password']) t = text(str(g)) client.execute(t) def get_port(self): with self.local_sql_client(self.get_engine()) as client: result = client.execute('SELECT @@port').first() return result[0] def get_binlog_position(self): with self.local_sql_client(self.get_engine()) as client: result = client.execute('SHOW MASTER STATUS').first() binlog_position = { 'log_file': result['File'], 'position': result['Position'] } return binlog_position def execute_on_client(self, sql_statement): LOG.debug("Executing SQL: %s", sql_statement) with self.local_sql_client(self.get_engine()) as client: return client.execute(sql_statement) def start_slave(self): LOG.info("Starting slave replication.") with self.local_sql_client(self.get_engine()) as client: client.execute('START SLAVE') self._wait_for_slave_status("ON", client, 180) def stop_slave(self, for_failover): replication_user = None LOG.info("Stopping slave replication.") with self.local_sql_client(self.get_engine()) as client: result = client.execute('SHOW SLAVE STATUS') replication_user = result.first()['Master_User'] client.execute('STOP SLAVE') client.execute('RESET SLAVE ALL') self._wait_for_slave_status("OFF", client, 180) if not for_failover: client.execute('DROP USER ' + replication_user) return { 'replication_user': replication_user } def stop_master(self): LOG.info("Stopping replication master.") with self.local_sql_client(self.get_engine()) as client: client.execute('RESET MASTER') def _wait_for_slave_status(self, status, client, max_time): def verify_slave_status(): actual_status = client.execute( "SHOW GLOBAL STATUS like 'slave_running'").first()[1] return actual_status.upper() == status.upper() LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status) try: utils.poll_until(verify_slave_status, sleep_time=3, time_out=max_time) LOG.info("Replication is now %s.", status.lower()) except PollTimeOut: raise RuntimeError( _("Replication is not %(status)s after %(max)d seconds.") % { 'status': status.lower(), 'max': max_time}) def start_mysql(self, update_db=False, disable_on_boot=False, timeout=120): LOG.info("Starting MySQL.") # This is the site of all the trouble in the 
restart tests.
        # Essentially what happens is that mysql start fails, but does not
        # die. It is then impossible to kill the original, so

        if disable_on_boot:
            self._disable_mysql_on_boot()
        else:
            self._enable_mysql_on_boot()

        try:
            utils.execute_with_timeout(self.mysql_service['cmd_start'],
                                       shell=True, timeout=timeout)
        except KeyError:
            raise RuntimeError(_("Service is not discovered."))
        except exception.ProcessExecutionError:
            # it seems mysql (percona, at least) might come back with [Fail]
            # but actually come up ok. we're looking into the timing issue on
            # parallel, but for now, we'd like to give it one more chance to
            # come up. so regardless of the execute_with_timeout() response,
            # we'll assume mysql comes up and check its status for a while.
            pass
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.RUNNING,
                self.state_change_wait_time, update_db):
            LOG.error("Start up of MySQL failed.")
            # If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
            try:
                utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
            except exception.ProcessExecutionError:
                LOG.exception("Error killing stalled MySQL start command.")
            # There's nothing more we can do...
            self.status.end_restart()
            raise RuntimeError(_("Could not start MySQL!"))

    def start_db_with_conf_changes(self, config_contents):
        LOG.info("Starting MySQL with conf changes.")
        LOG.debug("Inside the guest - Status is_running = (%s).",
                  self.status.is_running)
        if self.status.is_running:
            LOG.error("Cannot execute start_db_with_conf_changes because "
                      "MySQL state == %s.", self.status)
            raise RuntimeError(_("MySQL not stopped."))
        LOG.info("Resetting configuration.")
        self._reset_configuration(config_contents)
        self.start_mysql(True)

    def reset_configuration(self, configuration):
        config_contents = configuration['config_contents']
        LOG.info("Resetting configuration.")
        self._reset_configuration(config_contents)

    def reset_admin_password(self, admin_password):
        """Replace the password in the my.cnf file."""
        # grant the new admin password
        with self.local_sql_client(self.get_engine()) as client:
            self._create_admin_user(client, admin_password)
        # reset the ENGINE because the password could have changed
        global ENGINE
        ENGINE = None
        self._save_authentication_properties(admin_password)


class BaseMySqlRootAccess(object):

    def __init__(self, local_sql_client, mysql_app):
        self._local_sql_client = local_sql_client
        self._mysql_app = mysql_app

    @property
    def mysql_app(self):
        return self._mysql_app

    @property
    def local_sql_client(self):
        return self._local_sql_client

    def is_root_enabled(self):
        """Return True if root access is enabled; False otherwise."""
        with self.local_sql_client(self.mysql_app.get_engine()) as client:
            t = text(sql_query.ROOT_ENABLED)
            result = client.execute(t)
            LOG.debug("Found %s with remote root access.", result.rowcount)
            return result.rowcount != 0

    def enable_root(self, root_password=None):
        """Enable the root user global access and/or reset the root password.
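
        This is delegated to by ``BaseMySqlAdmin.enable_root`` above. A
        minimal sketch of the result shape (an assumption based on
        ``user.serialize()``; the exact keys come from ``models.MySQLUser``)::

            root_access = BaseMySqlRootAccess(local_sql_client, mysql_app)
            creds = root_access.enable_root('s3cret')
            # creds carries the root user's name, host and password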
""" user = models.MySQLUser.root(password=root_password) with self.local_sql_client(self.mysql_app.get_engine()) as client: try: cu = sql_query.CreateUser(user.name, host=user.host) t = text(str(cu)) client.execute(t, **cu.keyArgs) except (exc.OperationalError, exc.InternalError) as err: # Ignore, user is already created, just reset the password # TODO(rnirmal): More fine grained error checking later on LOG.debug(err) with self.local_sql_client(self.mysql_app.get_engine()) as client: uu = sql_query.SetPassword(user.name, host=user.host, new_password=user.password) t = text(str(uu)) client.execute(t) LOG.debug("CONF.root_grant: %(grant)s CONF.root_grant_option: " "%(grant_option)s.", {'grant': CONF.root_grant, 'grant_option': CONF.root_grant_option}) g = sql_query.Grant(permissions=CONF.root_grant, user=user.name, host=user.host, grant_option=CONF.root_grant_option) t = text(str(g)) client.execute(t) return user.serialize() def disable_root(self): """Reset the root password to an unknown value. """ self.enable_root(root_password=None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/service.py0000644000175000017500000003504300000000000023461 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import time from oslo_log import log as logging from oslo_utils import timeutils from trove.common import cfg from trove.common import context as trove_context from trove.common.i18n import _ from trove.common import instance from trove.conductor import api as conductor_api from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system LOG = logging.getLogger(__name__) CONF = cfg.CONF class BaseDbStatus(object): """ Answers the question "what is the status of the DB application on this box?" The answer can be that the application is not installed, or the state of the application is determined by calling a series of commands. This class also handles saving and load the status of the DB application in the database. The status is updated whenever the update() method is called, except if the state is changed to building or restart mode using the "begin_install" and "begin_restart" methods. The building mode persists in the database while restarting mode does not (so if there is a Python Pete crash update() will set the status to show a failure). These modes are exited and functionality to update() returns when end_install or end_restart() is called, at which point the status again reflects the actual status of the DB app. 
This is a base class, subclasses must implement real logic for determining current status of DB in _get_actual_db_status() """ _instance = None GUESTAGENT_DIR = '~' PREPARE_START_FILENAME = '.guestagent.prepare.start' PREPARE_END_FILENAME = '.guestagent.prepare.end' def __init__(self): if self._instance is not None: raise RuntimeError(_("Cannot instantiate twice.")) self.status = None self.restart_mode = False self.__prepare_completed = None @property def prepare_completed(self): if self.__prepare_completed is None: # Force the file check self.__refresh_prepare_completed() return self.__prepare_completed def __refresh_prepare_completed(self): # Set the value of __prepared_completed based on the existence of # the file. This is required as the state is cached so this method # must be called any time the existence of the file changes. is_file = os.path.isfile( guestagent_utils.build_file_path( self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME)) self.__prepare_completed = is_file if is_file else None def begin_install(self): """First call of the DB prepare.""" prepare_start_file = guestagent_utils.build_file_path( self.GUESTAGENT_DIR, self.PREPARE_START_FILENAME) operating_system.write_file(prepare_start_file, '') self.__refresh_prepare_completed() self.set_status(instance.ServiceStatuses.BUILDING, True) def begin_restart(self): """Called before restarting DB server.""" self.restart_mode = True def set_ready(self): prepare_end_file = guestagent_utils.build_file_path( self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME) operating_system.write_file(prepare_end_file, '') self.__refresh_prepare_completed() def end_install(self, error_occurred=False, post_processing=False): """Called after prepare has ended.""" # Set the "we're done" flag if there's no error and # no post_processing is necessary if not (error_occurred or post_processing): self.set_ready() final_status = None if error_occurred: final_status = instance.ServiceStatuses.FAILED elif post_processing: final_status = instance.ServiceStatuses.INSTANCE_READY if final_status: LOG.info("Set final status to %s.", final_status) self.set_status(final_status, force=True) else: self._end_install_or_restart(True) def end_restart(self): self.restart_mode = False LOG.info("Ending restart.") self._end_install_or_restart(False) def _end_install_or_restart(self, force): """Called after DB is installed or restarted. Updates the database with the actual DB server status. """ real_status = self._get_actual_db_status() LOG.info("Current database status is '%s'.", real_status) self.set_status(real_status, force=force) def _get_actual_db_status(self): raise NotImplementedError() @property def is_installed(self): """ True if DB app should be installed and attempts to ascertain its status won't result in nonsense. 
""" return self.prepare_completed @property def _is_restarting(self): return self.restart_mode @property def is_running(self): """True if DB server is running.""" return (self.status is not None and self.status == instance.ServiceStatuses.RUNNING) def set_status(self, status, force=False): """Use conductor to update the DB app status.""" if force or self.is_installed: LOG.debug("Casting set_status message to conductor " "(status is '%s').", status.description) context = trove_context.TroveContext() heartbeat = {'service_status': status.description} conductor_api.API(context).heartbeat( CONF.guest_id, heartbeat, sent=timeutils.utcnow_ts(microsecond=True)) LOG.debug("Successfully cast set_status.") self.status = status else: LOG.debug("Prepare has not completed yet, skipping heartbeat.") def update(self): """Find and report status of DB on this machine. The database is updated and the status is also returned. """ if self.is_installed and not self._is_restarting: status = self._get_actual_db_status() self.set_status(status) def restart_db_service(self, service_candidates, timeout): """Restart the database. Do not change the service auto-start setting. Disable the Trove instance heartbeat updates during the restart. 1. Stop the database service. 2. Wait for the database to shutdown. 3. Start the database service. 4. Wait for the database to start running. :param service_candidates: List of possible system service names. :type service_candidates: list :param timeout: Wait timeout in seconds. :type timeout: integer :raises: :class:`RuntimeError` on failure. """ try: self.begin_restart() self.stop_db_service(service_candidates, timeout, disable_on_boot=False, update_db=False) self.start_db_service(service_candidates, timeout, enable_on_boot=False, update_db=False) except Exception as e: LOG.exception(e) raise RuntimeError(_("Database restart failed.")) finally: self.end_restart() def start_db_service(self, service_candidates, timeout, enable_on_boot=True, update_db=False): """Start the database service and wait for the database to become available. The service auto-start will be updated only if the service command succeeds. :param service_candidates: List of possible system service names. :type service_candidates: list :param timeout: Wait timeout in seconds. :type timeout: integer :param enable_on_boot: Enable service auto-start. The auto-start setting will be updated only if the service command succeeds. :type enable_on_boot: boolean :param update_db: Suppress the Trove instance heartbeat. :type update_db: boolean :raises: :class:`RuntimeError` on failure. """ LOG.info("Starting database service.") operating_system.start_service(service_candidates, timeout=timeout) self.wait_for_database_service_start(timeout, update_db=update_db) if enable_on_boot: LOG.info("Enable service auto-start on boot.") operating_system.enable_service_on_boot(service_candidates) def wait_for_database_service_start(self, timeout, update_db=False): """Wait for the database to become available. :param timeout: Wait timeout in seconds. :type timeout: integer :param update_db: Suppress the Trove instance heartbeat. :type update_db: boolean :raises: :class:`RuntimeError` on failure. 
""" LOG.debug("Waiting for database to start up.") if not self._wait_for_database_service_status( instance.ServiceStatuses.RUNNING, timeout, update_db): raise RuntimeError(_("Database failed to start.")) LOG.info("Database has started successfully.") def stop_db_service(self, service_candidates, timeout, disable_on_boot=False, update_db=False): """Stop the database service and wait for the database to shutdown. :param service_candidates: List of possible system service names. :type service_candidates: list :param timeout: Wait timeout in seconds. :type timeout: integer :param disable_on_boot: Disable service auto-start. The auto-start setting will be updated only if the service command succeeds. :type disable_on_boot: boolean :param update_db: Suppress the Trove instance heartbeat. :type update_db: boolean :raises: :class:`RuntimeError` on failure. """ LOG.info("Stopping database service.") operating_system.stop_service(service_candidates, timeout=timeout) LOG.debug("Waiting for database to shutdown.") if not self._wait_for_database_service_status( instance.ServiceStatuses.SHUTDOWN, timeout, update_db): raise RuntimeError(_("Database failed to stop.")) LOG.info("Database has stopped successfully.") if disable_on_boot: LOG.info("Disable service auto-start on boot.") operating_system.disable_service_on_boot(service_candidates) def _wait_for_database_service_status(self, status, timeout, update_db): """Wait for the given database status. :param status: The status to wait for. :type status: BaseDbStatus :param timeout: Wait timeout in seconds. :type timeout: integer :param update_db: Suppress the Trove instance heartbeat. :type update_db: boolean :returns: True on success, False otherwise. """ if not self.wait_for_real_status_to_change_to( status, timeout, update_db): LOG.info("Service status did not change to %(status)s " "within the given timeout: %(timeout)ds", {'status': status, 'timeout': timeout}) LOG.debug("Attempting to cleanup stalled services.") try: self.cleanup_stalled_db_services() except Exception: LOG.debug("Cleanup failed.", exc_info=True) return False return True def wait_for_real_status_to_change_to(self, status, max_time, update_db=False): """Waits the given time for the real status to change to the one specified. The internal status is always updated. The public instance state stored in the Trove database is updated only if "update_db" is True. """ end_time = time.time() + max_time # since python does not support a real do-while loop, we have # to emulate one. Hence these shenanigans. We force at least # one pass into the loop and therefore it is safe that # actual_status is initialized in the loop while it is used # outside. loop = True while loop: self.status = self._get_actual_db_status() if self.status == status: if update_db: self.set_status(self.status) return True # should we remain in this loop? this is the thing # that emulates the do-while construct. loop = (time.time() < end_time) # no point waiting if our time is up and we're # just going to error out anyway. if loop: LOG.debug("Waiting for DB status to change from " "%(actual_status)s to %(status)s.", {"actual_status": self.status, "status": status}) time.sleep(CONF.state_change_poll_time) LOG.error("Timeout while waiting for database status to change." 
"Expected state %(status)s, " "current state is %(actual_status)s", {"status": status, "actual_status": self.status}) return False def cleanup_stalled_db_services(self): """An optional datastore-specific code to cleanup stalled database services and other resources after a status change timeout. """ LOG.debug("No cleanup action specified for this datastore.") def report_root(self, context): """Use conductor to update the root-enable status.""" LOG.debug("Casting report_root message to conductor.") conductor_api.API(context).report_root(CONF.guest_id) LOG.debug("Successfully cast report_root.") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106 trove-12.1.0.dev92/trove/guestagent/datastore/technical-preview/0000755000175000017500000000000000000000000025053 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/datastore/technical-preview/__init__.py0000644000175000017500000000000000000000000027152 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/dbaas.py0000644000175000017500000000561100000000000021103 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all processes within the Guest VM, considering it as a Platform The :py:class:`GuestManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to Platform specific operations. 
**Related Flags** """ from itertools import chain import os from oslo_log import log as logging from trove.common import cfg from trove.common.i18n import _ from trove.common import utils LOG = logging.getLogger(__name__) defaults = { 'mysql': 'trove.guestagent.datastore.mysql.manager.Manager', 'percona': 'trove.guestagent.datastore.experimental.percona.manager.Manager', 'pxc': 'trove.guestagent.datastore.experimental.pxc.manager.Manager', 'redis': 'trove.guestagent.datastore.experimental.redis.manager.Manager', 'cassandra': 'trove.guestagent.datastore.experimental.cassandra.manager.Manager', 'couchbase': 'trove.guestagent.datastore.experimental.couchbase.manager.Manager', 'mongodb': 'trove.guestagent.datastore.experimental.mongodb.manager.Manager', 'postgresql': 'trove.guestagent.datastore.experimental.postgresql.manager.Manager', 'couchdb': 'trove.guestagent.datastore.experimental.couchdb.manager.Manager', 'vertica': 'trove.guestagent.datastore.experimental.vertica.manager.Manager', 'db2': 'trove.guestagent.datastore.experimental.db2.manager.Manager', 'mariadb': 'trove.guestagent.datastore.experimental.mariadb.manager.Manager' } CONF = cfg.CONF def get_custom_managers(): return CONF.datastore_registry_ext def datastore_registry(): return dict(chain(defaults.items(), get_custom_managers().items())) def get_filesystem_volume_stats(fs_path): try: stats = os.statvfs(fs_path) except OSError: LOG.exception("Error getting volume stats.") raise RuntimeError(_("Filesystem not found (%s)") % fs_path) total = stats.f_blocks * stats.f_bsize free = stats.f_bfree * stats.f_bsize # return the size in GB used_gb = utils.to_gb(total - free) total_gb = utils.to_gb(total) output = { 'block_size': stats.f_bsize, 'total_blocks': stats.f_blocks, 'free_blocks': stats.f_bfree, 'total': total_gb, 'free': free, 'used': used_gb } return output ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/guest_log.py0000644000175000017500000003623500000000000022027 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import enum import hashlib import os from requests.exceptions import ConnectionError from oslo_log import log as logging from swiftclient.client import ClientException from trove.common import cfg from trove.common import clients from trove.common import exception from trove.common.i18n import _ from trove.common import stream_codecs from trove.common import timeutils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode LOG = logging.getLogger(__name__) CONF = cfg.CONF class LogType(enum.Enum): """Represent the type of the log object.""" # System logs. These are always enabled. SYS = 1 # User logs. These can be enabled or disabled. 
USER = 2 class LogStatus(enum.Enum): """Represent the status of the log object.""" # The log is disabled and potentially no data is being written to # the corresponding log file Disabled = 1 # Logging is on, but no determination has been made about data availability Enabled = 2 # Logging is on, but no log data is available to publish Unavailable = 3 # Logging is on and data is available to be published Ready = 4 # Logging is on and all data has been published Published = 5 # Logging is on and some data has been published Partial = 6 # Log file has been rotated, so next publish will discard log first Rotated = 7 # Waiting for a datastore restart to begin logging Restart_Required = 8 # Now that restart has completed, regular status can be reported again # This is an internal status Restart_Completed = 9 class GuestLog(object): MF_FILE_SUFFIX = '_metafile' MF_LABEL_LOG_NAME = 'log_name' MF_LABEL_LOG_TYPE = 'log_type' MF_LABEL_LOG_FILE = 'log_file' MF_LABEL_LOG_SIZE = 'log_size' MF_LABEL_LOG_HEADER = 'log_header_digest' def __init__(self, log_context, log_name, log_type, log_user, log_file, log_exposed): self._context = log_context self._name = log_name self._type = log_type self._user = log_user self._file = log_file self._exposed = log_exposed self._size = None self._published_size = None self._header_digest = 'abc' self._published_header_digest = None self._status = None self._cached_context = None self._cached_swift_client = None self._enabled = log_type == LogType.SYS self._file_readable = False self._container_name = None self._codec = stream_codecs.JsonCodec() self._set_status(self._type == LogType.USER, LogStatus.Disabled, LogStatus.Enabled) # The directory should already exist - make sure we have access to it log_dir = os.path.dirname(self._file) operating_system.chmod( log_dir, FileMode.ADD_GRP_RX_OTH_RX, as_root=True) @property def context(self): return self._context @context.setter def context(self, context): self._context = context @property def type(self): return self._type @property def swift_client(self): if not self._cached_swift_client or ( self._cached_context != self.context): self._cached_swift_client = clients.swift_client(self.context) self._cached_context = self.context return self._cached_swift_client @property def exposed(self): return self._exposed or self.context.is_admin @property def enabled(self): return self._enabled @enabled.setter def enabled(self, enabled): self._enabled = enabled @property def status(self): return self._status @status.setter def status(self, status): # Keep the status in Restart_Required until we're set # to Restart_Completed if (self.status != LogStatus.Restart_Required or (self.status == LogStatus.Restart_Required and status == LogStatus.Restart_Completed)): self._status = status LOG.debug("Log status for '%(name)s' set to %(status)s", {'name': self._name, 'status': status}) else: LOG.debug("Log status for '%(name)s' *not* set to %(status)s " "(currently %(current_status)s)", {'name': self._name, 'status': status, 'current_status': self.status}) def get_container_name(self, force=False): if not self._container_name or force: container_name = CONF.guest_log_container_name try: self.swift_client.get_container(container_name, prefix='dummy') except ClientException as ex: if ex.http_status == 404: LOG.debug("Container '%s' not found; creating now", container_name) self.swift_client.put_container( container_name, headers=self._get_headers()) else: LOG.exception("Could not retrieve container '%s'", container_name) raise self._container_name = 
container_name return self._container_name def _set_status(self, use_first, first_status, second_status): if use_first: self.status = first_status else: self.status = second_status def show(self): if self.exposed: self._refresh_details() container_name = 'None' prefix = 'None' if self._published_size: container_name = self.get_container_name() prefix = self._object_prefix() pending = self._size - self._published_size if self.status == LogStatus.Rotated: pending = self._size return { 'name': self._name, 'type': self._type.name, 'status': self.status.name.replace('_', ' '), 'published': self._published_size, 'pending': pending, 'container': container_name, 'prefix': prefix, 'metafile': self._metafile_name() } else: raise exception.LogAccessForbidden(action='show', log=self._name) def _refresh_details(self): if self._published_size is None: # Initializing, so get all the values try: meta_details = self._get_meta_details() self._published_size = int( meta_details[self.MF_LABEL_LOG_SIZE]) self._published_header_digest = ( meta_details[self.MF_LABEL_LOG_HEADER]) except ClientException as ex: if ex.http_status == 404: LOG.debug("No published metadata found for log '%s'", self._name) self._published_size = 0 else: LOG.exception("Could not get meta details for log '%s'", self._name) raise except ConnectionError as e: # A bad endpoint will cause a ConnectionError # This exception contains another exception that we want exc = e.args[0] raise exc self._update_details() LOG.debug("Log size for '%(name)s' set to %(size)d " "(published %(published)d)", {'name': self._name, 'size': self._size, 'published': self._published_size}) def _update_details(self): # Make sure we can read the file if not self._file_readable or not os.access(self._file, os.R_OK): if not os.access(self._file, os.R_OK): if operating_system.exists(self._file, as_root=True): operating_system.chmod( self._file, FileMode.ADD_ALL_R, as_root=True) self._file_readable = True if os.path.isfile(self._file): logstat = os.stat(self._file) self._size = logstat.st_size self._update_log_header_digest(self._file) if self._log_rotated(): self.status = LogStatus.Rotated # See if we have stuff to publish elif logstat.st_size > self._published_size: self._set_status(self._published_size, LogStatus.Partial, LogStatus.Ready) # We've published everything so far elif logstat.st_size == self._published_size: self._set_status(self._published_size, LogStatus.Published, LogStatus.Enabled) # We've already handled this case (log rotated) so what gives? else: raise Exception(_("Bug in _log_rotated ?")) else: self._published_size = 0 self._size = 0 if not self._size or not self.enabled: user_status = LogStatus.Disabled if self.enabled: user_status = LogStatus.Enabled self._set_status(self._type == LogType.USER, user_status, LogStatus.Unavailable) def _log_rotated(self): """If the file is smaller than the last reported size or the first line hash is different, we can probably assume the file changed under our nose. 
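
        Equivalently (a restatement of the check below, for clarity)::

            rotated = (published_size > 0 and
                       (current_size < published_size or
                        current_header_digest != published_header_digest))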
""" if (self._published_size > 0 and (self._size < self._published_size or self._published_header_digest != self._header_digest)): return True def _update_log_header_digest(self, log_file): with open(log_file, 'rb') as log: self._header_digest = hashlib.md5(log.readline()).hexdigest() def _get_headers(self): return {'X-Delete-After': str(CONF.guest_log_expiry)} def publish_log(self): if self.exposed: if self._log_rotated(): LOG.debug("Log file rotation detected for '%s' - " "discarding old log", self._name) self._delete_log_components() if os.path.isfile(self._file): self._publish_to_container(self._file) else: raise RuntimeError(_( "Cannot publish log file '%s' as it does not exist.") % self._file) return self.show() else: raise exception.LogAccessForbidden( action='publish', log=self._name) def discard_log(self): if self.exposed: self._delete_log_components() return self.show() else: raise exception.LogAccessForbidden( action='discard', log=self._name) def _delete_log_components(self): container_name = self.get_container_name(force=True) prefix = self._object_prefix() swift_files = [swift_file['name'] for swift_file in self.swift_client.get_container( container_name, prefix=prefix)[1]] swift_files.append(self._metafile_name()) for swift_file in swift_files: self.swift_client.delete_object(container_name, swift_file) self._set_status(self._type == LogType.USER, LogStatus.Disabled, LogStatus.Enabled) self._published_size = 0 def _publish_to_container(self, log_filename): log_component, log_lines = '', 0 chunk_size = CONF.guest_log_limit container_name = self.get_container_name(force=True) def _read_chunk(f): while True: current_chunk = f.read(chunk_size) if not current_chunk: break yield current_chunk def _write_log_component(): object_headers.update({'x-object-meta-lines': str(log_lines)}) component_name = '%s%s' % (self._object_prefix(), self._object_name()) self.swift_client.put_object(container_name, component_name, log_component, headers=object_headers) self._published_size = ( self._published_size + len(log_component)) self._published_header_digest = self._header_digest self._refresh_details() self._put_meta_details() object_headers = self._get_headers() with open(log_filename, 'r') as log: LOG.debug("seeking to %s", self._published_size) log.seek(self._published_size) for chunk in _read_chunk(log): for log_line in chunk.splitlines(): if len(log_component) + len(log_line) > chunk_size: _write_log_component() log_component, log_lines = '', 0 log_component = log_component + log_line + '\n' log_lines += 1 if log_lines > 0: _write_log_component() self._put_meta_details() def _put_meta_details(self): metafile_name = self._metafile_name() metafile_details = { self.MF_LABEL_LOG_NAME: self._name, self.MF_LABEL_LOG_TYPE: self._type.name, self.MF_LABEL_LOG_FILE: self._file, self.MF_LABEL_LOG_SIZE: self._published_size, self.MF_LABEL_LOG_HEADER: self._header_digest, } container_name = self.get_container_name() self.swift_client.put_object(container_name, metafile_name, self._codec.serialize(metafile_details), headers=self._get_headers()) LOG.debug("_put_meta_details has published log size as %s", self._published_size) def _metafile_name(self): return self._object_prefix().rstrip('/') + '_metafile' def _object_prefix(self): return '%(instance_id)s/%(datastore)s-%(log)s/' % { 'instance_id': CONF.guest_id, 'datastore': CONF.datastore_manager, 'log': self._name} def _object_name(self): return 'log-%s' % str(timeutils.utcnow()).replace(' ', 'T') def _get_meta_details(self): LOG.debug("Getting meta 
details for '%s'", self._name) metafile_name = self._metafile_name() container_name = self.get_container_name() headers, metafile_details = self.swift_client.get_object( container_name, metafile_name) LOG.debug("Found meta details for '%s'", self._name) return self._codec.deserialize(metafile_details) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/models.py0000644000175000017500000000575000000000000021320 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime from datetime import timedelta from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common import timeutils from trove.common import utils from trove.db import get_db_api from trove.db import models as dbmodels LOG = logging.getLogger(__name__) CONF = cfg.CONF def persisted_models(): return {'agent_heartbeats': AgentHeartBeat} class AgentHeartBeat(dbmodels.DatabaseModelBase): """Defines the state of a Guest Agent.""" _data_fields = ['instance_id', 'updated_at', 'guest_agent_version', 'deleted', 'deleted_at'] _table_name = 'agent_heartbeats' def __init__(self, **kwargs): super(AgentHeartBeat, self).__init__(**kwargs) @classmethod def create(cls, **values): values['id'] = utils.generate_uuid() heartbeat = cls(**values).save() if not heartbeat.is_valid(): raise exception.InvalidModelError(errors=heartbeat.errors) return heartbeat def save(self): if not self.is_valid(): raise exception.InvalidModelError(errors=self.errors) self['updated_at'] = timeutils.utcnow() LOG.debug("Saving %(name)s: %(dict)s", {'name': self.__class__.__name__, 'dict': self.__dict__}) return get_db_api().save(self) @classmethod def find_all_by_version(cls, guest_agent_version, deleted=0): if guest_agent_version is None: raise exception.ModelNotFoundError() heartbeats = cls.find_all(guest_agent_version=guest_agent_version, deleted=deleted) if heartbeats is None or heartbeats.count() == 0: raise exception.ModelNotFoundError( guest_agent_version=guest_agent_version) return heartbeats @classmethod def find_by_instance_id(cls, instance_id): if instance_id is None: raise exception.ModelNotFoundError(instance_id=instance_id) try: return cls.find_by(instance_id=instance_id) except exception.NotFound: LOG.exception("Error finding instance %s", instance_id) raise exception.ModelNotFoundError(instance_id=instance_id) @staticmethod def is_active(agent): return (datetime.now() - agent.updated_at < timedelta(seconds=CONF.agent_heartbeat_time)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106 trove-12.1.0.dev92/trove/guestagent/module/0000755000175000017500000000000000000000000020741 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 
trove-12.1.0.dev92/trove/guestagent/module/__init__.py0000644000175000017500000000000000000000000023040 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/module/driver_manager.py0000644000175000017500000000707200000000000024306 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from oslo_utils import encodeutils import stevedore from trove.common import base_exception as exception from trove.common import cfg from trove.common.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF class ModuleDriverManager(object): MODULE_DRIVER_NAMESPACE = 'trove.guestagent.module.drivers' def __init__(self): LOG.info('Initializing module driver manager.') self._drivers = {} self._module_types = [mt.lower() for mt in CONF.module_types] self._load_drivers() def _load_drivers(self): manager = stevedore.enabled.EnabledExtensionManager( namespace=self.MODULE_DRIVER_NAMESPACE, check_func=self._check_extension, invoke_on_load=True, invoke_kwds={}) try: manager.map(self.add_driver_extension) except stevedore.exception.NoMatches: LOG.info("No module drivers loaded") def _check_extension(self, extension): """Checks for required methods in driver objects.""" driver = extension.obj supported = False try: LOG.info('Loading Module driver: %s', driver.get_type()) if driver.get_type() != driver.get_type().lower(): raise AttributeError(_("Driver 'type' must be lower-case")) LOG.debug(' description: %s', driver.get_description()) LOG.debug(' updated : %s', driver.get_updated()) required_attrs = ['apply', 'remove'] for attr in required_attrs: if not hasattr(driver, attr): raise AttributeError( _("Driver '%(type)s' missing attribute: %(attr)s") % {'type': driver.get_type(), 'attr': attr}) if driver.get_type() in self._module_types: supported = True else: LOG.info("Driver '%s' not supported, skipping", driver.get_type()) except AttributeError as ex: LOG.exception("Exception loading module driver: %s", encodeutils.exception_to_unicode(ex)) return supported def add_driver_extension(self, extension): # Add a module driver from the extension. 
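        # Drivers are discovered as stevedore extensions registered under
        # the MODULE_DRIVER_NAMESPACE entry-point namespace defined above;
        # as a rough, illustrative sketch, a driver shipped with the agent
        # would be registered in the project's setup.cfg along these lines:
        #
        #   [entry_points]
        #   trove.guestagent.module.drivers =
        #       ping = trove.guestagent.module.drivers.ping_driver:PingDriver
        #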
        # If the stevedore manager is changed to one that doesn't
        # check the extension driver, then it should be done manually here
        # by calling self._check_extension(extension)
        driver = extension.obj
        driver_type = driver.get_type()
        LOG.info('Loaded module driver: %s', driver_type)
        if driver_type in self._drivers:
            raise exception.Error(_("Found duplicate driver: %s")
                                  % driver_type)
        self._drivers[driver_type] = driver

    def get_driver(self, driver_type):
        found = None
        if driver_type in self._drivers:
            found = self._drivers[driver_type]
        return found
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106
trove-12.1.0.dev92/trove/guestagent/module/drivers/0000755000175000017500000000000000000000000022417 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/guestagent/module/drivers/__init__.py0000644000175000017500000000000000000000000024516 0ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/guestagent/module/drivers/module_driver.py0000644000175000017500000001726400000000000025633 0ustar00coreycorey00000000000000
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import abc
import functools
import re

import six

from oslo_log import log as logging

from trove.common import exception

LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class ModuleDriver(object):
    """Base class that defines the contract for module drivers.

    Note that you don't have to derive from this class to have a valid
    driver; it is purely a convenience. Any class that adheres to the
    'interface' as dictated by this class' abstractmethod decorators
    (and other methods such as get_type, get_name and configure)
    will work.
    """
    def __init__(self):
        super(ModuleDriver, self).__init__()

        # This is used to store any message args to be substituted by
        # the output decorator when logging/returning messages.
        self._module_message_args = {}
        self._message_args = None
        self._generated_name = None

    @property
    def message_args(self):
        """Return a dict of message args that can be used to enhance
        the output decorator messages. This shouldn't be overridden; use
        self.message_args = <dict> instead to append values.
        """
        if not self._message_args:
            self._message_args = {
                'name': self.get_name(),
                'type': self.get_type()}
            self._message_args.update(self._module_message_args)
        return self._message_args

    @message_args.setter
    def message_args(self, values):
        """Set the message args that can be used to enhance
        the output decorator messages.
        """
        values = values or {}
        self._module_message_args = values
        self._message_args = None

    @property
    def generated_name(self):
        if not self._generated_name:
            # Turn class name into 'module type' format.
            # For example: DoCustomWorkDriver -> do_custom_work
            temp = re.sub('(.)[Dd]river$', r'\1', self.__class__.__name__)
            temp2 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', temp)
            temp3 = re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp2)
            self._generated_name = temp3.lower()
        return self._generated_name

    def get_type(self):
        """This is used when setting up a module in Trove, and is here for
        code clarity. It just returns the name of the driver by default.
        """
        return self.get_name()

    def get_name(self):
        """Use the generated name based on the class name. If overridden,
        it must be in lower-case.
        """
        return self.generated_name

    @abc.abstractmethod
    def get_description(self):
        """Description for the driver."""
        pass

    @abc.abstractmethod
    def get_updated(self):
        """Date the driver was last updated."""
        pass

    @abc.abstractmethod
    def apply(self, name, datastore, ds_version, data_file, admin_module):
        """Apply the module to the guest instance. Return status and
        message as a tuple. Passes in whether the module was created with
        'admin' privileges. This can be used as a form of access control
        by having the driver refuse to apply a module if it wasn't created
        with options that indicate that it was done by an 'admin' user.
        """
        return False, "Not a concrete driver"

    @abc.abstractmethod
    def remove(self, name, datastore, ds_version, data_file):
        """Remove the module from the guest instance. Return status and
        message as a tuple.
        """
        return False, "Not a concrete driver"

    def configure(self, name, datastore, ds_version, data_file):
        """Configure the driver.

        This is particularly useful for adding values to message_args, by
        having a line such as: self.message_args = <dict>. These values
        will be appended to the default ones defined in the message_args
        @property.
        """
        pass


def output(log_message=None, success_message=None, fail_message=None):
    """This is a decorator to trap the typical exceptions that occur
    when applying and removing modules. It returns the proper output
    corresponding to the error messages automatically. If the function
    returns output (success_flag, message) then those are returned,
    otherwise success is assumed and the success_message returned.
    Using this removes a lot of potential boilerplate code; however,
    it is not required.

    Keyword arguments can be used in the message string. Default values
    can be found in the message_args @property, however a driver can add
    whatever it sees fit, by setting message_args to a dict in the
    configure call (see above). Thus if you set
    self.message_args = {'my_key': 'my_key_val'} then the message string
    could look like "My key is '%(my_key)s'".
    """
    success_message = success_message or "Success"
    fail_message = fail_message or "Fail"

    def output_decorator(func):
        """This is the actual decorator."""

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            """Here's where we handle the error messages and return values
            from the actual function.
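
            As a point of reference, a decorated driver method in this
            tree typically looks like (a sketch mirroring the concrete
            drivers below; the messages are illustrative):

                @module_driver.output(
                    log_message='Installing license key',
                    success_message='License key installed',
                    fail_message='License key not installed')
                def apply(self, name, datastore, ds_version, data_file,
                          admin_module):
                    ...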
""" log_msg = log_message success_msg = success_message fail_msg = fail_message if isinstance(args[0], ModuleDriver): # Try and insert any message args if they exist in the driver message_args = args[0].message_args if message_args: try: log_msg = log_msg % message_args success_msg = success_msg % message_args fail_msg = fail_msg % message_args except Exception: # if there's a problem, just log it and drive on LOG.warning("Could not apply message args: %s", message_args) pass if log_msg: LOG.info(log_msg) success = False try: rv = func(*args, **kwargs) if rv: # Use the actual values, if there are some success, message = rv else: success = True message = success_msg except exception.ProcessExecutionError as ex: message = ("%(msg)s: %(out)s\n%(err)s" % {'msg': fail_msg, 'out': ex.stdout, 'err': ex.stderr}) message = message.replace(': \n', ': ') message = message.rstrip() LOG.exception(message) except exception.TroveError as ex: message = ("%(msg)s: %(err)s" % {'msg': fail_msg, 'err': ex._error_string}) LOG.exception(message) except Exception as ex: message = ("%(msg)s: %(err)s" % {'msg': fail_msg, 'err': str(ex)}) LOG.exception(message) return success, message return wrapper return output_decorator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/module/drivers/new_relic_license_driver.py0000644000175000017500000000641100000000000030017 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from datetime import date from oslo_log import log as logging from trove.common import stream_codecs from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.module.drivers import module_driver LOG = logging.getLogger(__name__) NR_ADD_LICENSE_CMD = ['nrsysmond-config', '--set', 'license_key=%s'] NR_SRV_CONTROL_CMD = ['/etc/init.d/newrelic-sysmond'] class NewRelicLicenseDriver(module_driver.ModuleDriver): """Module to set up the license for the NewRelic service.""" def get_description(self): return "New Relic License Module Driver" def get_updated(self): return date(2016, 4, 12) @module_driver.output( log_message='Installing New Relic license key', success_message='New Relic license key installed', fail_message='New Relic license key not installed') def apply(self, name, datastore, ds_version, data_file, admin_module): license_key = None data = operating_system.read_file( data_file, codec=stream_codecs.KeyValueCodec()) for key, value in data.items(): if 'license_key' == key.lower(): license_key = value break if license_key: self._add_license_key(license_key) self._server_control('start') else: return False, "'license_key' not found in contents file" def _add_license_key(self, license_key): try: exec_args = {'timeout': 10, 'run_as_root': True, 'root_helper': 'sudo'} cmd = list(NR_ADD_LICENSE_CMD) cmd[-1] = cmd[-1] % license_key utils.execute_with_timeout(*cmd, **exec_args) except Exception: LOG.exception("Could not install license key '%s'", license_key) raise def _server_control(self, command): try: exec_args = {'timeout': 10, 'run_as_root': True, 'root_helper': 'sudo'} cmd = list(NR_SRV_CONTROL_CMD) cmd.append(command) utils.execute_with_timeout(*cmd, **exec_args) except Exception: LOG.exception("Could not %s New Relic server", command) raise @module_driver.output( log_message='Removing New Relic license key', success_message='New Relic license key removed', fail_message='New Relic license key not removed') def remove(self, name, datastore, ds_version, data_file): self._add_license_key("bad_key_that_is_exactly_40_characters_xx") self._server_control('stop') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/module/drivers/ping_driver.py0000644000175000017500000000364400000000000025310 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from datetime import date from trove.common import stream_codecs from trove.guestagent.common import operating_system from trove.guestagent.module.drivers import module_driver class PingDriver(module_driver.ModuleDriver): """Concrete module to show implementation and functionality. Responds like an actual module driver, but does nothing except return the value of the message key in the contents file. For example, if the file contains 'message=Hello' then the message returned by module-apply will be 'Hello.' 
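
    A contents file for this driver is therefore a single key=value pair,
    e.g.:

        message=Hello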
""" def get_description(self): return "Ping Module Driver" def get_updated(self): return date(2016, 3, 4) @module_driver.output( log_message='Extracting %(type)s message', fail_message='Could not extract %(type)s message') def apply(self, name, datastore, ds_version, data_file, admin_module): data = operating_system.read_file( data_file, codec=stream_codecs.KeyValueCodec()) for key, value in data.items(): if 'message' == key.lower(): return True, value return False, 'Message not found in contents file' @module_driver.output( log_message='Removing %(type)s module') def remove(self, name, datastore, ds_version, data_file): return True, "" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/module/module_manager.py0000644000175000017500000002155200000000000024277 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import datetime import operator import os from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.common import stream_codecs from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system LOG = logging.getLogger(__name__) class ModuleManager(object): """This is a Manager utility class (mixin) for managing module-related tasks. 
""" MODULE_APPLY_TO_ALL = 'all' MODULE_BASE_DIR = guestagent_utils.build_file_path('~', 'modules') MODULE_CONTENTS_FILENAME = 'contents.dat' MODULE_RESULT_FILENAME = 'result.json' @classmethod def get_current_timestamp(cls): return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[0:22] @classmethod def apply_module(cls, driver, module_type, name, tenant, datastore, ds_version, contents, module_id, md5, auto_apply, visible, admin_module): tenant = tenant or cls.MODULE_APPLY_TO_ALL datastore = datastore or cls.MODULE_APPLY_TO_ALL ds_version = ds_version or cls.MODULE_APPLY_TO_ALL module_dir = cls.build_module_dir(module_type, module_id) data_file = cls.write_module_contents(module_dir, contents, md5) applied = True message = None now = cls.get_current_timestamp() default_result = cls.build_default_result( module_type, name, tenant, datastore, ds_version, module_id, md5, auto_apply, visible, now, admin_module) result = cls.read_module_result(module_dir, default_result) try: driver.configure(name, datastore, ds_version, data_file) applied, message = driver.apply( name, datastore, ds_version, data_file, admin_module) except Exception as ex: LOG.exception("Could not apply module '%s'", name) applied = False message = str(ex) finally: status = 'OK' if applied else 'ERROR' result['removed'] = None result['status'] = status result['message'] = message result['updated'] = now result['id'] = module_id result['md5'] = md5 result['type'] = module_type result['name'] = name result['datastore'] = datastore result['datastore_version'] = ds_version result['tenant'] = tenant result['auto_apply'] = auto_apply result['visible'] = visible result['is_admin'] = admin_module cls.write_module_result(module_dir, result) return result @classmethod def build_module_dir(cls, module_type, module_id): sub_dir = os.path.join(module_type, module_id) module_dir = guestagent_utils.build_file_path( cls.MODULE_BASE_DIR, sub_dir) if not operating_system.exists(module_dir, is_directory=True): operating_system.create_directory(module_dir, force=True) return module_dir @classmethod def write_module_contents(cls, module_dir, contents, md5): contents_file = cls.build_contents_filename(module_dir) operating_system.write_file(contents_file, contents, codec=stream_codecs.Base64Codec(), encode=False) return contents_file @classmethod def build_contents_filename(cls, module_dir): contents_file = guestagent_utils.build_file_path( module_dir, cls.MODULE_CONTENTS_FILENAME) return contents_file @classmethod def build_default_result(cls, module_type, name, tenant, datastore, ds_version, module_id, md5, auto_apply, visible, now, admin_module): result = { 'type': module_type, 'name': name, 'datastore': datastore, 'datastore_version': ds_version, 'tenant': tenant, 'id': module_id, 'md5': md5, 'status': None, 'message': None, 'created': now, 'updated': now, 'removed': None, 'auto_apply': auto_apply, 'visible': visible, 'is_admin': admin_module, 'contents': None, } return result @classmethod def is_admin_module(cls, tenant, auto_apply, visible): return (not visible or tenant == cls.MODULE_APPLY_TO_ALL or auto_apply) @classmethod def read_module_result(cls, result_file, default=None): result_file = cls.get_result_filename(result_file) result = default try: result = operating_system.read_file( result_file, codec=stream_codecs.JsonCodec()) except Exception: if not result: LOG.exception("Could not find module result in %s", result_file) raise return result @classmethod def get_result_filename(cls, file_or_dir): result_file = file_or_dir if 
operating_system.exists(file_or_dir, is_directory=True): result_file = guestagent_utils.build_file_path( file_or_dir, cls.MODULE_RESULT_FILENAME) return result_file @classmethod def write_module_result(cls, result_file, result): result_file = cls.get_result_filename(result_file) operating_system.write_file( result_file, result, codec=stream_codecs.JsonCodec()) @classmethod def read_module_results(cls, is_admin=False, include_contents=False): """Read all the module results on the guest and return a list of them. """ results = [] pattern = cls.MODULE_RESULT_FILENAME result_files = operating_system.list_files_in_directory( cls.MODULE_BASE_DIR, recursive=True, pattern=pattern) for result_file in result_files: result = cls.read_module_result(result_file) if (not result.get('removed') and (is_admin or result.get('visible'))): if include_contents: codec = stream_codecs.Base64Codec() # keep admin_only for backwards compatibility if not is_admin and (result.get('is_admin') or result.get('admin_only')): contents = ( "Must be admin to retrieve contents for module %s" % result.get('name', 'Unknown')) result['contents'] = codec.serialize(contents) else: contents_dir = os.path.dirname(result_file) contents_file = cls.build_contents_filename( contents_dir) result['contents'] = operating_system.read_file( contents_file, codec=codec, decode=False) results.append(result) results.sort(key=operator.itemgetter('updated'), reverse=True) return results @classmethod def remove_module(cls, driver, module_type, module_id, name, datastore, ds_version): datastore = datastore or cls.MODULE_APPLY_TO_ALL ds_version = ds_version or cls.MODULE_APPLY_TO_ALL module_dir = cls.build_module_dir(module_type, module_id) contents_file = cls.build_contents_filename(module_dir) if not operating_system.exists(cls.get_result_filename(module_dir)): raise exception.NotFound( _("Module '%s' has not been applied") % name) try: driver.configure(name, datastore, ds_version, contents_file) removed, message = driver.remove( name, datastore, ds_version, contents_file) cls.remove_module_result(module_dir) except Exception: LOG.exception("Could not remove module '%s'", name) raise return removed, message @classmethod def remove_module_result(cls, result_file): now = cls.get_current_timestamp() result = cls.read_module_result(result_file, None) result['removed'] = now cls.write_module_result(result_file, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/pkg.py0000644000175000017500000003777200000000000020627 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Manages packages on the Guest VM. 
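
The concrete Package class is selected at the bottom of this module based
on the detected operating system (Debian- or RedHat-based). A typical call
sequence (a sketch; the package name and timeout are illustrative) looks
like:

    pkg = Package()
    if not pkg.pkg_is_installed('mysql-server-5.7'):
        pkg.pkg_install(['mysql-server-5.7'], {}, 600)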
""" import os import re import subprocess from tempfile import NamedTemporaryFile from oslo_log import log as logging from oslo_utils import encodeutils import pexpect import six from trove.common import exception from trove.common.exception import ProcessExecutionError from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import operating_system LOG = logging.getLogger(__name__) OK = 0 RUN_DPKG_FIRST = 1 REINSTALL_FIRST = 2 CONFLICT_REMOVED = 3 def getoutput(*cmd): """Get the stdout+stderr of a command, ignore errors. Similar to commands.getstatusoutput(cmd)[1] of Python 2. """ try: proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) except OSError: # ignore errors like program not found return six.text_type("") stdout = proc.communicate()[0] return encodeutils.safe_decode(stdout) class PkgAdminLockError(exception.TroveError): pass class PkgPermissionError(exception.TroveError): pass class PkgPackageStateError(exception.TroveError): pass class PkgNotFoundError(exception.NotFound): pass class PkgTimeout(exception.TroveError): pass class PkgScriptletError(exception.TroveError): pass class PkgDownloadError(exception.TroveError): pass class PkgSignError(exception.TroveError): pass class PkgBrokenError(exception.TroveError): pass class PkgConfigureError(exception.TroveError): pass class BasePackagerMixin(object): def pexpect_kill_proc(self, child): child.delayafterclose = 1 child.delayafterterminate = 1 try: child.close(force=True) except pexpect.ExceptionPexpect: # Close fails to terminate a sudo process on some OSes. subprocess.call(['sudo', 'kill', str(child.pid)]) def pexpect_wait_and_close_proc(self, child): child.expect(pexpect.EOF) child.close() def pexpect_run(self, cmd, output_expects, time_out): child = pexpect.spawn(cmd, timeout=time_out) try: i = child.expect(output_expects) match = child.match self.pexpect_wait_and_close_proc(child) except pexpect.TIMEOUT: self.pexpect_kill_proc(child) raise PkgTimeout(_("Process timeout after %i seconds.") % time_out) return (i, match) class RPMPackagerMixin(BasePackagerMixin): def _rpm_remove_nodeps(self, package_name): """ Sometimes transaction errors happens, easy way is to remove conflicted package without dependencies and hope it will replaced by another package """ try: utils.execute("rpm", "-e", "--nodeps", package_name, run_as_root=True, root_helper="sudo") except ProcessExecutionError: LOG.exception("Error removing conflict %(package)s", package_name) def _install(self, packages, time_out): """must be overridden by an RPM based PackagerMixin""" raise NotImplementedError() def _remove(self, package_name, time_out): """must be overridden by an RPM based PackagerMixin""" raise NotImplementedError() def pkg_install(self, packages, config_opts, time_out): result = self._install(packages, time_out) if result != OK: while result == CONFLICT_REMOVED: result = self._install(packages, time_out) if result != OK: raise PkgPackageStateError(_("Cannot install packages.")) def pkg_is_installed(self, packages): packages = packages if isinstance(packages, list) else packages.split() std_out = getoutput("rpm", "-qa") for pkg in packages: found = False for line in std_out.split("\n"): if line.find(pkg) != -1: found = True break if not found: return False return True def pkg_version(self, package_name): std_out = getoutput("rpm", "-qa", "--qf", "'%{VERSION}-%{RELEASE}\n'", package_name) # Need to capture the version string # check the command output for line in std_out.split("\n"): regex = 
re.compile("[0-9.]+-.*") matches = regex.match(line) if matches: line = matches.group() return line LOG.error("Unexpected output from rpm command. (%(output)s)", {'output': std_out}) def pkg_remove(self, package_name, time_out): """Removes a package.""" if self.pkg_version(package_name) is None: return result = self._remove(package_name, time_out) if result != OK: raise PkgPackageStateError(_("Package %s is in a bad state.") % package_name) class RedhatPackagerMixin(RPMPackagerMixin): def _install(self, packages, time_out): """Attempts to install packages. Returns OK if the packages are installed or a result code if a recoverable-error occurred. Raises an exception if a non-recoverable error or timeout occurs. """ cmd = "sudo yum --color=never -y install %s" % " ".join(packages) output_expects = [r'\[sudo\] password for .*:', 'No package (.*) available.', ('file .* from install of .* conflicts with file' ' from package (.*?)\r\n'), 'Error: (.*?) conflicts with .*?\r\n', 'Processing Conflict: .* conflicts (.*?)\r\n', '.*scriptlet failed*', 'HTTP Error', 'No more mirrors to try.', 'GPG key retrieval failed:', '.*already installed and latest version', 'Updated:', 'Installed:'] LOG.debug("Running package install command: %s", cmd) i, match = self.pexpect_run(cmd, output_expects, time_out) if i == 0: raise PkgPermissionError(_("Invalid permissions.")) elif i == 1: raise PkgNotFoundError(_("Could not find package %s") % match.group(1)) elif i == 2 or i == 3 or i == 4: self._rpm_remove_nodeps(match.group(1)) return CONFLICT_REMOVED elif i == 5: raise PkgScriptletError(_("Package scriptlet failed")) elif i == 6 or i == 7: raise PkgDownloadError(_("Package download problem")) elif i == 8: raise PkgSignError(_("GPG key retrieval failed")) return OK def _remove(self, package_name, time_out): """Removes a package. Returns OK if the package is removed successfully or a result code if a recoverable-error occurs. Raises an exception if a non-recoverable error or timeout occurs. """ cmd = "sudo yum --color=never -y remove %s" % package_name LOG.debug("Running package remove command: %s", cmd) output_expects = [r'\[sudo\] password for .*:', 'No Packages marked for removal', 'Removed:'] i, match = self.pexpect_run(cmd, output_expects, time_out) if i == 0: raise PkgPermissionError(_("Invalid permissions.")) elif i == 1: raise PkgNotFoundError(_("Could not find package %s") % package_name) return OK class DebianPackagerMixin(BasePackagerMixin): def _fix(self, time_out): """Sometimes you have to run this command before a package will install. """ try: utils.execute("dpkg", "--configure", "-a", run_as_root=True, root_helper="sudo") except ProcessExecutionError: LOG.exception("Error fixing dpkg") def _fix_package_selections(self, packages, config_opts): """ Sometimes you have to run this command before a package will install. This command sets package selections to configure package. 
""" selections = "" for package in packages: m = re.match('(.+)=(.+)', package) if m: package_name = m.group(1) else: package_name = package std_out = getoutput("sudo", "debconf-show", package_name) for line in std_out.split("\n"): for selection, value in config_opts.items(): m = re.match(".* (.*/%s):.*" % selection, line) if m: selections += ("%s %s string '%s'\n" % (package_name, m.group(1), value)) if selections: with NamedTemporaryFile(delete=False) as f: fname = f.name f.write(encodeutils.safe_encode(selections)) try: utils.execute("debconf-set-selections", fname, run_as_root=True, root_helper="sudo") utils.execute("dpkg", "--configure", "-a", run_as_root=True, root_helper="sudo") except ProcessExecutionError: raise PkgConfigureError(_("Error configuring package.")) finally: os.remove(fname) def _install(self, packages, time_out): """Attempts to install packages. Returns OK if the packages are installed or a result code if a recoverable-error occurred. Raises an exception if a non-recoverable error or timeout occurs. """ cmd = "sudo -E DEBIAN_FRONTEND=noninteractive apt-get -y " \ "--force-yes --allow-unauthenticated -o " \ "DPkg::options::=--force-confmiss --reinstall " \ "install %s" % " ".join(packages) output_expects = ['.*password*', 'E: Unable to locate package (.*)', "Couldn't find package (.*)", "E: Version '.*' for '(.*)' was not found", ("dpkg was interrupted, you must manually run " "'sudo dpkg --configure -a'"), "Unable to lock the administration directory", ("E: Unable to correct problems, you have held " "broken packages."), "Setting up (.*)", "is already the newest version"] LOG.debug("Running package install command: %s", cmd) i, match = self.pexpect_run(cmd, output_expects, time_out) if i == 0: raise PkgPermissionError(_("Invalid permissions.")) elif i == 1 or i == 2 or i == 3: raise PkgNotFoundError(_("Could not find package %s") % match.group(1)) elif i == 4: return RUN_DPKG_FIRST elif i == 5: raise PkgAdminLockError() elif i == 6: raise PkgBrokenError() return OK def _remove(self, package_name, time_out): """Removes a package. Returns OK if the package is removed successfully or a result code if a recoverable-error occurs. Raises an exception if a non-recoverable error or timeout occurs. 
""" cmd = "sudo -E apt-get -y --allow-unauthenticated remove %s" \ % package_name output_expects = ['.*password*', 'E: Unable to locate package %s' % package_name, 'Package is in a very bad inconsistent state', 'Sub-process /usr/bin/dpkg returned an error code', ("dpkg was interrupted, you must manually run " "'sudo dpkg --configure -a'"), "Unable to lock the administration directory", "Removing %s*" % package_name] LOG.debug("Running remove package command %s", cmd) i, match = self.pexpect_run(cmd, output_expects, time_out) if i == 0: raise PkgPermissionError(_("Invalid permissions.")) elif i == 1: raise PkgNotFoundError(_("Could not find package %s") % package_name) elif i == 2 or i == 3: return REINSTALL_FIRST elif i == 4: return RUN_DPKG_FIRST elif i == 5: raise PkgAdminLockError() return OK def pkg_install(self, packages, config_opts, time_out): """Installs packages.""" try: utils.execute("apt-get", "update", run_as_root=True, root_helper="sudo") except ProcessExecutionError: LOG.exception("Error updating the apt sources") result = self._install(packages, time_out) if result != OK: if result == RUN_DPKG_FIRST: self._fix(time_out) result = self._install(packages, time_out) if result != OK: raise PkgPackageStateError(_("Packages are in a bad state.")) # even after successful install, packages can stay unconfigured # config_opts - is dict with name/value for questions asked by # interactive configure script if config_opts: self._fix_package_selections(packages, config_opts) def pkg_version(self, package_name): std_out = getoutput("apt-cache", "policy", package_name) for line in std_out.split("\n"): m = re.match(r"\s+Installed: (.*)", line) if m: version = m.group(1) if version == "(none)": version = None return version def pkg_is_installed(self, packages): packages = packages if isinstance(packages, list) else packages.split() for pkg in packages: m = re.match('(.+)=(.+)', pkg) if m: package_name = m.group(1) package_version = m.group(2) else: package_name = pkg package_version = None installed_version = self.pkg_version(package_name) if ((package_version and installed_version == package_version) or (installed_version and not package_version)): LOG.debug("Package %s already installed.", package_name) else: return False return True def pkg_remove(self, package_name, time_out): """Removes a package.""" if self.pkg_version(package_name) is None: return result = self._remove(package_name, time_out) if result != OK: if result == REINSTALL_FIRST: self._install(package_name, time_out) elif result == RUN_DPKG_FIRST: self._fix(time_out) result = self._remove(package_name, time_out) if result != OK: raise PkgPackageStateError(_("Package %s is in a bad state.") % package_name) if operating_system.get_os() == operating_system.REDHAT: class Package(RedhatPackagerMixin): pass else: class Package(DebianPackagerMixin): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/service.py0000644000175000017500000000225400000000000021471 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import routes from trove.common import wsgi class Controller(wsgi.Controller): """Base controller class.""" pass class API(wsgi.Router): """Defines the API routes.""" def __init__(self): mapper = routes.Mapper() super(API, self).__init__(mapper) self._instance_router(mapper) def _instance_router(self, mapper): resource = Controller().create_resource() path = "/guests" mapper.resource("guest", path, controller=resource) def app_factory(global_conf, **local_conf): return API() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106 trove-12.1.0.dev92/trove/guestagent/strategies/0000755000175000017500000000000000000000000021626 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/__init__.py0000644000175000017500000000000000000000000023725 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7641106 trove-12.1.0.dev92/trove/guestagent/strategies/backup/0000755000175000017500000000000000000000000023073 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/backup/__init__.py0000644000175000017500000000157400000000000025213 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.common.strategies.strategy import Strategy LOG = logging.getLogger(__name__) def get_backup_strategy(backup_driver, ns=__name__): return Strategy.get_strategy(backup_driver, ns) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/backup/base.py0000644000175000017500000001057200000000000024364 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import os import signal from oslo_log import log as logging from eventlet.green import subprocess from trove.common import cfg, utils from trove.common.strategies.strategy import Strategy CONF = cfg.CONF LOG = logging.getLogger(__name__) class BackupError(Exception): """Error running the Backup Command.""" class UnknownBackupType(Exception): """Unknown backup type.""" class BackupRunner(Strategy): """Base class for Backup Strategy implementations.""" __strategy_type__ = 'backup_runner' __strategy_ns__ = 'trove.guestagent.strategies.backup' # The actual system call to run the backup cmd = None is_zipped = CONF.backup_use_gzip_compression is_encrypted = CONF.backup_use_openssl_encryption encrypt_key = CONF.backup_aes_cbc_key def __init__(self, filename, **kwargs): self.base_filename = filename self.process = None self.pid = None kwargs.update({'filename': filename}) self.command = self.cmd % kwargs super(BackupRunner, self).__init__() @property def backup_type(self): return type(self).__name__ def _run(self): LOG.debug("BackupRunner running cmd: %s", self.command) self.process = subprocess.Popen(self.command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid) self.pid = self.process.pid def __enter__(self): """Start up the process.""" self._run_pre_backup() self._run() return self def __exit__(self, exc_type, exc_value, traceback): """Clean up everything.""" # NOTE(zhaochao): all child processes should always be killed even the # context exits by an exception. if getattr(self, 'process', None): try: # Send a sigterm to the session leader, so that all # child processes are killed and cleaned up on terminate # (Ensures zombie processes aren't left around on a FAILURE) # https://bugs.launchpad.net/trove/+bug/1253850 os.killpg(self.process.pid, signal.SIGTERM) self.process.terminate() except OSError: # Already stopped pass if exc_type is not None: return False utils.raise_if_process_errored(self.process, BackupError) if not self.check_process(): raise BackupError self._run_post_backup() return True def metadata(self): """Hook for subclasses to store metadata from the backup.""" return {} @property def filename(self): """Subclasses may overwrite this to declare a format (.tar).""" return self.base_filename @property def manifest(self): return "%s%s%s" % (self.filename, self.zip_manifest, self.encrypt_manifest) @property def zip_cmd(self): return ' | gzip' if self.is_zipped else '' @property def zip_manifest(self): return '.gz' if self.is_zipped else '' @property def encrypt_cmd(self): return (' | openssl enc -aes-256-cbc -salt -pass pass:%s' % self.encrypt_key) if self.is_encrypted else '' @property def encrypt_manifest(self): return '.enc' if self.is_encrypted else '' def check_process(self): """Hook for subclasses to check process for errors.""" return True def read(self, chunk_size): return self.process.stdout.read(chunk_size) def _run_pre_backup(self): pass def _run_post_backup(self): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7681108 trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/0000755000175000017500000000000000000000000025570 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 
trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/__init__.py0000644000175000017500000000000000000000000027667 0ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/cassandra_impl.py0000644000175000017500000001120100000000000031115 0ustar00coreycorey00000000000000
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.cassandra import service
from trove.guestagent.strategies.backup import base

LOG = logging.getLogger(__name__)


class NodetoolSnapshot(base.BackupRunner):
    """Implementation of backup using the Nodetool (http://goo.gl/QtXVsM)
    utility.
    """

    # It is recommended to include the system keyspace in the backup.
    # Keeping the system keyspace will reduce the restore time
    # by avoiding the need to rebuild indexes.

    __strategy_name__ = 'nodetoolsnapshot'
    _SNAPSHOT_EXTENSION = 'db'

    def __init__(self, filename, **kwargs):
        self._app = service.CassandraApp()
        super(NodetoolSnapshot, self).__init__(filename, **kwargs)

    def _run_pre_backup(self):
        """Take snapshot(s) for all keyspaces.
        Remove existing ones first if any.
        Snapshot(s) will be stored in the data directory tree:
        <data dir>/<keyspace>/<column family>/snapshots/<snapshot name>
        """

        self._remove_snapshot(self.filename)
        self._snapshot_all_keyspaces(self.filename)

        # Commonly 'self.command' gets resolved in the base constructor,
        # but we can build the full command only after having taken the
        # keyspace snapshot(s).
        self.command = self._backup_cmd + self.command

    def _run_post_backup(self):
        """Remove the created snapshot(s)."""

        self._remove_snapshot(self.filename)

    def _remove_snapshot(self, snapshot_name):
        LOG.debug('Clearing snapshot(s) for all keyspaces with snapshot name '
                  '"%s".', snapshot_name)
        utils.execute('nodetool', 'clearsnapshot', '-t %s' % snapshot_name)

    def _snapshot_all_keyspaces(self, snapshot_name):
        LOG.debug('Creating snapshot(s) for all keyspaces with snapshot name '
                  '"%s".', snapshot_name)
        utils.execute('nodetool', 'snapshot', '-t %s' % snapshot_name)

    @property
    def cmd(self):
        return self.zip_cmd + self.encrypt_cmd

    @property
    def _backup_cmd(self):
        """Command to collect and package keyspace snapshot(s)."""

        return self._build_snapshot_package_cmd(self._app.cassandra_data_dir,
                                                self.filename)

    def _build_snapshot_package_cmd(self, data_dir, snapshot_name):
        """Collect all files for a given snapshot and build a package
        command for them.
        Transform the paths such that the backup can be restored simply by
        extracting the archive right to an existing data directory
        (i.e. place the root into the <data dir> and remove the
        'snapshots/<snapshot name>' portion of the path).
        Attempt to preserve access modifiers on the archived files.
        Assert the backup is not empty as there should always be at least
        the system keyspace. Fail if there is nothing to back up.
        """

        LOG.debug('Searching for all snapshot(s) with name "%s".',
                  snapshot_name)
        snapshot_files = operating_system.list_files_in_directory(
            data_dir, recursive=True, include_dirs=False,
            pattern=r'.*/snapshots/%s/.*\.%s' % (snapshot_name,
                                                 self._SNAPSHOT_EXTENSION),
            as_root=True)
        num_snapshot_files = len(snapshot_files)
        LOG.debug('Found %(num)d snapshot (*.%(ext)s) files.',
                  {'num': num_snapshot_files,
                   'ext': self._SNAPSHOT_EXTENSION})
        if num_snapshot_files > 0:
            return ('sudo tar '
                    '--transform="s#snapshots/%s/##" -cpPf - -C "%s" "%s"'
                    % (snapshot_name, data_dir, '" "'.join(snapshot_files)))

        # There should always be at least the system keyspace snapshot.
        raise exception.BackupCreationError(_("No data found."))
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0
trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/couchbase_impl.py0000644000175000017500000000774400000000000031127 0ustar00coreycorey00000000000000
# Copyright (c) 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
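#
# NOTE: before running cbbackup, _run_pre_backup() below saves the bucket
# definitions through the Couchbase REST API, roughly (a sketch with
# placeholders; the password comes from CouchbaseRootAccess.get_password()):
#
#   curl -u root:<password> <COUCHBASE_REST_API>/pools/default/buckets \
#       > /tmp<BUCKETS_JSON>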
# import json from oslo_log import log as logging from trove.common import exception from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.couchbase import service from trove.guestagent.datastore.experimental.couchbase import system from trove.guestagent.strategies.backup import base LOG = logging.getLogger(__name__) OUTFILE = '/tmp' + system.BUCKETS_JSON class CbBackup(base.BackupRunner): """ Implementation of Backup Strategy for Couchbase. """ __strategy_name__ = 'cbbackup' pre_backup_commands = [ ['rm', '-rf', system.COUCHBASE_DUMP_DIR], ['mkdir', '-p', system.COUCHBASE_DUMP_DIR], ] post_backup_commands = [ ['rm', '-rf', system.COUCHBASE_DUMP_DIR], ] @property def cmd(self): """ Creates backup dump dir, tars it up, and encrypts it. """ cmd = 'tar cpPf - ' + system.COUCHBASE_DUMP_DIR return cmd + self.zip_cmd + self.encrypt_cmd def _save_buckets_config(self, password): url = system.COUCHBASE_REST_API + '/pools/default/buckets' utils.execute_with_timeout('curl -u root:' + password + ' ' + url + ' > ' + OUTFILE, shell=True, timeout=300) def _backup(self, password): utils.execute_with_timeout('/opt/couchbase/bin/cbbackup', system.COUCHBASE_REST_API, system.COUCHBASE_DUMP_DIR, '-u', 'root', '-p', password, timeout=600, run_as_root=True, root_helper='sudo') def _run_pre_backup(self): try: for cmd in self.pre_backup_commands: utils.execute_with_timeout(*cmd) root = service.CouchbaseRootAccess() pw = root.get_password() self._save_buckets_config(pw) with open(OUTFILE, "r") as f: out = f.read() if out != "[]": d = json.loads(out) all_memcached = True for i in range(len(d)): bucket_type = d[i]["bucketType"] if bucket_type != "memcached": all_memcached = False break if not all_memcached: self._backup(pw) else: LOG.info("All buckets are memcached. " "Skipping backup.") operating_system.move(OUTFILE, system.COUCHBASE_DUMP_DIR) if pw != "password": # Not default password, backup generated root password operating_system.copy(system.pwd_file, system.COUCHBASE_DUMP_DIR, preserve=True, as_root=True) except exception.ProcessExecutionError: LOG.exception("Error during pre-backup phase.") raise def _run_post_backup(self): try: for cmd in self.post_backup_commands: utils.execute_with_timeout(*cmd) except exception.ProcessExecutionError: LOG.exception("Error during post-backup phase.") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/couchdb_impl.py0000644000175000017500000000251100000000000030571 0ustar00coreycorey00000000000000# Copyright 2016 IBM Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
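#
# NOTE: like the Couchbase strategy above, the final command here is the
# cmd property below plus BackupRunner's zip_cmd/encrypt_cmd suffixes, e.g.
# (a sketch, with both optional stages enabled):
#
#   sudo tar cpPf - <COUCHDB_LIB_DIR> | gzip \
#       | openssl enc -aes-256-cbc -salt -pass pass:<backup_aes_cbc_key>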
from trove.guestagent.datastore.experimental.couchdb import service from trove.guestagent.strategies.backup import base class CouchDBBackup(base.BackupRunner): __strategy_name__ = 'couchdbbackup' @property def cmd(self): """ CouchDB backup is based on a simple filesystem copy of the database files. Each database is a single fully contained append only file. For example, if a user creates a database 'foo', then a corresponding 'foo.couch' file will be created in the database directory which by default is in '/var/lib/couchdb'. """ cmd = 'sudo tar cpPf - ' + service.COUCHDB_LIB_DIR return cmd + self.zip_cmd + self.encrypt_cmd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/db2_impl.py0000644000175000017500000001435600000000000027643 0ustar00coreycorey00000000000000# Copyright 2016 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common.db import models from trove.common import exception from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.db2 import service from trove.guestagent.datastore.experimental.db2 import system from trove.guestagent.strategies.backup import base LOG = logging.getLogger(__name__) class DB2Backup(base.BackupRunner): """ Base class for DB2 backups """ def __init__(self, *args, **kwargs): super(DB2Backup, self).__init__(*args, **kwargs) self.admin = service.DB2Admin() self.databases = self.list_dbnames() def list_dbnames(self): dbNames = [] databases, marker = self.admin.list_databases() for database in databases: mydb = models.DatastoreSchema.deserialize(database) dbNames.append(mydb.name) return dbNames def estimate_backup_size(self): """ Estimating the size of the backup based on the size of the data returned from the get_db_size procedure. The size of the backup is always going to be smaller than the size of the data. """ try: size = 0 for dbname in self.databases: out = service.run_command(system.GET_DB_SIZE % {'dbname': dbname}) size = size + int(out[0]) except exception.ProcessExecutionError: LOG.exception("An error occurred while trying to " "estimate backup size") LOG.debug("Estimated size for databases: %d", size) return size def estimate_log_size(self): return 0.0 def run_backup(self): pass def execute_backup_cmd(self, backup_command): service.create_db2_dir(system.DB2_BACKUP_DIR) for dbName in self.databases: service.run_command(backup_command % {'dbname': dbName}) def _run_pre_backup(self): """ Before performing the actual backup we need to make sure that there is enough space to store the backups. The backup size is the sum of the size of the databases and if it is an online backup, the size of the archived logs is also factored in. 
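        The check itself is simple: both estimates are converted to GB and
        compared against the free space on the DB2 mount point, and the
        backup is aborted up front if it cannot fit.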
""" backup_size_bytes = self.estimate_backup_size() log_size_bytes = self.estimate_log_size() total_backup_size_gb = utils.to_gb(backup_size_bytes + log_size_bytes) free_bytes = operating_system.get_bytes_free_on_fs(system.MOUNT_POINT) free_gb = utils.to_gb(free_bytes) if total_backup_size_gb > free_gb: raise exception.InsufficientSpaceForBackup % { 'backup_size': total_backup_size_gb, 'free': free_gb } self.run_backup() @property def cmd(self): cmd = 'sudo tar cPf - ' + system.DB2_BACKUP_DIR return cmd + self.zip_cmd + self.encrypt_cmd def cleanup(self): service.remove_db2_dir(system.DB2_BACKUP_DIR) def _run_post_backup(self): self.cleanup() class DB2OnlineBackup(DB2Backup): """ Implementation of Online Backup Strategy for DB2 using archive logging. """ __strategy_name__ = 'db2onlinebackup' def __init__(self, *args, **kwargs): super(DB2OnlineBackup, self).__init__(*args, **kwargs) def estimate_log_size(self): """ Estimate the log utilization for all databases. The LOG_UTILIZATION administrative view returns information about log utilization for the connected database. The TOTAL_LOG_USED_KB returns the log utilization in KB. """ log_size = 0 try: for dbname in self.databases: out = service.run_command( system.LOG_UTILIZATION % {'dbname': dbname}) log_size = log_size + int(out[0]) log_size = log_size * 1024 except exception.ProcessExecutionError: LOG.exception("An error occurred while trying to estimate log " "size") LOG.debug("Estimated log size for all databases: %d", log_size) return log_size def run_backup(self): try: self.execute_backup_cmd(system.ONLINE_BACKUP_DB) except exception.ProcessExecutionError: LOG.exception("An exception occurred while doing an online " "backup.") self.cleanup() raise def cleanup(self): super(DB2OnlineBackup, self).cleanup() ''' After a backup operation, we can delete the archived logs from the archived log directory but we do not want to delete the directory itself. Since archive logging is enabled for all databases, this directory is needed to store archive logs. ''' service.remove_db2_dir(system.DB2_ARCHIVE_LOGS_DIR + "/*") class DB2OfflineBackup(DB2Backup): """ Implementation of Offline Backup Strategy for DB2 using circular logging which is the default. """ __strategy_name__ = 'db2offlinebackup' def __init__(self, *args, **kwargs): super(DB2OfflineBackup, self).__init__(*args, **kwargs) def run_backup(self): """Create archival contents in dump dir""" try: service.run_command(system.QUIESCE_DB2) self.execute_backup_cmd(system.OFFLINE_BACKUP_DB) service.run_command(system.UNQUIESCE_DB2) except exception.ProcessExecutionError: LOG.exception("An exception occurred while doing an offline " "backup.") self.cleanup() raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/mariadb_impl.py0000644000175000017500000000745100000000000030571 0ustar00coreycorey00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import re from oslo_log import log as logging from trove.common.i18n import _ from trove.guestagent.datastore.mysql import service as mysql_service from trove.guestagent.datastore.mysql_common import service as common_service from trove.guestagent.strategies.backup import base LOG = logging.getLogger(__name__) BACKUP_LOG = '/tmp/mariabackup.log' class MariaBackup(base.BackupRunner): """Implementation of Backup Strategy for mariabackup.""" __strategy_name__ = 'mariabackup' @property def user_and_pass(self): return ('--user=%(user)s --password=%(password)s --host=localhost ' '--socket=%(socket_file)s' % {'user': common_service.ADMIN_USER_NAME, 'password': mysql_service.MySqlApp.get_auth_password(), 'socket_file': '/var/run/mysqld/mysqld.sock'}) @property def cmd(self): cmd = ('sudo mariabackup --backup --stream=xbstream ' + self.user_and_pass + ' 2>' + BACKUP_LOG) return cmd + self.zip_cmd + self.encrypt_cmd def check_process(self): """Check the output of mariabackup command for 'completed OK!'. Return True if no error, otherwise return False. """ LOG.debug('Checking mariabackup process output.') with open(BACKUP_LOG, 'r') as backup_log: output = backup_log.read() if not output: LOG.error("mariabackup log file empty.") return False LOG.debug(output) last_line = output.splitlines()[-1].strip() if not re.search('completed OK!', last_line): LOG.error("mariabackup command failed.") return False return True def metadata(self): LOG.debug('Getting metadata for backup %s', self.base_filename) meta = {} lsn = re.compile(r"The latest check point \(for incremental\): " r"'(\d+)'") with open(BACKUP_LOG, 'r') as backup_log: output = backup_log.read() match = lsn.search(output) if match: meta = {'lsn': match.group(1)} LOG.info("Metadata for backup %s: %s", self.base_filename, meta) return meta @property def filename(self): return '%s.xbstream' % self.base_filename class MariaBackupIncremental(MariaBackup): def __init__(self, *args, **kwargs): if not kwargs.get('lsn'): raise AttributeError(_('lsn attribute missing, bad parent?')) super(MariaBackupIncremental, self).__init__(*args, **kwargs) self.parent_location = kwargs.get('parent_location') self.parent_checksum = kwargs.get('parent_checksum') @property def cmd(self): cmd = ( 'sudo mariabackup --backup --stream=xbstream' ' --incremental-lsn=%(lsn)s ' + self.user_and_pass + ' 2>' + BACKUP_LOG ) return cmd + self.zip_cmd + self.encrypt_cmd def metadata(self): meta = super(MariaBackupIncremental, self).metadata() meta.update({ 'parent_location': self.parent_location, 'parent_checksum': self.parent_checksum, }) return meta ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/mongo_impl.py0000644000175000017500000000776500000000000030321 0ustar00coreycorey00000000000000# Copyright (c) 2014 eBay Software Foundation # Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.mongodb import ( service as mongo_service) from trove.guestagent.datastore.experimental.mongodb import ( system as mongo_system) from trove.guestagent.strategies.backup import base CONF = cfg.CONF LOG = logging.getLogger(__name__) MONGODB_DBPATH = CONF.mongodb.mount_point MONGO_DUMP_DIR = MONGODB_DBPATH + "/dump" LARGE_TIMEOUT = 1200 class MongoDump(base.BackupRunner): """Implementation of Backup Strategy for MongoDump.""" __strategy_name__ = 'mongodump' def __init__(self, *args, **kwargs): self.app = mongo_service.MongoDBApp() super(MongoDump, self).__init__(*args, **kwargs) def _run_pre_backup(self): """Create archival contents in dump dir""" try: est_dump_size = self.estimate_dump_size() avail = operating_system.get_bytes_free_on_fs(MONGODB_DBPATH) if est_dump_size > avail: self.cleanup() # TODO(atomic77) Though we can fully recover from this error # BackupRunner will leave the trove instance in a BACKUP state raise OSError(_("Need more free space to run mongodump, " "estimated %(est_dump_size)s" " and found %(avail)s bytes free ") % {'est_dump_size': est_dump_size, 'avail': avail}) operating_system.create_directory(MONGO_DUMP_DIR, as_root=True) operating_system.chown(MONGO_DUMP_DIR, mongo_system.MONGO_USER, mongo_system.MONGO_USER, as_root=True) # high timeout here since mongodump can take a long time utils.execute_with_timeout( 'mongodump', '--out', MONGO_DUMP_DIR, *(self.app.admin_cmd_auth_params()), run_as_root=True, root_helper='sudo', timeout=LARGE_TIMEOUT ) except exception.ProcessExecutionError: LOG.debug("Caught exception when creating the dump") self.cleanup() raise @property def cmd(self): """Tars and streams the dump dir contents to the stdout """ cmd = 'sudo tar cPf - ' + MONGO_DUMP_DIR return cmd + self.zip_cmd + self.encrypt_cmd def cleanup(self): operating_system.remove(MONGO_DUMP_DIR, force=True, as_root=True) def _run_post_backup(self): self.cleanup() def estimate_dump_size(self): """ Estimate the space that the mongodump will take based on the output of db.stats().dataSize. This seems to be conservative, as the actual bson output in many cases is a fair bit smaller. """ dbs = self.app.list_all_dbs() # mongodump does not dump the content of the local database dbs.remove('local') dbstats = dict([(d, 0) for d in dbs]) for d in dbstats: dbstats[d] = self.app.db_data_size(d) LOG.debug("Estimated size for databases: " + str(dbstats)) return sum(dbstats.values()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/postgresql_impl.py0000644000175000017500000002346700000000000031402 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import stat from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.experimental.postgresql.service import PgSqlApp from trove.guestagent.strategies.backup import base CONF = cfg.CONF LOG = logging.getLogger(__name__) WAL_ARCHIVE_DIR = CONF.postgresql.wal_archive_location class PgDump(base.BackupRunner): """Implementation of Backup Strategy for pg_dump.""" __strategy_name__ = 'pg_dump' @property def cmd(self): cmd = 'sudo -u postgres pg_dumpall ' return cmd + self.zip_cmd + self.encrypt_cmd class PgBaseBackupUtil(object): def most_recent_backup_wal(self, pos=0): """ Return the WAL file for the most recent backup """ mrb_file = self.most_recent_backup_file(pos=pos) # just return the first part of the filename return mrb_file.split('.')[0] def most_recent_backup_file(self, pos=0): """ Look for the most recent .backup file that basebackup creates :return: a string like 000000010000000000000006.00000168.backup """ walre = re.compile("[0-9A-F]{24}.*.backup") wal_files = [wal_file for wal_file in os.listdir(WAL_ARCHIVE_DIR) if walre.search(wal_file)] wal_files = sorted(wal_files, reverse=True) if not wal_files: return None return wal_files[pos] def log_files_since_last_backup(self, pos=0): """Return the WAL files since the provided last backup pg_archivebackup depends on alphanumeric sorting to decide wal order, so we'll do so too: https://github.com/postgres/postgres/blob/REL9_4_STABLE/contrib /pg_archivecleanup/pg_archivecleanup.c#L122 """ last_wal = self.most_recent_backup_wal(pos=pos) walre = re.compile("^[0-9A-F]{24}$") wal_files = [wal_file for wal_file in os.listdir(WAL_ARCHIVE_DIR) if walre.search(wal_file) and wal_file >= last_wal] return wal_files class PgBaseBackup(base.BackupRunner, PgBaseBackupUtil): """Base backups are taken with the pg_basebackup filesystem-level backup tool pg_basebackup creates a copy of the binary files in the PostgreSQL cluster data directory and enough WAL segments to allow the database to be brought back to a consistent state. Associated with each backup is a log location, normally indicated by the WAL file name and the position inside the file. 
""" __strategy_name__ = 'pg_basebackup' def __init__(self, *args, **kwargs): self._app = None super(PgBaseBackup, self).__init__(*args, **kwargs) self.label = None self.stop_segment = None self.start_segment = None self.start_wal_file = None self.stop_wal_file = None self.checkpoint_location = None self.mrb = None @property def app(self): if self._app is None: self._app = self._build_app() return self._app def _build_app(self): return PgSqlApp() @property def cmd(self): cmd = ("pg_basebackup -h %s -U %s --pgdata=-" " --label=%s --format=tar --xlog " % (self.app.pgsql_run_dir, self.app.ADMIN_USER, self.base_filename)) return cmd + self.zip_cmd + self.encrypt_cmd def base_backup_metadata(self, metadata_file): """Parse the contents of the .backup file""" metadata = {} operating_system.chmod( metadata_file, FileMode(add=[stat.S_IROTH]), as_root=True) start_re = re.compile(r"START WAL LOCATION: (.*) \(file (.*)\)") stop_re = re.compile(r"STOP WAL LOCATION: (.*) \(file (.*)\)") checkpt_re = re.compile("CHECKPOINT LOCATION: (.*)") label_re = re.compile("LABEL: (.*)") metadata_contents = operating_system.read_file(metadata_file) match = start_re.search(metadata_contents) if match: self.start_segment = match.group(1) metadata['start-segment'] = self.start_segment self.start_wal_file = match.group(2) metadata['start-wal-file'] = self.start_wal_file match = stop_re.search(metadata_contents) if match: self.stop_segment = match.group(1) metadata['stop-segment'] = self.stop_segment self.stop_wal_file = match.group(2) metadata['stop-wal-file'] = self.stop_wal_file match = checkpt_re.search(metadata_contents) if match: self.checkpoint_location = match.group(1) metadata['checkpoint-location'] = self.checkpoint_location match = label_re.search(metadata_contents) if match: self.label = match.group(1) metadata['label'] = self.label return metadata def check_process(self): # If any of the below variables were not set by either metadata() # or direct retrieval from the pgsql backup commands, then something # has gone wrong if not self.start_segment or not self.start_wal_file: LOG.info("Unable to determine starting WAL file/segment") return False if not self.stop_segment or not self.stop_wal_file: LOG.info("Unable to determine ending WAL file/segment") return False if not self.label: LOG.info("No backup label found") return False return True def metadata(self): """pg_basebackup may complete, and we arrive here before the history file is written to the wal archive. So we need to handle two possibilities: - this is the first backup, and no history file exists yet - this isn't the first backup, and so the history file we retrieve isn't the one we just ran! """ def _metadata_found(): LOG.debug("Polling for backup metadata... 
") self.mrb = self.most_recent_backup_file() if not self.mrb: LOG.debug("No history files found!") return False metadata = self.base_backup_metadata( os.path.join(WAL_ARCHIVE_DIR, self.mrb)) LOG.debug("Label to pg_basebackup: %(base_filename)s " "label found: %(label)s", {'base_filename': self.base_filename, 'label': metadata['label']}) LOG.info("Metadata for backup: %s.", str(metadata)) return metadata['label'] == self.base_filename try: utils.poll_until(_metadata_found, sleep_time=5, time_out=60) except exception.PollTimeOut: raise RuntimeError(_("Timeout waiting for backup metadata for" " backup %s") % self.base_filename) return self.base_backup_metadata( os.path.join(WAL_ARCHIVE_DIR, self.mrb)) def _run_post_backup(self): """Get rid of WAL data we don't need any longer""" arch_cleanup_bin = os.path.join(self.app.pgsql_extra_bin_dir, "pg_archivecleanup") bk_file = os.path.basename(self.most_recent_backup_file()) cmd_full = " ".join((arch_cleanup_bin, WAL_ARCHIVE_DIR, bk_file)) utils.execute("sudo", "su", "-", self.app.pgsql_owner, "-c", "%s" % cmd_full) class PgBaseBackupIncremental(PgBaseBackup): """To restore an incremental backup from a previous backup, in PostgreSQL, is effectively to replay the WAL entries to a designated point in time. All that is required is the most recent base backup, and all WAL files """ def __init__(self, *args, **kwargs): if (not kwargs.get('parent_location') or not kwargs.get('parent_checksum')): raise AttributeError(_('Parent missing!')) super(PgBaseBackupIncremental, self).__init__(*args, **kwargs) self.parent_location = kwargs.get('parent_location') self.parent_checksum = kwargs.get('parent_checksum') def _run_pre_backup(self): self.backup_label = self.base_filename self.start_segment = self.app.pg_start_backup(self.backup_label) self.start_wal_file = self.app.pg_xlogfile_name(self.start_segment) self.stop_segment = self.app.pg_stop_backup() # We have to hack this because self.command is # initialized in the base class before we get here, which is # when we will know exactly what WAL files we want to archive self.command = self._cmd() def _cmd(self): wal_file_list = self.log_files_since_last_backup(pos=1) cmd = 'sudo tar -cf - -C {wal_dir} {wal_list} '.format( wal_dir=WAL_ARCHIVE_DIR, wal_list=" ".join(wal_file_list)) return cmd + self.zip_cmd + self.encrypt_cmd def metadata(self): _meta = super(PgBaseBackupIncremental, self).metadata() _meta.update({ 'parent_location': self.parent_location, 'parent_checksum': self.parent_checksum, }) return _meta ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/backup/experimental/redis_impl.py0000644000175000017500000000257700000000000030304 0ustar00coreycorey00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from trove.guestagent.datastore.experimental.redis import service from trove.guestagent.strategies.backup import base LOG = logging.getLogger(__name__) class RedisBackup(base.BackupRunner): """Implementation of Backup Strategy for Redis.""" __strategy_name__ = 'redisbackup' def __init__(self, filename, **kwargs): self.app = service.RedisApp() super(RedisBackup, self).__init__(filename, **kwargs) @property def cmd(self): cmd = 'sudo cat %s' % self.app.get_persistence_filepath() return cmd + self.zip_cmd + self.encrypt_cmd def _run_pre_backup(self): self.app.admin.persist_data() LOG.debug('Redis data persisted.') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/strategies/backup/mysql_impl.py0000644000175000017500000001224400000000000025636 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import re from oslo_log import log as logging from trove.common.i18n import _ from trove.guestagent.datastore.mysql.service import MySqlApp from trove.guestagent.datastore.mysql_common.service import ADMIN_USER_NAME from trove.guestagent.strategies.backup import base LOG = logging.getLogger(__name__) class MySQLDump(base.BackupRunner): """Implementation of Backup Strategy for MySQLDump.""" __strategy_name__ = 'mysqldump' @property def cmd(self): user_and_pass = ( ' --password=%(password)s -u %(user)s ' '2>/tmp/mysqldump.log' % {'password': MySqlApp.get_auth_password(), 'user': ADMIN_USER_NAME}) cmd = ('mysqldump' ' --all-databases' ' %(extra_opts)s' ' --opt' + user_and_pass) return cmd + self.zip_cmd + self.encrypt_cmd def check_process(self): """Check the output from mysqldump ignoring 'Warning'.""" LOG.debug('Checking mysqldump process output.') with open('/tmp/mysqldump.log', 'r') as backup_log: output = backup_log.read() if not output: return True LOG.debug(output) for line in output.splitlines(): if not re.search('Warning', line.strip()): LOG.error("Mysqldump did not complete successfully.") return False return True class InnoBackupEx(base.BackupRunner): """Implementation of Backup Strategy for InnoBackupEx.""" __strategy_name__ = 'innobackupex' @property def user_and_pass(self): return ('--user=%(user)s --password=%(password)s --host=localhost ' '--socket=%(socket_file)s' % {'user': ADMIN_USER_NAME, 'password': MySqlApp.get_auth_password(), 'socket_file': '/var/run/mysqld/mysqld.sock'}) @property def cmd(self): cmd = ('sudo innobackupex' ' --stream=xbstream' ' %(extra_opts)s ' + self.user_and_pass + ' ' + MySqlApp.get_data_dir() + ' 2>/tmp/innobackupex.log' ) return cmd + self.zip_cmd + self.encrypt_cmd def check_process(self): """Check the output from innobackupex for 'completed OK!'.""" LOG.debug('Checking innobackupex process output.') with open('/tmp/innobackupex.log', 'r') as backup_log: output = backup_log.read() if not output: 
LOG.error("Innobackupex log file empty.") return False LOG.debug(output) last_line = output.splitlines()[-1].strip() if not re.search('completed OK!', last_line): LOG.error("Innobackupex did not complete successfully.") return False return True def metadata(self): LOG.debug('Getting metadata for backup %s', self.base_filename) meta = {} lsn = re.compile(r"The latest check point \(for incremental\): " r"'(\d+)'") with open('/tmp/innobackupex.log', 'r') as backup_log: output = backup_log.read() match = lsn.search(output) if match: meta = {'lsn': match.group(1)} LOG.info("Metadata for backup %s: %s", self.base_filename, meta) return meta @property def filename(self): return '%s.xbstream' % self.base_filename class InnoBackupExIncremental(InnoBackupEx): """InnoBackupEx incremental backup.""" def __init__(self, *args, **kwargs): if not kwargs.get('lsn'): raise AttributeError(_('lsn attribute missing, bad parent?')) super(InnoBackupExIncremental, self).__init__(*args, **kwargs) self.parent_location = kwargs.get('parent_location') self.parent_checksum = kwargs.get('parent_checksum') @property def cmd(self): cmd = ('sudo innobackupex' ' --stream=xbstream' ' --incremental' ' --incremental-lsn=%(lsn)s' ' %(extra_opts)s ' + self.user_and_pass + ' ' + MySqlApp.get_data_dir() + ' 2>/tmp/innobackupex.log') return cmd + self.zip_cmd + self.encrypt_cmd def metadata(self): _meta = super(InnoBackupExIncremental, self).metadata() _meta.update({ 'parent_location': self.parent_location, 'parent_checksum': self.parent_checksum, }) return _meta ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7681108 trove-12.1.0.dev92/trove/guestagent/strategies/replication/0000755000175000017500000000000000000000000024137 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/replication/__init__.py0000644000175000017500000000376100000000000026257 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.strategy import Strategy LOG = logging.getLogger(__name__) CONF = cfg.CONF __replication_instance = None __replication_manager = None __replication_namespace = None __replication_strategy = None def get_instance(manager): global __replication_instance global __replication_manager global __replication_namespace if not __replication_instance or manager != __replication_manager: replication_strategy = get_strategy(manager) __replication_namespace = CONF.get(manager).replication_namespace replication_strategy_cls = get_strategy_cls( replication_strategy, __replication_namespace) __replication_instance = replication_strategy_cls() __replication_manager = manager LOG.debug('Got replication instance from: %(namespace)s.%(strategy)s', {'namespace': __replication_namespace, 'strategy': __replication_strategy}) return __replication_instance def get_strategy(manager): global __replication_strategy if not __replication_strategy or manager != __replication_manager: __replication_strategy = CONF.get(manager).replication_strategy return __replication_strategy def get_strategy_cls(replication_driver, ns=__name__): return Strategy.get_strategy(replication_driver, ns) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/replication/base.py0000644000175000017500000000510100000000000025420 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
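# The Replication class defined below is an abstract base: a new datastore
# strategy subclasses it and implements every @abc.abstractmethod before it
# can be instantiated. A self-contained sketch of that contract, using a
# stand-in base and a hypothetical no-op subclass (real strategies live in
# the sibling mysql_*, mariadb and experimental modules):
import abc


class _ReplicationSketch(abc.ABC):
    @abc.abstractmethod
    def get_master_ref(self, service, snapshot_info): ...

    @abc.abstractmethod
    def enable_as_master(self, service, master_config): ...

    @abc.abstractmethod
    def enable_as_slave(self, service, snapshot, slave_config): ...

    @abc.abstractmethod
    def detach_slave(self, service, for_failover): ...


class _NoopReplication(_ReplicationSketch):
    def get_master_ref(self, service, snapshot_info):
        return {'host': '192.0.2.10', 'port': 5432}  # illustrative values

    def enable_as_master(self, service, master_config):
        pass

    def enable_as_slave(self, service, snapshot, slave_config):
        pass

    def detach_slave(self, service, for_failover):
        return None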
# import abc import six from trove.common.strategies.strategy import Strategy @six.add_metaclass(abc.ABCMeta) class Replication(Strategy): """Base class for Replication Strategy implementation.""" __strategy_type__ = 'replication' __strategy_ns__ = 'trove.guestagent.strategies.replication' def __init__(self): super(Replication, self).__init__() @abc.abstractmethod def get_master_ref(self, service, snapshot_info): """Get reference to master site for replication strategy.""" def backup_required_for_replication(self): """Indicates whether a backup is required for replication.""" return True @abc.abstractmethod def snapshot_for_replication(self, context, service, location, snapshot_info): """Capture snapshot of master db.""" @abc.abstractmethod def enable_as_master(self, service, master_config): """Configure underlying database to act as master for replication.""" @abc.abstractmethod def enable_as_slave(self, service, snapshot, slave_config): """Configure underlying database as a slave of the given master.""" @abc.abstractmethod def detach_slave(self, service, for_failover): """Turn off replication on a slave site.""" @abc.abstractmethod def cleanup_source_on_replica_detach(self, service, replica_info): """Clean up the source on the detach of a replica.""" @abc.abstractmethod def demote_master(self, service): """Turn off replication on a master site.""" @property def repl_backup_runner(self): """Backup runner to be used to snapshot for replication""" return None @property def repl_incr_backup_runner(self): """Incremental backup runner to be used to snapshot for replication""" return None @property def repl_backup_extra_opts(self): """Extra options to be passed to the backup agent""" return None ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7681108 trove-12.1.0.dev92/trove/guestagent/strategies/replication/experimental/0000755000175000017500000000000000000000000026634 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/replication/experimental/__init__.py0000644000175000017500000000000000000000000030733 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/replication/experimental/mariadb_gtid.py0000644000175000017500000000464000000000000031620 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
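# A standalone sketch of the statement the strategy below issues on a new
# replica: with MariaDB GTID replication the replica tracks its own position,
# so CHANGE MASTER TO pins MASTER_USE_GTID=slave_pos instead of a binlog
# file/offset pair. Host and credentials here are illustrative only.
def _example_change_master_sql(host, port, user, password):
    return ("CHANGE MASTER TO MASTER_HOST='%(host)s', "
            "MASTER_PORT=%(port)s, "
            "MASTER_USER='%(user)s', "
            "MASTER_PASSWORD='%(password)s', "
            "MASTER_CONNECT_RETRY=15, "
            "MASTER_USE_GTID=slave_pos" %
            {'host': host, 'port': port, 'user': user, 'password': password})


if __name__ == '__main__':
    print(_example_change_master_sql('192.0.2.10', 3306, 'repl', 'secret'))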
# from oslo_log import log as logging from trove.common import cfg from trove.guestagent.strategies import backup from trove.guestagent.strategies.replication import mysql_base CONF = cfg.CONF LOG = logging.getLogger(__name__) class MariaDBGTIDReplication(mysql_base.MysqlReplicationBase): """MariaDB Replication coordinated by GTIDs.""" @property def repl_backup_runner(self): return backup.get_backup_strategy( CONF.mariadb.backup_strategy, CONF.mariadb.backup_namespace ) @property def repl_incr_backup_runner(self): strategy = CONF.mariadb.backup_incremental_strategy.get( CONF.mariadb.backup_strategy, CONF.mariadb.backup_strategy ) return backup.get_backup_strategy( strategy, CONF.mariadb.backup_namespace ) @property def repl_backup_extra_opts(self): return CONF.backup_runner_options.get( CONF.mariadb.backup_strategy, '' ) def connect_to_master(self, service, snapshot): logging_config = snapshot['log_position'] LOG.debug("connect_to_master %s", logging_config['replication_user']) change_master_cmd = ( "CHANGE MASTER TO MASTER_HOST='%(host)s', " "MASTER_PORT=%(port)s, " "MASTER_USER='%(user)s', " "MASTER_PASSWORD='%(password)s', " "MASTER_CONNECT_RETRY=15, " "MASTER_USE_GTID=slave_pos" % { 'host': snapshot['master']['host'], 'port': snapshot['master']['port'], 'user': logging_config['replication_user']['name'], 'password': logging_config['replication_user']['password'] }) service.execute_on_client(change_master_cmd) service.start_slave() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/replication/experimental/postgresql_impl.py0000644000175000017500000003040600000000000032435 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
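# A minimal sketch of the recovery.conf body the strategy below writes into
# a replica's data directory: standby mode, a primary_conninfo pointing at
# the master, a trigger file that ends recovery on promotion, and the latest
# timeline. Values are illustrative; the real file is built from the
# snapshot payload exchanged between master and replica.
def _example_recovery_conf(host, port, user, password, sslmode='prefer'):
    conninfo = ('host=%s port=%s dbname=os_admin user=%s password=%s '
                'sslmode=%s' % (host, port, user, password, sslmode))
    return ("standby_mode = 'on'\n"
            "primary_conninfo = '%s'\n"
            "trigger_file = '/tmp/postgresql.trigger'\n"
            "recovery_target_timeline='latest'\n" % conninfo)


if __name__ == '__main__':
    print(_example_recovery_conf('192.0.2.10', 5432, 'replicator', 'secret'))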
# import os from oslo_log import log as logging from oslo_utils import netutils from trove.common import cfg from trove.common.db.postgresql import models from trove.common import exception from trove.common.i18n import _ from trove.common import stream_codecs from trove.common import utils from trove.guestagent.backup.backupagent import BackupAgent from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.strategies import backup from trove.guestagent.strategies.replication import base AGENT = BackupAgent() CONF = cfg.CONF REPL_BACKUP_NAMESPACE = 'trove.guestagent.strategies.backup.experimental' \ '.postgresql_impl' LOG = logging.getLogger(__name__) TRIGGER_FILE = '/tmp/postgresql.trigger' REPL_USER = 'replicator' SLAVE_STANDBY_OVERRIDE = 'SlaveStandbyOverride' class PostgresqlReplicationStreaming(base.Replication): def __init__(self, *args, **kwargs): super(PostgresqlReplicationStreaming, self).__init__(*args, **kwargs) @property def repl_backup_runner(self): return backup.get_backup_strategy('PgBaseBackup', REPL_BACKUP_NAMESPACE) @property def repl_incr_backup_runner(self): return backup.get_backup_strategy('PgBaseBackupIncremental', REPL_BACKUP_NAMESPACE) @property def repl_backup_extra_opts(self): return CONF.backup_runner_options.get('PgBaseBackup', '') def get_master_ref(self, service, snapshot_info): master_ref = { 'host': netutils.get_my_ipv4(), 'port': cfg.get_configuration_property('postgresql_port') } return master_ref def backup_required_for_replication(self): return True def snapshot_for_replication(self, context, service, location, snapshot_info): snapshot_id = snapshot_info['id'] replica_number = snapshot_info.get('replica_number', 1) LOG.debug("Acquiring backup for replica number %d.", replica_number) # Only create a backup if it's the first replica if replica_number == 1: AGENT.execute_backup( context, snapshot_info, runner=self.repl_backup_runner, extra_opts=self.repl_backup_extra_opts, incremental_runner=self.repl_incr_backup_runner) else: LOG.info("Using existing backup created for previous replica.") repl_user_info = self._get_or_create_replication_user(service) log_position = { 'replication_user': repl_user_info } return snapshot_id, log_position def _get_or_create_replication_user(self, service): """There are three scenarios we need to deal with here: - This is a fresh master, with no replicator user created. Generate a new u/p - We are attaching a new slave and need to give it the login creds Send the creds we have stored in PGDATA/.replpass - This is a failed-over-to slave, who will have the replicator user but not the credentials file. Recreate the repl user in this case """ LOG.debug("Checking for replicator user") pwfile = os.path.join(service.pgsql_data_dir, ".replpass") admin = service.build_admin() if admin.user_exists(REPL_USER): if operating_system.exists(pwfile, as_root=True): LOG.debug("Found existing .replpass, returning pw") pw = operating_system.read_file(pwfile, as_root=True) else: LOG.debug("Found user but not .replpass, recreate") u = models.PostgreSQLUser(REPL_USER) admin._drop_user(context=None, user=u) pw = self._create_replication_user(service, admin, pwfile) else: LOG.debug("Found no replicator user, create one") pw = self._create_replication_user(service, admin, pwfile) repl_user_info = { 'name': REPL_USER, 'password': pw } return repl_user_info def _create_replication_user(self, service, admin, pwfile): """Create the replication user. 
Unfortunately, to be able to run pg_rewind, we need SUPERUSER, not just REPLICATION privilege """ pw = utils.generate_random_password() operating_system.write_file(pwfile, pw, as_root=True) operating_system.chown(pwfile, user=service.pgsql_owner, group=service.pgsql_owner, as_root=True) operating_system.chmod(pwfile, FileMode.SET_USR_RWX(), as_root=True) repl_user = models.PostgreSQLUser(name=REPL_USER, password=pw) admin._create_user(context=None, user=repl_user) admin.alter_user(None, repl_user, True, 'REPLICATION', 'SUPERUSER', 'LOGIN') return pw def enable_as_master(self, service, master_config, for_failover=False): """For a server to be a master in postgres, we need to enable the replication user in pg_hba and ensure that WAL logging is at the appropriate level (use the same settings as backups) """ LOG.debug("Enabling as master, with cfg: %s ", master_config) self._get_or_create_replication_user(service) hba_entry = "host replication replicator 0.0.0.0/0 md5 \n" tmp_hba = '/tmp/pg_hba' operating_system.copy(service.pgsql_hba_config, tmp_hba, force=True, as_root=True) operating_system.chmod(tmp_hba, FileMode.SET_ALL_RWX(), as_root=True) with open(tmp_hba, 'a+') as hba_file: hba_file.write(hba_entry) operating_system.copy(tmp_hba, service.pgsql_hba_config, force=True, as_root=True) operating_system.chmod(service.pgsql_hba_config, FileMode.SET_USR_RWX(), as_root=True) operating_system.remove(tmp_hba, as_root=True) service.reload_configuration() def enable_as_slave(self, service, snapshot, slave_config): """Adds appropriate config options to postgresql.conf, and writes out the recovery.conf file used to set up replication """ LOG.debug("Got slave_config: %s", str(slave_config)) self._write_standby_recovery_file(service, snapshot, sslmode='prefer') self.enable_hot_standby(service) # Ensure the WAL arch is empty before restoring service.recreate_wal_archive_dir() def detach_slave(self, service, for_failover): """Touch trigger file in to disable recovery mode""" LOG.info("Detaching slave, use trigger to disable recovery mode") operating_system.write_file(TRIGGER_FILE, '') operating_system.chown(TRIGGER_FILE, user=service.pgsql_owner, group=service.pgsql_owner, as_root=True) def _wait_for_failover(): """Wait until slave has switched out of recovery mode""" return not service.pg_is_in_recovery() try: utils.poll_until(_wait_for_failover, time_out=120) except exception.PollTimeOut: raise RuntimeError(_("Timeout occurred waiting for slave to exit " "recovery mode")) def cleanup_source_on_replica_detach(self, admin_service, replica_info): pass def _rewind_against_master(self, service): """Call pg_rewind to resync datadir against state of new master We should already have a recovery.conf file in PGDATA """ rconf = operating_system.read_file( service.pgsql_recovery_config, codec=stream_codecs.KeyValueCodec(line_terminator='\n'), as_root=True) conninfo = rconf['primary_conninfo'].strip() # The recovery.conf file we want should already be there, but pg_rewind # will delete it, so copy it out first rec = service.pgsql_recovery_config tmprec = "/tmp/recovery.conf.bak" operating_system.move(rec, tmprec, as_root=True) cmd_full = " ".join(["pg_rewind", '--target-pgdata=' + service.pgsql_data_dir, '--source-server=' + conninfo]) out, err = utils.execute("sudo", "su", "-", service.pgsql_owner, "-c", "%s" % cmd_full, check_exit_code=0) LOG.debug("Got stdout %(out)s and stderr %(err)s from pg_rewind", {'out': str(out), 'err': str(err)}) operating_system.move(tmprec, rec, as_root=True) def demote_master(self, 
service): """In order to demote a master we need to shutdown the server and call pg_rewind against the new master to enable a proper timeline switch. """ service.stop_db() self._rewind_against_master(service) service.start_db() def connect_to_master(self, service, snapshot): """All that is required in postgresql to connect to a slave is to restart with a recovery.conf file in the data dir, which contains the connection information for the master. """ assert operating_system.exists(service.pgsql_recovery_config, as_root=True) service.restart() def _remove_recovery_file(self, service): operating_system.remove(service.pgsql_recovery_config, as_root=True) def _write_standby_recovery_file(self, service, snapshot, sslmode='prefer'): LOG.info("Snapshot data received: %s", str(snapshot)) logging_config = snapshot['log_position'] conninfo_params = \ {'host': snapshot['master']['host'], 'port': snapshot['master']['port'], 'repl_user': logging_config['replication_user']['name'], 'password': logging_config['replication_user']['password'], 'sslmode': sslmode} conninfo = 'host=%(host)s ' \ 'port=%(port)s ' \ 'dbname=os_admin ' \ 'user=%(repl_user)s ' \ 'password=%(password)s ' \ 'sslmode=%(sslmode)s ' % conninfo_params recovery_conf = "standby_mode = 'on'\n" recovery_conf += "primary_conninfo = '" + conninfo + "'\n" recovery_conf += "trigger_file = '/tmp/postgresql.trigger'\n" recovery_conf += "recovery_target_timeline='latest'\n" operating_system.write_file(service.pgsql_recovery_config, recovery_conf, codec=stream_codecs.IdentityCodec(), as_root=True) operating_system.chown(service.pgsql_recovery_config, user=service.pgsql_owner, group=service.pgsql_owner, as_root=True) def enable_hot_standby(self, service): # Only support pg version > 9.6, wal_level set to replica, and # remove parameter "checkpoint_segments". opts = {'hot_standby': 'on', 'wal_level': 'replica', 'wal_log_hints': 'on'} service.configuration_manager.\ apply_system_override(opts, SLAVE_STANDBY_OVERRIDE) def get_replica_context(self, service): LOG.debug("Calling get_replica_context") repl_user_info = self._get_or_create_replication_user(service) log_position = { 'replication_user': repl_user_info } return { 'master': self.get_master_ref(None, None), 'log_position': log_position } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/replication/experimental/redis_sync.py0000644000175000017500000000670100000000000031354 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
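# A compact sketch of the Redis commands the strategy below drives when
# attaching a replica: set masterauth if the master requires a password,
# then point the instance at the master with SLAVEOF. Shown against a
# redis-py style client as an assumption — the real code goes through
# Trove's RedisApp/RedisAdmin wrappers, not redis-py directly.
def _example_attach_replica(client, master_host, master_port, passwd=None):
    # client is assumed to expose config_set() and slaveof() like redis.Redis
    client.config_set('masterauth', passwd or '')
    client.slaveof(master_host, master_port)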
# from oslo_log import log as logging from oslo_utils import netutils from trove.guestagent.strategies.replication import base LOG = logging.getLogger(__name__) class RedisSyncReplication(base.Replication): """Redis Replication strategy.""" __strategy_ns__ = 'trove.guestagent.strategies.replication.experimental' __strategy_name__ = 'RedisSyncReplication' CONF_LABEL_REPLICATION_MASTER = 'replication_master' CONF_LABEL_REPLICATION_SLAVE = 'replication_slave' def get_master_ref(self, service, snapshot_info): master_ref = { 'host': netutils.get_my_ipv4(), 'port': service.get_port(), 'requirepass': service.get_auth_password(), } return master_ref def backup_required_for_replication(self): LOG.debug('Request for replication backup: no backup required') return False def snapshot_for_replication(self, context, service, location, snapshot_info): return None, None def enable_as_master(self, service, master_config): service.configuration_manager.apply_system_override( master_config, change_id=self.CONF_LABEL_REPLICATION_MASTER) service.restart() def enable_as_slave(self, service, snapshot, slave_config): service.configuration_manager.apply_system_override( slave_config, change_id=self.CONF_LABEL_REPLICATION_SLAVE) master_info = snapshot['master'] master_host = master_info['host'] master_port = master_info['port'] connect_options = {'slaveof': [master_host, master_port]} master_passwd = master_info.get('requirepass') if master_passwd: connect_options['masterauth'] = master_passwd service.admin.config_set('masterauth', master_passwd) else: service.admin.config_set('masterauth', "") service.configuration_manager.apply_system_override( connect_options, change_id=self.CONF_LABEL_REPLICATION_SLAVE) service.admin.set_master(host=master_host, port=master_port) LOG.debug('Enabled as slave.') def detach_slave(self, service, for_failover): service.configuration_manager.remove_system_override( change_id=self.CONF_LABEL_REPLICATION_SLAVE) service.admin.set_master(host=None, port=None) service.admin.config_set('masterauth', "") return None def cleanup_source_on_replica_detach(self, service, replica_info): # Nothing needs to be done to the master when a replica goes away. pass def get_replica_context(self, service): return { 'master': self.get_master_ref(service, None), } def demote_master(self, service): service.configuration_manager.remove_system_override( change_id=self.CONF_LABEL_REPLICATION_MASTER) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/replication/mysql_base.py0000644000175000017500000001354100000000000026654 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
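# A standalone sketch of the credential scheme the base strategy below uses
# when provisioning a replication account: a short random suffix keeps user
# names unique across replicas and retries. secrets.token_urlsafe is a
# stand-in for trove.common.utils.generate_random_password.
import secrets
import uuid


def _example_replication_credentials():
    return {'name': 'slave_' + str(uuid.uuid4())[:8],
            'password': secrets.token_urlsafe(16)}


if __name__ == '__main__':
    creds = _example_replication_credentials()
    assert creds['name'].startswith('slave_') and len(creds['name']) == 14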
# import abc import uuid from oslo_log import log as logging from oslo_utils import netutils from trove.common import cfg from trove.common.db.mysql import models from trove.common import utils from trove.guestagent.backup.backupagent import BackupAgent from trove.guestagent.datastore.mysql.service import MySqlAdmin from trove.guestagent.strategies import backup from trove.guestagent.strategies.replication import base AGENT = BackupAgent() CONF = cfg.CONF REPL_BACKUP_NAMESPACE = 'trove.guestagent.strategies.backup.mysql_impl' LOG = logging.getLogger(__name__) class MysqlReplicationBase(base.Replication): """Base class for MySql Replication strategies.""" @property def repl_backup_runner(self): return backup.get_backup_strategy('InnoBackupEx', REPL_BACKUP_NAMESPACE) @property def repl_incr_backup_runner(self): return backup.get_backup_strategy('InnoBackupExIncremental', REPL_BACKUP_NAMESPACE) @property def repl_backup_extra_opts(self): return CONF.backup_runner_options.get('InnoBackupEx', '') def get_master_ref(self, service, snapshot_info): master_ref = { 'host': netutils.get_my_ipv4(), 'port': service.get_port() } return master_ref def _create_replication_user(self): replication_user = None replication_password = utils.generate_random_password(16) mysql_user = None # cache the model as we just want name validation retry_count = 0 while replication_user is None: try: name = 'slave_' + str(uuid.uuid4())[:8] if mysql_user: mysql_user.name = name else: mysql_user = models.MySQLUser( name=name, password=replication_password ) mysql_user.check_create() MySqlAdmin().create_user([mysql_user.serialize()]) LOG.debug("Trying to create replication user " + mysql_user.name) replication_user = { 'name': mysql_user.name, 'password': replication_password } except Exception: retry_count += 1 if retry_count > 5: LOG.error("Replication user retry count exceeded") raise return replication_user def snapshot_for_replication(self, context, service, location, snapshot_info): snapshot_id = snapshot_info['id'] replica_number = snapshot_info.get('replica_number', 1) LOG.debug("Acquiring backup for replica number %d.", replica_number) # Only create a backup if it's the first replica if replica_number == 1: AGENT.execute_backup( context, snapshot_info, runner=self.repl_backup_runner, extra_opts=self.repl_backup_extra_opts, incremental_runner=self.repl_incr_backup_runner) else: LOG.debug("Using existing backup created for previous replica.") LOG.debug("Replication snapshot %(snapshot_id)s used for replica " "number %(replica_number)d.", {'snapshot_id': snapshot_id, 'replica_number': replica_number}) replication_user = self._create_replication_user() service.grant_replication_privilege(replication_user) # With streamed InnobackupEx, the log position is in # the stream and will be decoded by the slave log_position = { 'replication_user': replication_user } return snapshot_id, log_position def enable_as_master(self, service, master_config): if not service.exists_replication_source_overrides(): service.write_replication_source_overrides(master_config) service.restart() @abc.abstractmethod def connect_to_master(self, service, snapshot): """Connects a slave to a master""" def enable_as_slave(self, service, snapshot, slave_config): try: service.write_replication_replica_overrides(slave_config) service.restart() self.connect_to_master(service, snapshot) except Exception: LOG.exception("Exception enabling guest as replica") raise def detach_slave(self, service, for_failover): replica_info = service.stop_slave(for_failover) 
service.remove_replication_replica_overrides() service.restart() return replica_info def get_replica_context(self, service): replication_user = self._create_replication_user() service.grant_replication_privilege(replication_user) return { 'master': self.get_master_ref(service, None), 'log_position': { 'replication_user': replication_user } } def cleanup_source_on_replica_detach(self, admin_service, replica_info): admin_service.delete_user_by_name(replica_info['replication_user']) def demote_master(self, service): service.remove_replication_source_overrides() service.restart() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/replication/mysql_binlog.py0000644000175000017500000000614500000000000027216 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import csv from oslo_log import log as logging from trove.common import exception from trove.common.i18n import _ from trove.guestagent.backup.backupagent import BackupAgent from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.mysql.service import MySqlApp from trove.guestagent.strategies.replication import mysql_base AGENT = BackupAgent() LOG = logging.getLogger(__name__) class MysqlBinlogReplication(mysql_base.MysqlReplicationBase): """MySql Replication coordinated by binlog position.""" class UnableToDetermineBinlogPosition(exception.TroveError): message = _("Unable to determine binlog position " "(from file %(binlog_file)s).") def connect_to_master(self, service, snapshot): logging_config = snapshot['log_position'] logging_config.update(self._read_log_position()) change_master_cmd = ( "CHANGE MASTER TO MASTER_HOST='%(host)s', " "MASTER_PORT=%(port)s, " "MASTER_USER='%(user)s', " "MASTER_PASSWORD='%(password)s', " "MASTER_LOG_FILE='%(log_file)s', " "MASTER_LOG_POS=%(log_pos)s, " "MASTER_CONNECT_RETRY=15" % { 'host': snapshot['master']['host'], 'port': snapshot['master']['port'], 'user': logging_config['replication_user']['name'], 'password': logging_config['replication_user']['password'], 'log_file': logging_config['log_file'], 'log_pos': logging_config['log_position'] }) service.execute_on_client(change_master_cmd) service.start_slave() def _read_log_position(self): INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir()) LOG.info("Setting read permissions on %s", INFO_FILE) operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True) LOG.info("Reading log position from %s", INFO_FILE) try: with open(INFO_FILE, 'rb') as f: row = next(csv.reader(f, delimiter='\t', skipinitialspace=True)) return { 'log_file': row[0], 'log_position': int(row[1]) } except (IOError, IndexError) as ex: LOG.exception(ex) raise self.UnableToDetermineBinlogPosition( {'binlog_file': INFO_FILE}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/replication/mysql_gtid.py0000644000175000017500000000710500000000000026670 0ustar00coreycorey00000000000000# Copyright 2014 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from oslo_utils import encodeutils from trove.common import exception from trove.common.i18n import _ from trove.guestagent.backup.backupagent import BackupAgent from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.mysql.service import MySqlApp from trove.guestagent.strategies.replication import mysql_base AGENT = BackupAgent() LOG = logging.getLogger(__name__) class MysqlGTIDReplication(mysql_base.MysqlReplicationBase): """MySql Replication coordinated by GTIDs.""" class UnableToDetermineLastMasterGTID(exception.TroveError): message = _("Unable to determine last GTID executed on master " "(from file %(binlog_file)s).") def connect_to_master(self, service, snapshot): if 'dataset' in snapshot: # pull the last executed GTID from the master via # the xtrabackup metadata file. If that value is # provided we need to set the gtid_purged variable # before executing the CHANGE MASTER TO command last_gtid = self._read_last_master_gtid() LOG.debug("last_gtid value is %s", last_gtid) # fix ['mysql-bin.000001', '154', '\n'] still existed last_gtid # with '\n' value if last_gtid and len(last_gtid) != 1: set_gtid_cmd = "SET GLOBAL gtid_purged='%s'" % last_gtid LOG.debug("set gtid_purged with %s", set_gtid_cmd) service.execute_on_client(set_gtid_cmd) logging_config = snapshot['log_position'] LOG.debug("connect_to_master %s", logging_config['replication_user']) change_master_cmd = ( "CHANGE MASTER TO MASTER_HOST='%(host)s', " "MASTER_PORT=%(port)s, " "MASTER_USER='%(user)s', " "MASTER_PASSWORD='%(password)s', " "MASTER_AUTO_POSITION=1, " "MASTER_CONNECT_RETRY=15" % { 'host': snapshot['master']['host'], 'port': snapshot['master']['port'], 'user': logging_config['replication_user']['name'], 'password': logging_config['replication_user']['password'] }) service.execute_on_client(change_master_cmd) service.start_slave() def _read_last_master_gtid(self): INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir()) LOG.info("Setting read permissions on %s", INFO_FILE) operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True) LOG.info("Reading last master GTID from %s", INFO_FILE) try: with open(INFO_FILE, 'rb') as f: row = f.read().split(b'\t') return encodeutils.safe_decode(row[2]) except (IOError, IndexError) as ex: LOG.exception(ex) raise self.UnableToDetermineLastMasterGTID( {'binlog_file': INFO_FILE}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7681108 trove-12.1.0.dev92/trove/guestagent/strategies/restore/0000755000175000017500000000000000000000000023311 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/__init__.py0000644000175000017500000000160400000000000025423 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from trove.common.strategies.strategy import Strategy LOG = logging.getLogger(__name__) def get_restore_strategy(restore_driver, ns=__name__): LOG.debug("Getting restore strategy: %s.", restore_driver) return Strategy.get_strategy(restore_driver, ns) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/base.py0000644000175000017500000000726000000000000024602 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
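# A minimal sketch of the streaming restore RestoreRunner below performs:
# chunks pulled from the storage driver are piped into one shell pipeline
# whose stages (optional openssl decrypt, optional gunzip, then the
# datastore-specific restore command) mirror how the backup was produced.
# The iterable of bytes and the sample command are illustrative.
import subprocess


def _example_unpack(chunks, command='gzip -d -c | tar xPf -'):
    proc = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE)
    total = 0
    for chunk in chunks:
        proc.stdin.write(chunk)
        total += len(chunk)
    proc.stdin.close()
    proc.wait()
    return total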
# from eventlet.green import subprocess from oslo_log import log as logging from trove.common import cfg from trove.common.strategies.strategy import Strategy from trove.common import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF CHUNK_SIZE = CONF.backup_chunk_size BACKUP_USE_GZIP = CONF.backup_use_gzip_compression BACKUP_USE_OPENSSL = CONF.backup_use_openssl_encryption BACKUP_DECRYPT_KEY = CONF.backup_aes_cbc_key class RestoreError(Exception): """Error running the Backup Command.""" class RestoreRunner(Strategy): """Base class for Restore Strategy implementations.""" """Restore a database from a previous backup.""" __strategy_type__ = 'restore_runner' __strategy_ns__ = 'trove.guestagent.strategies.restore' # The actual system calls to run the restore and prepare restore_cmd = None # The backup format type restore_type = None # Decryption Parameters is_zipped = BACKUP_USE_GZIP is_encrypted = BACKUP_USE_OPENSSL decrypt_key = BACKUP_DECRYPT_KEY def __init__(self, storage, **kwargs): self.storage = storage self.process = None self.location = kwargs.pop('location') self.checksum = kwargs.pop('checksum') self.restore_location = kwargs.get('restore_location') self.restore_cmd = (self.decrypt_cmd + self.unzip_cmd + (self.base_restore_cmd % kwargs)) super(RestoreRunner, self).__init__() def pre_restore(self): """Hook that is called before the restore command.""" pass def post_restore(self): """Hook that is called after the restore command.""" pass def restore(self): self.pre_restore() content_length = self._run_restore() self.post_restore() return content_length def _run_restore(self): return self._unpack(self.location, self.checksum, self.restore_cmd) def _unpack(self, location, checksum, command): stream = self.storage.load(location, checksum) self.process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE) content_length = 0 for chunk in stream: self.process.stdin.write(chunk) content_length += len(chunk) self.process.stdin.close() utils.raise_if_process_errored(self.process, RestoreError) if not self.check_process(): raise RestoreError LOG.debug("Restored %s bytes from stream.", content_length) return content_length @property def decrypt_cmd(self): if self.is_encrypted: return ('openssl enc -d -aes-256-cbc -salt -pass pass:%s | ' % self.decrypt_key) else: return '' @property def unzip_cmd(self): return 'gzip -d -c | ' if self.is_zipped else '' def check_process(self): """Hook for subclasses to check the restore process for errors.""" return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7681108 trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/0000755000175000017500000000000000000000000026006 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/__init__.py0000644000175000017500000000000000000000000030105 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/cassandra_impl.py0000644000175000017500000000500100000000000031334 0ustar00coreycorey00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # Copyright 2015 Tesora Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.cassandra import service from trove.guestagent.strategies.restore import base LOG = logging.getLogger(__name__) class NodetoolSnapshot(base.RestoreRunner): """Implementation of restore using the Nodetool (http://goo.gl/QtXVsM) utility. """ __strategy_name__ = 'nodetoolsnapshot' def __init__(self, storage, **kwargs): self._app = service.CassandraApp() kwargs.update({'restore_location': self._app.cassandra_data_dir}) super(NodetoolSnapshot, self).__init__(storage, **kwargs) def pre_restore(self): """Prepare the data directory for restored files. The directory itself is not included in the backup archive (i.e. the archive is rooted inside the data directory). This is to make sure we can always restore an old backup even if the standard guest agent data directory changes. """ LOG.debug('Initializing a data directory.') operating_system.create_directory( self.restore_location, user=self._app.cassandra_owner, group=self._app.cassandra_owner, force=True, as_root=True) def post_restore(self): """Updated ownership on the restored files. """ LOG.debug('Updating ownership of the restored files.') operating_system.chown( self.restore_location, self._app.cassandra_owner, self._app.cassandra_owner, recursive=True, force=True, as_root=True) @property def base_restore_cmd(self): """Command to extract a backup archive into a given location. Attempt to preserve access modifiers on the archived files. """ return 'sudo tar -xpPf - -C "%(restore_location)s"' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/couchbase_impl.py0000644000175000017500000002252300000000000031341 0ustar00coreycorey00000000000000# Copyright (c) 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import json import os.path import time from oslo_log import log as logging from trove.common import exception from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.couchbase import service from trove.guestagent.datastore.experimental.couchbase import system from trove.guestagent.strategies.restore import base LOG = logging.getLogger(__name__) class CbBackup(base.RestoreRunner): """ Implementation of Restore Strategy for Couchbase. 
""" __strategy_name__ = 'cbbackup' base_restore_cmd = 'sudo tar xpPf -' def __init__(self, *args, **kwargs): super(CbBackup, self).__init__(*args, **kwargs) def pre_restore(self): try: operating_system.remove(system.COUCHBASE_DUMP_DIR, force=True) except exception.ProcessExecutionError: LOG.exception("Error during pre-restore phase.") raise def post_restore(self): try: # Root enabled for the backup pwd_file = system.COUCHBASE_DUMP_DIR + system.SECRET_KEY if os.path.exists(pwd_file): with open(pwd_file, "r") as f: pw = f.read().rstrip("\n") root = service.CouchbaseRootAccess() root.set_password(pw) # Get current root password root = service.CouchbaseRootAccess() root_pwd = root.get_password() # Iterate through each bucket config buckets_json = system.COUCHBASE_DUMP_DIR + system.BUCKETS_JSON with open(buckets_json, "r") as f: out = f.read() if out == "[]": # No buckets or data to restore. Done. return d = json.loads(out) for i in range(len(d)): bucket_name = d[i]["name"] bucket_type = d[i]["bucketType"] if bucket_type == "membase": bucket_type = "couchbase" ram = int(utils.to_mb(d[i]["quota"]["ram"])) auth_type = d[i]["authType"] password = d[i]["saslPassword"] port = d[i]["proxyPort"] replica_number = d[i]["replicaNumber"] replica_index = 1 if d[i]["replicaIndex"] else 0 threads = d[i]["threadsNumber"] flush = 1 if "flush" in d[i]["controllers"] else 0 # cbrestore requires you to manually create dest buckets create_bucket_cmd = ('curl -X POST -u root:' + root_pwd + ' -d name="' + bucket_name + '"' + ' -d bucketType="' + bucket_type + '"' + ' -d ramQuotaMB="' + str(ram) + '"' + ' -d authType="' + auth_type + '"' + ' -d saslPassword="' + password + '"' + ' -d proxyPort="' + str(port) + '"' + ' -d replicaNumber="' + str(replica_number) + '"' + ' -d replicaIndex="' + str(replica_index) + '"' + ' -d threadsNumber="' + str(threads) + '"' + ' -d flushEnabled="' + str(flush) + '" ' + system.COUCHBASE_REST_API + '/pools/default/buckets') utils.execute_with_timeout(create_bucket_cmd, shell=True, timeout=300) if bucket_type == "memcached": continue # Wait for couchbase (membase) bucket creation to complete # (follows same logic as --wait for couchbase-cli) timeout_in_seconds = 120 start = time.time() bucket_exist = False while ((time.time() - start) <= timeout_in_seconds and not bucket_exist): url = (system.COUCHBASE_REST_API + '/pools/default/buckets/') outfile = system.COUCHBASE_DUMP_DIR + '/buckets.all' utils.execute_with_timeout('curl -u root:' + root_pwd + ' ' + url + ' > ' + outfile, shell=True, timeout=300) with open(outfile, "r") as file: out = file.read() buckets = json.loads(out) for bucket in buckets: if bucket["name"] == bucket_name: bucket_exist = True break if not bucket_exist: time.sleep(2) if not bucket_exist: raise base.RestoreError("Failed to create bucket '%s' " "within %s seconds" % (bucket_name, timeout_in_seconds)) # Query status # (follows same logic as --wait for couchbase-cli) healthy = False while ((time.time() - start) <= timeout_in_seconds): url = (system.COUCHBASE_REST_API + '/pools/default/buckets/' + bucket_name) outfile = system.COUCHBASE_DUMP_DIR + '/' + bucket_name utils.execute_with_timeout('curl -u root:' + root_pwd + ' ' + url + ' > ' + outfile, shell=True, timeout=300) all_node_ready = True with open(outfile, "r") as file: out = file.read() bucket = json.loads(out) for node in bucket["nodes"]: if node["status"] != "healthy": all_node_ready = False break if not all_node_ready: time.sleep(2) else: healthy = True break if not healthy: raise base.RestoreError("Bucket 
'%s' is created but " "not ready to use within %s " "seconds" % (bucket_name, timeout_in_seconds)) # Restore restore_cmd = ('/opt/couchbase/bin/cbrestore ' + system.COUCHBASE_DUMP_DIR + ' ' + system.COUCHBASE_REST_API + ' --bucket-source=' + bucket_name + ' --bucket-destination=' + bucket_name + ' -u root' + ' -p ' + root_pwd) try: utils.execute_with_timeout(restore_cmd, shell=True, timeout=300) except exception.ProcessExecutionError: # cbrestore fails or hangs at times: # http://www.couchbase.com/issues/browse/MB-10832 # Retrying typically works LOG.exception("cbrestore failed. Retrying...") utils.execute_with_timeout(restore_cmd, shell=True, timeout=300) except exception.ProcessExecutionError as p: LOG.error(p) raise base.RestoreError("Couchbase restore failed.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/couchdb_impl.py0000644000175000017500000000302400000000000031007 0ustar00coreycorey00000000000000# Copyright 2016 IBM Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.couchdb import service from trove.guestagent.strategies.restore import base class CouchDBBackup(base.RestoreRunner): __strategy_name__ = 'couchdbbackup' base_restore_cmd = 'sudo tar xPf -' def __init__(self, *args, **kwargs): self.appStatus = service.CouchDBAppStatus() self.app = service.CouchDBApp(self.appStatus) super(CouchDBBackup, self).__init__(*args, **kwargs) def post_restore(self): """ To restore from backup, all we need to do is untar the compressed database files into the database directory and change its ownership. """ operating_system.chown(service.COUCHDB_LIB_DIR, 'couchdb', 'couchdb', as_root=True) self.app.restart() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/db2_impl.py0000644000175000017500000000624100000000000030053 0ustar00coreycorey00000000000000# Copyright 2016 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
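# Illustrative note (not from the original source): each runner's
# base_restore_cmd is only the tail of the pipeline. With both the gzip and
# openssl config flags enabled, RestoreRunner.__init__ composes roughly:
#
#   openssl enc -d -aes-256-cbc -salt -pass pass:<key> | gzip -d -c | \
#       sudo tar xPf -
#
# where <key> stands for CONF.backup_aes_cbc_key, and the backup stream
# fetched from storage is written into the pipe's stdin chunk by chunk.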
from oslo_log import log as logging

from trove.common import exception
from trove.common import utils
from trove.guestagent.datastore.experimental.db2 import service
from trove.guestagent.datastore.experimental.db2 import system
from trove.guestagent.strategies.restore import base

LOG = logging.getLogger(__name__)


class DB2Backup(base.RestoreRunner):
    """Base class implementation of Restore strategy for DB2."""

    base_restore_cmd = 'sudo tar xPf -'

    def __init__(self, *args, **kwargs):
        super(DB2Backup, self).__init__(*args, **kwargs)
        self.appStatus = service.DB2AppStatus()
        self.app = service.DB2App(self.appStatus)
        self.admin = service.DB2Admin()
        self.restore_location = system.DB2_BACKUP_DIR

    def _post_restore(self, restore_command, rollforward_command=None):
        """Restore each database from the directory that we untarred into."""
        out = ""
        try:
            out, err = utils.execute_with_timeout(system.GET_DB_NAMES,
                                                  shell=True)
        except exception.ProcessExecutionError:
            LOG.exception("Couldn't find any databases.")

        dbNames = out.split()
        for dbName in dbNames:
            service.run_command(restore_command % {'dbname': dbName})
            if rollforward_command:
                service.run_command(rollforward_command
                                    % {'dbname': dbName})

        LOG.info("Cleaning out restore location: %s.", system.DB2_BACKUP_DIR)
        service.remove_db2_dir(system.DB2_BACKUP_DIR)


class DB2OfflineBackup(DB2Backup):
    """Implementation of Restore Strategy for full offline backups using
    the default circular logging.
    """

    __strategy_name__ = 'db2offlinebackup'

    def post_restore(self):
        self._post_restore(system.RESTORE_OFFLINE_DB)


class DB2OnlineBackup(DB2Backup):
    """Implementation of restore strategy for full online backups using
    archived logging.
    """

    __strategy_name__ = 'db2onlinebackup'

    def post_restore(self):
        """Once the databases are restored from a backup, we have to roll
        forward the logs to the point where the backup was taken. This
        brings the database to a state where it can be used; otherwise it
        remains in a BACKUP PENDING state. After rolling the logs forward,
        we can delete the archived logs.
        """
        self._post_restore(system.RESTORE_ONLINE_DB, system.ROLL_FORWARD_DB)
        service.remove_db2_dir(system.DB2_ARCHIVE_LOGS_DIR + '/*')


# File: trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/mariadb_impl.py

# Copyright 2019 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
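# Illustrative note (not from the original source): mbstream is MariaDB's
# counterpart to Percona's xbstream, so the runners below mirror the MySQL
# InnoBackupEx pattern: the storage stream is piped into 'mbstream -x' to
# unpack the data directory, and incremental restores additionally run
# 'mariabackup --prepare' to apply the redo log.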
import glob import os from oslo_log import log as logging from trove.common import cfg from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.mariadb import service from trove.guestagent.datastore.mysql_common import service as mysql_service from trove.guestagent.strategies.restore import base from trove.guestagent.strategies.restore import mysql_impl LOG = logging.getLogger(__name__) PREPARE_LOG = '/tmp/innoprepare.log' class MariaBackup(base.RestoreRunner, mysql_impl.MySQLRestoreMixin): __strategy_name__ = 'mariabackup' base_restore_cmd = ('sudo mbstream -x -C %(restore_location)s ' '2>/tmp/xbstream_extract.log') def __init__(self, *args, **kwargs): self._app = None super(MariaBackup, self).__init__(*args, **kwargs) @property def app(self): if self._app is None: self._app = service.MariaDBApp( mysql_service.BaseMySqlAppStatus.get() ) return self._app def pre_restore(self): self.app.stop_db() LOG.debug("Cleaning out restore location: %s.", self.restore_location) operating_system.chmod(self.restore_location, operating_system.FileMode.SET_FULL, as_root=True) utils.clean_out(self.restore_location) def post_restore(self): operating_system.chown(self.restore_location, 'mysql', None, force=True, as_root=True) # When using Mariabackup from versions prior to MariaDB 10.2.10, you # would also have to remove any pre-existing InnoDB redo log files. self._delete_old_binlogs() self.app.start_mysql() LOG.debug("Finished post restore.") def _delete_old_binlogs(self): files = glob.glob(os.path.join(self.restore_location, "ib_logfile*")) for f in files: os.unlink(f) def check_process(self): LOG.debug('Checking return code of mbstream restore process.') return_code = self.process.wait() if return_code != 0: LOG.error('mbstream exited with %s', return_code) return False return True class MariaBackupIncremental(MariaBackup): __strategy_name__ = 'mariabackupincremental' incremental_prep = ('sudo mariabackup --prepare ' '--target-dir=%(restore_location)s ' '%(incremental_args)s ' '2>/tmp/innoprepare.log') def __init__(self, *args, **kwargs): super(MariaBackupIncremental, self).__init__(*args, **kwargs) self.content_length = 0 def _incremental_restore_cmd(self, incremental_dir): """Return a command for a restore with a incremental location.""" args = {'restore_location': incremental_dir} return (self.decrypt_cmd + self.unzip_cmd + (self.base_restore_cmd % args)) def _incremental_prepare_cmd(self, incremental_dir): if incremental_dir is not None: incremental_arg = '--incremental-dir=%s' % incremental_dir else: incremental_arg = '' args = { 'restore_location': self.restore_location, 'incremental_args': incremental_arg, } return self.incremental_prep % args def _incremental_prepare(self, incremental_dir): prepare_cmd = self._incremental_prepare_cmd(incremental_dir) LOG.debug("Running mariabackup prepare: %s.", prepare_cmd) utils.execute(prepare_cmd, shell=True) LOG.debug("mariabackup prepare finished successfully.") def _incremental_restore(self, location, checksum): """Recursively apply backups from all parents. If we are the parent then we restore to the restore_location and we apply the logs to the restore_location only. Otherwise if we are an incremental we restore to a subfolder to prevent stomping on the full restore data. 
Then we run apply log with the '--incremental-dir' flag """ metadata = self.storage.load_metadata(location, checksum) incremental_dir = None if 'parent_location' in metadata: LOG.info("Restoring parent: %(parent_location)s" " checksum: %(parent_checksum)s.", metadata) parent_location = metadata['parent_location'] parent_checksum = metadata['parent_checksum'] # Restore parents recursively so backup are applied sequentially self._incremental_restore(parent_location, parent_checksum) # for *this* backup set the incremental_dir # just use the checksum for the incremental path as it is # sufficiently unique /var/lib/mysql/ incremental_dir = os.path.join( cfg.get_configuration_property('mount_point'), checksum) operating_system.create_directory(incremental_dir, as_root=True) command = self._incremental_restore_cmd(incremental_dir) else: # The parent (full backup) use the same command from InnobackupEx # super class and do not set an incremental_dir. command = self.restore_cmd self.content_length += self._unpack(location, checksum, command) self._incremental_prepare(incremental_dir) # Delete unpacked incremental backup metadata if incremental_dir: operating_system.remove(incremental_dir, force=True, as_root=True) def _run_restore(self): """Run incremental restore.""" self._incremental_restore(self.location, self.checksum) return self.content_length ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/mongo_impl.py0000644000175000017500000000342500000000000030524 0ustar00coreycorey00000000000000# Copyright (c) 2014 eBay Software Foundation # Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
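# Illustrative sketch (not from the original source): the incremental
# runners above (MariaBackupIncremental here, InnoBackupExIncremental later
# in mysql_impl.py) walk the backup chain through stored metadata.
# Restoring increment N unfolds roughly as:
#
#   full backup    -> unpacked into restore_location
#   increment 1..N -> each unpacked into <mount_point>/<checksum>, then
#                     applied via '--prepare --incremental-dir=<dir>'
#
# so the deepest parent (the full backup) lands first and each child is
# layered on top in order, after which the incremental dirs are removed.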
# from oslo_utils import netutils from trove.common import cfg from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.mongodb import ( service as mongo_service) from trove.guestagent.strategies.restore import base CONF = cfg.CONF IP = netutils.get_my_ipv4() LARGE_TIMEOUT = 1200 MONGODB_DBPATH = CONF.mongodb.mount_point MONGO_DUMP_DIR = MONGODB_DBPATH + "/dump" class MongoDump(base.RestoreRunner): __strategy_name__ = 'mongodump' base_restore_cmd = 'sudo tar xPf -' def __init__(self, *args, **kwargs): super(MongoDump, self).__init__(*args, **kwargs) self.app = mongo_service.MongoDBApp() def post_restore(self): """ Restore from the directory that we untarred into """ params = self.app.admin_cmd_auth_params() params.append(MONGO_DUMP_DIR) utils.execute_with_timeout('mongorestore', *params, timeout=LARGE_TIMEOUT) operating_system.remove(MONGO_DUMP_DIR, force=True, as_root=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/postgresql_impl.py0000644000175000017500000001701300000000000031606 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re from eventlet.green import subprocess from oslo_log import log as logging from trove.common import cfg from trove.common import stream_codecs from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.experimental.postgresql.service import PgSqlApp from trove.guestagent.strategies.restore import base CONF = cfg.CONF LOG = logging.getLogger(__name__) WAL_ARCHIVE_DIR = CONF.postgresql.wal_archive_location class PgDump(base.RestoreRunner): """Implementation of Restore Strategy for pg_dump.""" __strategy_name__ = 'pg_dump' base_restore_cmd = 'psql -U os_admin' IGNORED_ERROR_PATTERNS = [ re.compile(r'ERROR:\s*role "postgres" already exists'), ] def restore(self): """We are overriding the base class behavior to perform custom error handling. """ self.pre_restore() content_length = self._execute_postgres_restore() self.post_restore() return content_length def _execute_postgres_restore(self): # Postgresql outputs few benign messages into the stderr stream # during a normal restore procedure. # We need to watch for those and avoid raising # an exception in response. # Message 'ERROR: role "postgres" already exists' # is expected and does not pose any problems to the restore operation. 
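        # Feed the backup stream into 'psql -U os_admin' via stdin chunk by
        # chunk; stderr is collected and checked against
        # IGNORED_ERROR_PATTERNS in _handle_errors() rather than failing on
        # the first message.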
stream = self.storage.load(self.location, self.checksum) process = subprocess.Popen(self.restore_cmd, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE) content_length = 0 for chunk in stream: process.stdin.write(chunk) content_length += len(chunk) process.stdin.close() self._handle_errors(process) LOG.info("Restored %s bytes from stream.", content_length) return content_length def _handle_errors(self, process): # Handle messages in the error stream of a given process. # Raise an exception if the stream is not empty and # does not match the expected message sequence. try: err = process.stderr.read() # Empty error stream is always accepted as valid # for future compatibility. if err: for message in err.splitlines(False): if not any(regex.match(message) for regex in self.IGNORED_ERROR_PATTERNS): raise Exception(message) except OSError: pass class PgBaseBackup(base.RestoreRunner): """Implementation of Restore Strategy for pg_basebackup.""" __strategy_name__ = 'pg_basebackup' location = "" base_restore_cmd = '' IGNORED_ERROR_PATTERNS = [ re.compile(r'ERROR:\s*role "postgres" already exists'), ] def __init__(self, *args, **kwargs): self._app = None self.base_restore_cmd = 'sudo -u %s tar xCf %s - ' % ( self.app.pgsql_owner, self.app.pgsql_data_dir ) super(PgBaseBackup, self).__init__(*args, **kwargs) @property def app(self): if self._app is None: self._app = self._build_app() return self._app def _build_app(self): return PgSqlApp() def pre_restore(self): self.app.stop_db() LOG.info("Preparing WAL archive dir") self.app.recreate_wal_archive_dir() datadir = self.app.pgsql_data_dir operating_system.remove(datadir, force=True, recursive=True, as_root=True) operating_system.create_directory(datadir, user=self.app.pgsql_owner, group=self.app.pgsql_owner, force=True, as_root=True) def post_restore(self): operating_system.chmod(self.app.pgsql_data_dir, FileMode.SET_USR_RWX(), as_root=True, recursive=True, force=True) def write_recovery_file(self, restore=False): metadata = self.storage.load_metadata(self.location, self.checksum) recovery_conf = "" recovery_conf += "recovery_target_name = '%s' \n" % metadata['label'] recovery_conf += "recovery_target_timeline = '%s' \n" % 1 if restore: recovery_conf += ("restore_command = '" + self.pgsql_restore_cmd + "'\n") recovery_file = os.path.join(self.app.pgsql_data_dir, 'recovery.conf') operating_system.write_file(recovery_file, recovery_conf, codec=stream_codecs.IdentityCodec(), as_root=True) operating_system.chown(recovery_file, user=self.app.pgsql_owner, group=self.app.pgsql_owner, as_root=True) class PgBaseBackupIncremental(PgBaseBackup): def __init__(self, *args, **kwargs): super(PgBaseBackupIncremental, self).__init__(*args, **kwargs) self.content_length = 0 self.incr_restore_cmd = 'sudo -u %s tar -xf - -C %s ' % ( self.app.pgsql_owner, WAL_ARCHIVE_DIR ) self.pgsql_restore_cmd = "cp " + WAL_ARCHIVE_DIR + '/%f "%p"' def pre_restore(self): self.app.stop_db() def post_restore(self): self.write_recovery_file(restore=True) def _incremental_restore_cmd(self, incr=False): args = {'restore_location': self.restore_location} cmd = self.base_restore_cmd if incr: cmd = self.incr_restore_cmd return self.decrypt_cmd + self.unzip_cmd + (cmd % args) def _incremental_restore(self, location, checksum): metadata = self.storage.load_metadata(location, checksum) if 'parent_location' in metadata: LOG.info("Found parent at %s", metadata['parent_location']) parent_location = metadata['parent_location'] parent_checksum = metadata['parent_checksum'] 
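            # Recurse into the parent chain first so the base backup and any
            # earlier increments are unpacked before this one is applied on
            # top of them.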
self._incremental_restore(parent_location, parent_checksum) cmd = self._incremental_restore_cmd(incr=True) self.content_length += self._unpack(location, checksum, cmd) else: # For the parent base backup, revert to the default restore cmd LOG.info("Recursed back to full backup.") super(PgBaseBackupIncremental, self).pre_restore() cmd = self._incremental_restore_cmd(incr=False) self.content_length += self._unpack(location, checksum, cmd) operating_system.chmod(self.app.pgsql_data_dir, FileMode.SET_USR_RWX(), as_root=True, recursive=True, force=True) def _run_restore(self): self._incremental_restore(self.location, self.checksum) # content-length restored return self.content_length ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/experimental/redis_impl.py0000644000175000017500000000624200000000000030513 0ustar00coreycorey00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode from trove.guestagent.datastore.experimental.redis import service from trove.guestagent.datastore.experimental.redis import system from trove.guestagent.strategies.restore import base LOG = logging.getLogger(__name__) class RedisBackup(base.RestoreRunner): """Implementation of Restore Strategy for Redis.""" __strategy_name__ = 'redisbackup' CONF_LABEL_AOF_TEMP_OFF = 'restore_aof_temp_off' INFO_PERSISTENCE_SECTION = 'persistence' def __init__(self, storage, **kwargs): self.app = service.RedisApp() self.restore_location = self.app.get_persistence_filepath() self.base_restore_cmd = 'tee %s' % self.restore_location self.aof_set = self.app.is_appendonly_enabled() self.aof_off_cfg = {'appendonly': 'no'} kwargs.update({'restore_location': self.restore_location}) super(RedisBackup, self).__init__(storage, **kwargs) def pre_restore(self): self.app.stop_db() LOG.info("Removing old persistence file: %s.", self.restore_location) operating_system.remove(self.restore_location, force=True, as_root=True) dir = os.path.dirname(self.restore_location) operating_system.create_directory(dir, as_root=True) operating_system.chmod(dir, FileMode.SET_FULL, as_root=True) # IF AOF is set, we need to turn it off temporarily if self.aof_set: self.app.configuration_manager.apply_system_override( self.aof_off_cfg, change_id=self.CONF_LABEL_AOF_TEMP_OFF) def post_restore(self): operating_system.chown(self.restore_location, system.REDIS_OWNER, system.REDIS_OWNER, as_root=True) self.app.start_db() # IF AOF was set, we need to put back the original file if self.aof_set: self.app.admin.wait_until('loading', 0, section=self.INFO_PERSISTENCE_SECTION) self.app.admin.execute('BGREWRITEAOF') self.app.admin.wait_until('aof_rewrite_in_progress', 0, section=self.INFO_PERSISTENCE_SECTION) self.app.stop_db() 
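            # With the AOF rewrite finished, drop the temporary
            # 'appendonly no' override and restart so Redis comes back up
            # with its original persistence settings.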
self.app.configuration_manager.remove_system_override( change_id=self.CONF_LABEL_AOF_TEMP_OFF) self.app.start_db() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/guestagent/strategies/restore/mysql_impl.py0000644000175000017500000003602500000000000026057 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import glob import os import re import tempfile from oslo_log import log as logging import pexpect from trove.common import cfg from trove.common import exception from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode import trove.guestagent.datastore.mysql.service as dbaas from trove.guestagent.strategies.restore import base LOG = logging.getLogger(__name__) class MySQLRestoreMixin(object): """Common utils for restoring MySQL databases.""" RESET_ROOT_RETRY_TIMEOUT = 100 RESET_ROOT_SLEEP_INTERVAL = 10 RESET_ROOT_MYSQL_COMMANDS = ("SET PASSWORD FOR " "'root'@'localhost'='';") # This is a suffix MySQL appends to the file name given in # the '--log-error' startup parameter. _ERROR_LOG_SUFFIX = '.err' _ERROR_MESSAGE_PATTERN = re.compile(b"ERROR") def mysql_is_running(self): try: utils.execute_with_timeout("/usr/bin/mysqladmin", "ping") LOG.debug("MySQL is up and running.") return True except exception.ProcessExecutionError: LOG.debug("MySQL is not running.") return False def mysql_is_not_running(self): try: utils.execute_with_timeout("/usr/bin/pgrep", "mysqld") LOG.debug("MySQL is still running.") return False except exception.ProcessExecutionError: LOG.debug("MySQL is not running.") return True def poll_until_then_raise(self, event, exc): try: utils.poll_until(event, sleep_time=self.RESET_ROOT_SLEEP_INTERVAL, time_out=self.RESET_ROOT_RETRY_TIMEOUT) except exception.PollTimeOut: raise exc def _start_mysqld_safe_with_init_file(self, init_file, err_log_file): # This directory is added and removed by the mysql systemd service # as the database is started and stopped. The restore operation # takes place when the database is stopped, so the directory does # not exist, but it is assumed to exist by the mysqld_safe command # which starts the database. This command used to create this # directory if it didn't exist, but it was changed recently to # simply fail in this case. 
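        # Recreate the run directory manually so mysqld_safe can write its
        # pid and socket files while the systemd unit is down.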
run_dir = "/var/run/mysqld" if not os.path.exists(run_dir): utils.execute("mkdir", run_dir, run_as_root=True, root_helper="sudo") utils.execute("chown", "mysql:mysql", run_dir, err_log_file.name, init_file.name, run_as_root=True, root_helper="sudo") command_mysql_safe = ("sudo mysqld_safe" " --init-file=%s" " --log-error=%s" % (init_file.name, err_log_file.name)) LOG.debug("Spawning: %s" % command_mysql_safe) child = pexpect.spawn(command_mysql_safe) try: index = child.expect(['Starting mysqld daemon']) if index == 0: LOG.info("Starting MySQL") except pexpect.TIMEOUT: LOG.exception("Got a timeout launching mysqld_safe") finally: # There is a race condition here where we kill mysqld before # the init file been executed. We need to ensure mysqld is up. # # mysqld_safe will start even if init-file statement(s) fail. # We therefore also check for errors in the log file. self.poll_until_then_raise( self.mysql_is_running, base.RestoreError("Reset root password failed:" " mysqld did not start!")) first_err_message = self._find_first_error_message(err_log_file) if first_err_message: raise base.RestoreError("Reset root password failed: %s" % first_err_message) LOG.info("Root password reset successfully.") LOG.debug("Cleaning up the temp mysqld process.") utils.execute_with_timeout("mysqladmin", "-uroot", "--protocol=tcp", "shutdown") LOG.debug("Polling for shutdown to complete.") try: utils.poll_until(self.mysql_is_not_running, sleep_time=self.RESET_ROOT_SLEEP_INTERVAL, time_out=self.RESET_ROOT_RETRY_TIMEOUT) LOG.debug("Database successfully shutdown") except exception.PollTimeOut: LOG.debug("Timeout shutting down database " "- performing killall on mysqld_safe.") utils.execute_with_timeout("killall", "mysqld_safe", root_helper="sudo", run_as_root=True) self.poll_until_then_raise( self.mysql_is_not_running, base.RestoreError("Reset root password failed: " "mysqld did not stop!")) def reset_root_password(self): """Reset the password of the localhost root account used by Trove for initial datastore configuration. """ try: # Do not attempt to delete these files as the 'trove' user. # The process writing into it may have assumed its ownership. # Only owners can delete temporary files (restricted deletion). 
init_file = tempfile.NamedTemporaryFile(mode='w', delete=False) operating_system.write_file(init_file.name, self.RESET_ROOT_MYSQL_COMMANDS) operating_system.chmod(init_file.name, FileMode.ADD_READ_ALL, as_root=True) err_log_file = tempfile.NamedTemporaryFile( suffix=self._ERROR_LOG_SUFFIX, delete=False) self._start_mysqld_safe_with_init_file(init_file, err_log_file) finally: init_file.close() err_log_file.close() operating_system.remove( init_file.name, force=True, as_root=True) operating_system.remove( err_log_file.name, force=True, as_root=True) def _find_first_error_message(self, fp): if self._is_non_zero_file(fp): return self._find_first_pattern_match( fp, self._ERROR_MESSAGE_PATTERN) return None def _is_non_zero_file(self, fp): file_path = fp.name return os.path.isfile(file_path) and (os.path.getsize(file_path) > 0) def _find_first_pattern_match(self, fp, pattern): for line in fp: if pattern.match(line): return line return None class MySQLDump(base.RestoreRunner, MySQLRestoreMixin): """Implementation of Restore Strategy for MySQLDump.""" __strategy_name__ = 'mysqldump' base_restore_cmd = 'sudo mysql' class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin): """Implementation of Restore Strategy for InnoBackupEx.""" __strategy_name__ = 'innobackupex' base_restore_cmd = ('sudo xbstream -x -C %(restore_location)s' ' 2>/tmp/xbstream_extract.log') base_prepare_cmd = ('sudo innobackupex' ' --defaults-file=%(restore_location)s/backup-my.cnf' ' --ibbackup=xtrabackup' ' --apply-log' ' %(restore_location)s' ' 2>/tmp/innoprepare.log') def __init__(self, *args, **kwargs): self._app = None super(InnoBackupEx, self).__init__(*args, **kwargs) self.prepare_cmd = self.base_prepare_cmd % kwargs self.prep_retcode = None @property def app(self): if self._app is None: self._app = self._build_app() return self._app def _build_app(self): return dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) def pre_restore(self): self.app.stop_db() LOG.debug("Cleaning out restore location: %s.", self.restore_location) operating_system.chmod(self.restore_location, FileMode.SET_FULL, as_root=True) utils.clean_out(self.restore_location) def _run_prepare(self): LOG.info("Running innobackupex prepare: %s.", self.prepare_cmd) self.prep_retcode = utils.execute(self.prepare_cmd, shell=True) LOG.info("Innobackupex prepare finished successfully.") def post_restore(self): self._run_prepare() operating_system.chown(self.restore_location, 'mysql', None, force=True, as_root=True) self._delete_old_binlogs() self.reset_root_password() self.app.start_mysql() def _delete_old_binlogs(self): files = glob.glob(os.path.join(self.restore_location, "ib_logfile*")) for f in files: os.unlink(f) def check_process(self): """Check whether xbstream restore is successful.""" # We first check the restore process exits with 0, however # xbstream has a bug for creating new files: # https://jira.percona.com/browse/PXB-1542 # So we also check the stderr with ignorance of some known # non-error log lines. Currently we only need to ignore: # "encryption: using gcrypt x.x.x" # After PXB-1542 is fixed, we could just check the exit status. 
LOG.debug('Checking return code of xbstream restore process.') return_code = self.process.wait() if return_code != 0: LOG.error('xbstream exited with %s', return_code) return False LOG.debug('Checking xbstream restore process stderr output.') IGNORE_LINES = [ 'encryption: using gcrypt ', 'sudo: unable to resolve host ', ] with open('/tmp/xbstream_extract.log', 'r') as xbstream_log: for line in xbstream_log: # Ignore empty lines if not line.strip(): continue # Ignore known non-error log lines check_ignorance = [line.startswith(non_err) for non_err in IGNORE_LINES] if any(check_ignorance): continue else: LOG.error('xbstream restore failed with: %s', line.rstrip('\n')) return False return True class InnoBackupExIncremental(InnoBackupEx): __strategy_name__ = 'innobackupexincremental' incremental_prep = ('sudo innobackupex' ' --defaults-file=%(restore_location)s/backup-my.cnf' ' --ibbackup=xtrabackup' ' --apply-log' ' --redo-only' ' %(restore_location)s' ' %(incremental_args)s' ' 2>/tmp/innoprepare.log') def __init__(self, *args, **kwargs): super(InnoBackupExIncremental, self).__init__(*args, **kwargs) self.restore_location = kwargs.get('restore_location') self.content_length = 0 def _incremental_restore_cmd(self, incremental_dir): """Return a command for a restore with a incremental location.""" args = {'restore_location': incremental_dir} return (self.decrypt_cmd + self.unzip_cmd + (self.base_restore_cmd % args)) def _incremental_prepare_cmd(self, incremental_dir): if incremental_dir is not None: incremental_arg = '--incremental-dir=%s' % incremental_dir else: incremental_arg = '' args = { 'restore_location': self.restore_location, 'incremental_args': incremental_arg, } return self.incremental_prep % args def _incremental_prepare(self, incremental_dir): prepare_cmd = self._incremental_prepare_cmd(incremental_dir) LOG.debug("Running innobackupex prepare: %s.", prepare_cmd) utils.execute(prepare_cmd, shell=True) LOG.debug("Innobackupex prepare finished successfully.") def _incremental_restore(self, location, checksum): """Recursively apply backups from all parents. If we are the parent then we restore to the restore_location and we apply the logs to the restore_location only. Otherwise if we are an incremental we restore to a subfolder to prevent stomping on the full restore data. Then we run apply log with the '--incremental-dir' flag """ metadata = self.storage.load_metadata(location, checksum) incremental_dir = None if 'parent_location' in metadata: LOG.info("Restoring parent: %(parent_location)s" " checksum: %(parent_checksum)s.", metadata) parent_location = metadata['parent_location'] parent_checksum = metadata['parent_checksum'] # Restore parents recursively so backup are applied sequentially self._incremental_restore(parent_location, parent_checksum) # for *this* backup set the incremental_dir # just use the checksum for the incremental path as it is # sufficiently unique /var/lib/mysql/ incremental_dir = os.path.join( cfg.get_configuration_property('mount_point'), checksum) operating_system.create_directory(incremental_dir, as_root=True) command = self._incremental_restore_cmd(incremental_dir) else: # The parent (full backup) use the same command from InnobackupEx # super class and do not set an incremental_dir. 
command = self.restore_cmd self.content_length += self._unpack(location, checksum, command) self._incremental_prepare(incremental_dir) # Delete unpacked incremental backup metadata if incremental_dir: operating_system.remove(incremental_dir, force=True, as_root=True) def _run_restore(self): """Run incremental restore. First grab all parents and prepare them with '--redo-only'. After all backups are restored the super class InnoBackupEx post_restore method is called to do the final prepare with '--apply-log' """ self._incremental_restore(self.location, self.checksum) return self.content_length ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/guestagent/volume.py0000644000175000017500000003433700000000000021347 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import os import shlex import six from tempfile import NamedTemporaryFile import traceback from oslo_log import log as logging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import utils from trove.guestagent.common import operating_system TMP_MOUNT_POINT = "/mnt/volume" LOG = logging.getLogger(__name__) CONF = cfg.CONF # We removed all translation for messages destinated to log file. # However we cannot use _(xxx) instead of _("xxxx") because of the # H701 pep8 checking, so we have to pass different message format # string and format content here. def log_and_raise(log_fmt, exc_fmt, fmt_content=None): if fmt_content is not None: LOG.exception(log_fmt, fmt_content) raise_msg = exc_fmt % fmt_content else: # if fmt_content is not provided, log_fmt and # exc_fmt are just plain string messages LOG.exception(log_fmt) raise_msg = exc_fmt raise_msg += _("\nExc: %s") % traceback.format_exc() raise exception.GuestError(original_message=raise_msg) @six.add_metaclass(abc.ABCMeta) class FSBase(object): def __init__(self, fstype, format_options): self.fstype = fstype self.format_options = format_options @abc.abstractmethod def format(self, device_path, timeout): """ Format device """ @abc.abstractmethod def check_format(self, device_path): """ Check if device is formatted """ @abc.abstractmethod def resize(self, device_path): """ Resize the filesystem on device """ class FSExt(FSBase): def __init__(self, fstype, format_options): super(FSExt, self).__init__(fstype, format_options) def format(self, device_path, timeout): format_options = shlex.split(self.format_options) format_options.append(device_path) try: utils.execute_with_timeout( "mkfs", "--type", self.fstype, *format_options, timeout=timeout, run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: log_fmt = "Could not format '%s'." 
exc_fmt = _("Could not format '%s'.") log_and_raise(log_fmt, exc_fmt, device_path) def check_format(self, device_path): try: stdout, stderr = utils.execute( "dumpe2fs", device_path, run_as_root=True, root_helper="sudo") if 'has_journal' not in stdout: msg = _("Volume '%s' does not appear to be formatted.") % ( device_path) raise exception.GuestError(original_message=msg) except exception.ProcessExecutionError as pe: if 'Wrong magic number' in pe.stderr: volume_fstype = self.fstype log_fmt = "'Device '%(dev)s' did not seem to be '%(type)s'." exc_fmt = _("'Device '%(dev)s' did not seem to be '%(type)s'.") log_and_raise(log_fmt, exc_fmt, {'dev': device_path, 'type': volume_fstype}) log_fmt = "Volume '%s' was not formatted." exc_fmt = _("Volume '%s' was not formatted.") log_and_raise(log_fmt, exc_fmt, device_path) def resize(self, device_path): utils.execute("e2fsck", "-f", "-p", device_path, run_as_root=True, root_helper="sudo") utils.execute("resize2fs", device_path, run_as_root=True, root_helper="sudo") class FSExt3(FSExt): def __init__(self, format_options): super(FSExt3, self).__init__('ext3', format_options) class FSExt4(FSExt): def __init__(self, format_options): super(FSExt4, self).__init__('ext4', format_options) class FSXFS(FSBase): def __init__(self, format_options): super(FSXFS, self).__init__('xfs', format_options) def format(self, device_path, timeout): format_options = shlex.split(self.format_options) format_options.append(device_path) try: utils.execute_with_timeout( "mkfs.xfs", *format_options, timeout=timeout, run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: log_fmt = "Could not format '%s'." exc_fmt = _("Could not format '%s'.") log_and_raise(log_fmt, exc_fmt, device_path) def check_format(self, device_path): stdout, stderr = utils.execute( "xfs_admin", "-l", device_path, run_as_root=True, root_helper="sudo") if 'not a valid XFS filesystem' in stdout: msg = _("Volume '%s' does not appear to be formatted.") % ( device_path) raise exception.GuestError(original_message=msg) def resize(self, device_path): utils.execute("xfs_repair", device_path, run_as_root=True, root_helper="sudo") utils.execute("mount", device_path, run_as_root=True, root_helper="sudo") utils.execute("xfs_growfs", device_path, run_as_root=True, root_helper="sudo") utils.execute("umount", device_path, run_as_root=True, root_helper="sudo") def VolumeFs(fstype, format_options=''): supported_fs = { 'xfs': FSXFS, 'ext3': FSExt3, 'ext4': FSExt4 } return supported_fs[fstype](format_options) class VolumeDevice(object): def __init__(self, device_path): self.device_path = device_path self.volume_fs = VolumeFs(CONF.volume_fstype, CONF.format_options) def migrate_data(self, source_dir, target_subdir=None): """Synchronize the data from the source directory to the new volume; optionally to a new sub-directory on the new volume. """ self.mount(TMP_MOUNT_POINT, write_to_fstab=False) if not source_dir[-1] == '/': source_dir = "%s/" % source_dir target_dir = TMP_MOUNT_POINT if target_subdir: target_dir = target_dir + "/" + target_subdir try: utils.execute("rsync", "--safe-links", "--perms", "--recursive", "--owner", "--group", "--xattrs", "--sparse", source_dir, target_dir, run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: log_msg = "Could not migrate data." exc_msg = _("Could not migrate date.") log_and_raise(log_msg, exc_msg) self.unmount(TMP_MOUNT_POINT) def _check_device_exists(self): """Check that the device path exists. 
Verify that the device path has actually been created and can report its size, only then can it be available for formatting, retry num_tries to account for the time lag. """ try: num_tries = CONF.num_tries LOG.debug("Checking if %s exists.", self.device_path) utils.execute("blockdev", "--getsize64", self.device_path, run_as_root=True, root_helper="sudo", attempts=num_tries) except exception.ProcessExecutionError: log_fmt = "Device '%s' is not ready." exc_fmt = _("Device '%s' is not ready.") log_and_raise(log_fmt, exc_fmt, self.device_path) def _check_format(self): """Checks that a volume is formatted.""" LOG.debug("Checking whether '%s' is formatted.", self.device_path) self.volume_fs.check_format(self.device_path) def _format(self): """Calls mkfs to format the device at device_path.""" LOG.debug("Formatting '%s'.", self.device_path) self.volume_fs.format(self.device_path, CONF.volume_format_timeout) def format(self): """Formats the device at device_path and checks the filesystem.""" self._check_device_exists() self._format() self._check_format() def mount(self, mount_point, write_to_fstab=True): """Mounts, and writes to fstab.""" LOG.debug("Will mount %(path)s at %(mount_point)s.", {'path': self.device_path, 'mount_point': mount_point}) mount_point = VolumeMountPoint(self.device_path, mount_point) mount_point.mount() if write_to_fstab: mount_point.write_to_fstab() def _wait_for_mount(self, mount_point, timeout=2): """Wait for a fs to be mounted.""" def wait_for_mount(): return operating_system.is_mount(mount_point) try: utils.poll_until(wait_for_mount, sleep_time=1, time_out=timeout) except exception.PollTimeOut: return False return True def resize_fs(self, mount_point): """Resize the filesystem on the specified device.""" self._check_device_exists() # Some OS's will mount a file systems after it's attached if # an entry is put in the fstab file (like Trove does). # Thus it may be necessary to wait for the mount and then unmount # the fs again (since the volume was just attached). if self._wait_for_mount(mount_point, timeout=2): LOG.debug("Unmounting '%s' before resizing.", mount_point) self.unmount(mount_point) try: self.volume_fs.resize(self.device_path) except exception.ProcessExecutionError: log_fmt = "Error resizing the filesystem with device '%s'." exc_fmt = _("Error resizing the filesystem with device '%s'.") log_and_raise(log_fmt, exc_fmt, self.device_path) def unmount(self, mount_point): if operating_system.is_mount(mount_point): try: utils.execute("umount", mount_point, run_as_root=True, root_helper='sudo') except exception.ProcessExecutionError: log_fmt = "Error unmounting '%s'." exc_fmt = _("Error unmounting '%s'.") log_and_raise(log_fmt, exc_fmt, mount_point) else: LOG.debug("'%s' is not a mounted fs, cannot unmount", mount_point) def unmount_device(self, device_path): # unmount if device is already mounted mount_points = self.mount_points(device_path) for mnt in mount_points: LOG.info("Device '%(device)s' is mounted on " "'%(mount_point)s'. 
Unmounting now.", {'device': device_path, 'mount_point': mnt}) self.unmount(mnt) def mount_points(self, device_path): """Returns a list of mount points on the specified device.""" stdout, stderr = utils.execute( "grep '^%s ' /etc/mtab" % device_path, shell=True, check_exit_code=[0, 1]) return [entry.strip().split()[1] for entry in stdout.splitlines()] def set_readahead_size(self, readahead_size): """Set the readahead size of disk.""" self._check_device_exists() try: utils.execute("blockdev", "--setra", readahead_size, self.device_path, run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: log_fmt = ("Error setting readahead size to %(size)s " "for device %(device)s.") exc_fmt = _("Error setting readahead size to %(size)s " "for device %(device)s.") log_and_raise(log_fmt, exc_fmt, {'size': readahead_size, 'device': self.device_path}) class VolumeMountPoint(object): def __init__(self, device_path, mount_point): self.device_path = device_path self.mount_point = mount_point self.volume_fstype = CONF.volume_fstype self.mount_options = CONF.mount_options def mount(self): if not operating_system.exists(self.mount_point, is_directory=True, as_root=True): operating_system.create_directory(self.mount_point, as_root=True) LOG.debug("Mounting volume. Device path:{0}, mount_point:{1}, " "volume_type:{2}, mount options:{3}".format( self.device_path, self.mount_point, self.volume_fstype, self.mount_options)) try: utils.execute("mount", "-t", self.volume_fstype, "-o", self.mount_options, self.device_path, self.mount_point, run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: log_fmt = "Could not mount '%s'." exc_fmt = _("Could not mount '%s'.") log_and_raise(log_fmt, exc_fmt, self.mount_point) def write_to_fstab(self): fstab_line = ("%s\t%s\t%s\t%s\t0\t0" % (self.device_path, self.mount_point, self.volume_fstype, self.mount_options)) LOG.debug("Writing new line to fstab:%s", fstab_line) with open('/etc/fstab', "r") as fstab: fstab_content = fstab.read() with NamedTemporaryFile(mode='w', delete=False) as tempfstab: tempfstab.write(fstab_content + fstab_line) try: utils.execute("install", "-o", "root", "-g", "root", "-m", "644", tempfstab.name, "/etc/fstab", run_as_root=True, root_helper="sudo") except exception.ProcessExecutionError: log_fmt = "Could not add '%s' to fstab." exc_fmt = _("Could not add '%s' to fstab.") log_and_raise(log_fmt, exc_fmt, self.mount_point) os.remove(tempfstab.name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7681108 trove-12.1.0.dev92/trove/hacking/0000755000175000017500000000000000000000000016712 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/hacking/__init__.py0000644000175000017500000000000000000000000021011 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/hacking/checks.py0000644000175000017500000000660600000000000020534 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

import pycodestyle
from hacking import core

_all_log_levels = (
    'critical',
    'debug',
    'error',
    'exception',
    'info',
    'reserved',
    'warning',
)

_translated_log = re.compile(
    r".*LOG\.(%(levels)s)\(\s*_\(\s*('|\")" % {
        'levels': '|'.join(_all_log_levels)})


def _translation_is_not_expected(filename):
    # Do not do these validations on tests
    return any(pat in filename for pat in ["/tests/"])


@core.flake8ext
def check_raised_localized_exceptions(logical_line, filename):
    """T103 - Untranslated exception message.

    :param logical_line: The logical line to check.
    :param filename: The file name where the logical line exists.
    :returns: None if the logical line passes the check, otherwise a tuple
        is yielded that contains the offending index in the logical line
        and a message describing the check validation failure.
    """
    if _translation_is_not_expected(filename):
        return

    logical_line = logical_line.strip()
    raised_search = re.compile(
        r"raise (?:\w*)\((.*)\)").match(logical_line)
    if raised_search:
        exception_msg = raised_search.groups()[0]
        if exception_msg.startswith("\"") or exception_msg.startswith("\'"):
            msg = "T103: Untranslated exception message."
            yield (logical_line.index(exception_msg), msg)


@core.flake8ext
def check_no_basestring(logical_line):
    """T104 - Don't use basestring; use six.string_types instead.

    basestring is not supported by py3; using six.string_types ensures
    py3 and py2 compatibility.
    """
    if re.search(r"\, basestring\)", logical_line):
        msg = ("T104: basestring is not Python3-compatible, use "
               "six.string_types instead.")
        yield (0, msg)


@core.flake8ext
def no_translate_logs(physical_line, logical_line, filename):
    """T105 - Log messages shouldn't be translated from the Pike release.

    :param logical_line: The logical line to check.
    :param physical_line: The physical line to check.
    :param filename: The file name where the logical line exists.
    :returns: None if the logical line passes the check, otherwise a tuple
        is yielded that contains the offending index in the logical line
        and a message describing the check validation failure.
    """
    if _translation_is_not_expected(filename):
        return

    if pycodestyle.noqa(physical_line):
        return

    msg = "T105: Log message shouldn't be translated."
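    # Example: LOG.info(_("message")) matches _translated_log and is
    # flagged; LOG.info("message") passes.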
if _translated_log.match(logical_line): yield (0, msg) asse_raises_regexp = re.compile(r"assertRaisesRegexp\(") @core.flake8ext def assert_raises_regexp(logical_line): """Check for usage of deprecated assertRaisesRegexp N335 """ res = asse_raises_regexp.search(logical_line) if res: yield (0, "N335: assertRaisesRegex must be used instead " "of assertRaisesRegexp") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7681108 trove-12.1.0.dev92/trove/instance/0000755000175000017500000000000000000000000017112 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/instance/__init__.py0000644000175000017500000000000000000000000021211 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/instance/models.py0000644000175000017500000023620100000000000020753 0ustar00coreycorey00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2013-2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes that form the core of instances functionality.""" from datetime import datetime from datetime import timedelta import os.path import re import six from novaclient import exceptions as nova_exceptions from oslo_config.cfg import NoSuchOptError from oslo_log import log as logging from oslo_utils import encodeutils from sqlalchemy import func from trove.backup.models import Backup from trove.common import cfg from trove.common.clients import create_cinder_client from trove.common.clients import create_dns_client from trove.common.clients import create_glance_client from trove.common.clients import create_guest_client from trove.common.clients import create_neutron_client from trove.common.clients import create_nova_client from trove.common import crypto_utils as cu from trove.common import exception from trove.common.i18n import _ from trove.common import instance as tr_instance from trove.common import neutron from trove.common import notification from trove.common import server_group as srv_grp from trove.common import template from trove.common import timeutils from trove.common.trove_remote import create_trove_client from trove.common import utils from trove.configuration.models import Configuration from trove.datastore import models as datastore_models from trove.datastore.models import DatastoreVersionMetadata as dvm from trove.datastore.models import DBDatastoreVersionMetadata from trove.db import get_db_api from trove.db import models as dbmodels from trove.extensions.security_group.models import SecurityGroup from trove.instance.tasks import InstanceTask from trove.instance.tasks import InstanceTasks from trove.module import models as module_models from trove.module import views as module_views from trove.quota.quota import run_with_quotas from trove.taskmanager import api as task_api 
CONF = cfg.CONF LOG = logging.getLogger(__name__) def filter_ips(ips, white_list_regex, black_list_regex): """Return IPs matching white_list_regex and Filter out IPs matching black_list_regex. """ return [ip for ip in ips if re.search(white_list_regex, ip) and not re.search(black_list_regex, ip)] def load_server(context, instance_id, server_id, region_name): """ Loads a server or raises an exception. :param context: request context used to access nova :param instance_id: the trove instance id corresponding to the nova server (informational only) :param server_id: the compute instance id which will be retrieved from nova :type context: trove.common.context.TroveContext :type instance_id: unicode :type server_id: unicode :rtype: novaclient.v2.servers.Server """ client = create_nova_client(context, region_name=region_name) try: server = client.servers.get(server_id) except nova_exceptions.NotFound: LOG.error("Could not find nova server_id(%s).", server_id) raise exception.ComputeInstanceNotFound(instance_id=instance_id, server_id=server_id) except nova_exceptions.ClientException as e: raise exception.TroveError(str(e)) return server class InstanceStatus(object): HEALTHY = "HEALTHY" ACTIVE = "ACTIVE" BLOCKED = "BLOCKED" BUILD = "BUILD" FAILED = "FAILED" REBOOT = "REBOOT" RESIZE = "RESIZE" BACKUP = "BACKUP" SHUTDOWN = "SHUTDOWN" ERROR = "ERROR" RESTART_REQUIRED = "RESTART_REQUIRED" PROMOTE = "PROMOTE" EJECT = "EJECT" UPGRADE = "UPGRADE" DETACH = "DETACH" def validate_volume_size(size): if size is None: raise exception.VolumeSizeNotSpecified() max_size = CONF.max_accepted_volume_size if int(size) > max_size: msg = ("Volume 'size' cannot exceed maximum " "of %d GB, %s cannot be accepted." % (max_size, size)) raise exception.VolumeQuotaExceeded(msg) def load_simple_instance_server_status(context, db_info): """Loads a server or raises an exception.""" if 'BUILDING' == db_info.task_status.action: db_info.server_status = "BUILD" db_info.addresses = {} else: client = create_nova_client(context, db_info.region_id) try: server = client.servers.get(db_info.compute_instance_id) db_info.server_status = server.status db_info.addresses = server.addresses except nova_exceptions.NotFound: db_info.server_status = "SHUTDOWN" db_info.addresses = {} # Invalid states to contact the agent AGENT_INVALID_STATUSES = ["BUILD", "REBOOT", "RESIZE", "PROMOTE", "EJECT", "UPGRADE"] class SimpleInstance(object): """A simple view of an instance. This gets loaded directly from the local database, so its cheaper than creating the fully loaded Instance. As the name implies this class knows nothing of the underlying Nova Compute Instance (i.e. server) ----------- | | | i | | t n | | r s --------------------- | o t | datastore/guest | | v a --------------------- | e n | | c | | e | | | ----------- """ def __init__(self, context, db_info, datastore_status, root_password=None, ds_version=None, ds=None, locality=None): """ :type context: trove.common.context.TroveContext :type db_info: trove.instance.models.DBInstance :type datastore_status: trove.instance.models.InstanceServiceStatus :type root_password: str """ self.context = context self.db_info = db_info self.datastore_status = datastore_status self.root_pass = root_password self._fault = None self._fault_loaded = False if ds_version is None: self.ds_version = (datastore_models.DatastoreVersion. load_by_uuid(self.db_info.datastore_version_id)) if ds is None: self.ds = (datastore_models.Datastore. 
load(self.ds_version.datastore_id)) self.locality = locality self.slave_list = None def __repr__(self, *args, **kwargs): return "%s(%s)" % (self.name, self.id) @property def addresses(self): # TODO(tim.simpson): This code attaches two parts of the Nova server to # db_info: "status" and "addresses". The idea # originally was to listen to events to update this # data and store it in the Trove database. # However, it may have been unwise as a year and a # half later we still have to load the server anyway # and this makes the code confusing. if hasattr(self.db_info, 'addresses'): return self.db_info.addresses else: return None @property def created(self): return self.db_info.created @property def dns_ip_address(self): """Returns the IP address to be used with DNS.""" ips = self.get_visible_ip_addresses() if ips: return ips[0] @property def flavor_id(self): # Flavor ID is a str in the 1.0 API. return str(self.db_info.flavor_id) @property def hostname(self): return self.db_info.hostname def get_visible_ip_addresses(self): """Returns IPs that will be visible to the user.""" if self.addresses is None: return None IPs = [] mgmt_networks = neutron.get_management_networks(self.context) for label in self.addresses: if label in mgmt_networks: continue if (CONF.network_label_regex and not re.search(CONF.network_label_regex, label)): continue IPs.extend([addr.get('addr') for addr in self.addresses[label]]) # Includes ip addresses that match the regexp pattern if CONF.ip_regex and CONF.black_list_regex: IPs = filter_ips(IPs, CONF.ip_regex, CONF.black_list_regex) return IPs @property def id(self): return self.db_info.id @property def type(self): return self.db_info.type @property def tenant_id(self): return self.db_info.tenant_id @property def is_building(self): return self.status in [InstanceStatus.BUILD] @property def is_error(self): return self.status in [InstanceStatus.ERROR] @property def is_datastore_running(self): """True if the service status indicates datastore is up and running.""" return self.datastore_status.status in MYSQL_RESPONSIVE_STATUSES def datastore_status_matches(self, service_status): return self.datastore_status.status == service_status @property def name(self): return self.db_info.name @property def server_id(self): return self.db_info.compute_instance_id @property def slave_of_id(self): return self.db_info.slave_of_id @property def datastore_status(self): """ Returns the Service Status for this instance. For example, the status of the mysql datastore which is running on the server...not the server status itself. :return: the current status of the datastore :rtype: trove.instance.models.InstanceServiceStatus """ return self.__datastore_status @datastore_status.setter def datastore_status(self, datastore_status): if datastore_status and not isinstance(datastore_status, InstanceServiceStatus): raise ValueError(_("datastore_status must be of type " "InstanceServiceStatus. Got %s instead.") % datastore_status.__class__.__name__) self.__datastore_status = datastore_status @property def status(self): # Check for taskmanager errors. if self.db_info.task_status.is_error: return InstanceStatus.ERROR action = self.db_info.task_status.action # Check if we are resetting status or force deleting if (tr_instance.ServiceStatuses.UNKNOWN == self.datastore_status.status and action == InstanceTasks.DELETING.action): return InstanceStatus.SHUTDOWN elif (tr_instance.ServiceStatuses.UNKNOWN == self.datastore_status.status): return InstanceStatus.ERROR # Check for taskmanager status. 
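# NOTE(editor): the branches below translate the persisted task action
# into the API-visible status: BUILDING -> BUILD (or ERROR if the
# underlying server errored out), REBOOTING -> REBOOT, RESIZING -> RESIZE,
# UPGRADING -> UPGRADE, and likewise for the promote/eject/logging/detach
# tasks.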
if 'BUILDING' == action: if 'ERROR' == self.db_info.server_status: return InstanceStatus.ERROR return InstanceStatus.BUILD if 'REBOOTING' == action: return InstanceStatus.REBOOT if 'RESIZING' == action: return InstanceStatus.RESIZE if 'UPGRADING' == action: return InstanceStatus.UPGRADE if 'RESTART_REQUIRED' == action: return InstanceStatus.RESTART_REQUIRED if InstanceTasks.PROMOTING.action == action: return InstanceStatus.PROMOTE if InstanceTasks.EJECTING.action == action: return InstanceStatus.EJECT if InstanceTasks.LOGGING.action == action: return InstanceStatus.LOGGING if InstanceTasks.DETACHING.action == action: return InstanceStatus.DETACH # Check for server status. if self.db_info.server_status in ["BUILD", "ERROR", "REBOOT", "RESIZE"]: return self.db_info.server_status # As far as Trove is concerned, Nova instances in VERIFY_RESIZE should # still appear as though they are in RESIZE. if self.db_info.server_status in ["VERIFY_RESIZE"]: return InstanceStatus.RESIZE # Check if there is a backup running for this instance if Backup.running(self.id): return InstanceStatus.BACKUP # Report as Shutdown while deleting, unless there's an error. if 'DELETING' == action: if self.db_info.server_status in ["ACTIVE", "SHUTDOWN", "DELETED", "HEALTHY"]: return InstanceStatus.SHUTDOWN else: LOG.error("While shutting down instance (%(instance)s): " "server had status (%(status)s).", {'instance': self.id, 'status': self.db_info.server_status}) return InstanceStatus.ERROR # Check against the service status. # The service is only paused during a reboot. if tr_instance.ServiceStatuses.PAUSED == self.datastore_status.status: return InstanceStatus.REBOOT # If the service status is NEW, then we are building. if tr_instance.ServiceStatuses.NEW == self.datastore_status.status: return InstanceStatus.BUILD # For everything else we can look at the service status mapping. 
return self.datastore_status.status.api_status @property def updated(self): return self.db_info.updated @property def service_status_updated(self): return self.datastore_status.updated_at @property def volume_id(self): return self.db_info.volume_id @property def volume_size(self): return self.db_info.volume_size @property def datastore_version(self): return self.ds_version @property def datastore(self): return self.ds @property def volume_support(self): return CONF.get(self.datastore_version.manager).volume_support @property def device_path(self): return CONF.get(self.datastore_version.manager).device_path @property def root_password(self): return self.root_pass @property def fault(self): # Fault can be non-existent, so we have a loaded flag if not self._fault_loaded: try: self._fault = DBInstanceFault.find_by(instance_id=self.id) # Get rid of the stack trace if we're not admin if not self.context.is_admin: self._fault.details = None except exception.ModelNotFoundError: pass self._fault_loaded = True return self._fault @property def configuration(self): if self.db_info.configuration_id is not None: return Configuration.load(self.context, self.db_info.configuration_id) @property def slaves(self): if self.slave_list is None: self.slave_list = DBInstance.find_all(tenant_id=self.tenant_id, slave_of_id=self.id, deleted=False).all() return self.slave_list @property def cluster_id(self): return self.db_info.cluster_id @property def shard_id(self): return self.db_info.shard_id @property def region_name(self): return self.db_info.region_id @property def encrypted_rpc_messaging(self): return True if self.db_info.encrypted_key is not None else False class DetailInstance(SimpleInstance): """A detailed view of an Instance. This loads a SimpleInstance and then adds additional data for the instance from the guest. """ def __init__(self, context, db_info, datastore_status): super(DetailInstance, self).__init__(context, db_info, datastore_status) self._volume_used = None self._volume_total = None @property def volume_used(self): return self._volume_used @volume_used.setter def volume_used(self, value): self._volume_used = value @property def volume_total(self): return self._volume_total @volume_total.setter def volume_total(self, value): self._volume_total = value def get_db_info(context, id, cluster_id=None, include_deleted=False): """ Retrieves an instance of the managed datastore from the persisted storage based on the ID and Context :param context: the context which owns the instance :type context: trove.common.context.TroveContext :param id: the unique ID of the instance :type id: unicode or str :param cluster_id: the unique ID of the cluster :type cluster_id: unicode or str :return: a record of the instance as its state exists in persisted storage :rtype: trove.instance.models.DBInstance """ if context is None: raise TypeError(_("Argument context not defined.")) elif id is None: raise TypeError(_("Argument id not defined.")) args = {'id': id} if cluster_id is not None: args['cluster_id'] = cluster_id if not include_deleted: args['deleted'] = False try: db_info = DBInstance.find_by(context=context, **args) except exception.NotFound: raise exception.NotFound(uuid=id) return db_info def load_any_instance(context, id, load_server=True): # Try to load an instance with a server. # If that fails, try to load it without the server. 
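# NOTE(editor): BuiltInstance requires the Nova server to be loadable;
# when it is not (e.g. a half-built instance whose server is gone), we
# fall back to a FreshInstance, which carries only the database-side
# state.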
try: return load_instance(BuiltInstance, context, id, needs_server=load_server) except exception.UnprocessableEntity: LOG.warning("Could not load instance %s.", id) return load_instance(FreshInstance, context, id, needs_server=False) def load_instance(cls, context, id, needs_server=False, include_deleted=False): db_info = get_db_info(context, id, include_deleted=include_deleted) if not needs_server: # TODO(tim.simpson): When we have notifications this won't be # necessary and instead we'll just use the server_status field from # the instance table. load_simple_instance_server_status(context, db_info) server = None else: try: server = load_server(context, db_info.id, db_info.compute_instance_id, region_name=db_info.region_id) # TODO(tim.simpson): Remove this hack when we have notifications! db_info.server_status = server.status db_info.addresses = server.addresses except exception.ComputeInstanceNotFound: LOG.error("Could not load compute instance %s.", db_info.compute_instance_id) raise exception.UnprocessableEntity("Instance %s is not ready." % id) service_status = InstanceServiceStatus.find_by(instance_id=id) LOG.debug("Instance %(instance_id)s service status is %(service_status)s.", {'instance_id': id, 'service_status': service_status.status}) return cls(context, db_info, server, service_status) def load_instance_with_info(cls, context, id, cluster_id=None): db_info = get_db_info(context, id, cluster_id) load_simple_instance_server_status(context, db_info) service_status = InstanceServiceStatus.find_by(instance_id=id) LOG.debug("Instance %(instance_id)s service status is %(service_status)s.", {'instance_id': id, 'service_status': service_status.status}) instance = cls(context, db_info, service_status) load_guest_info(instance, context, id) load_server_group_info(instance, context) return instance def load_guest_info(instance, context, id): if instance.status not in AGENT_INVALID_STATUSES: guest = create_guest_client(context, id) try: volume_info = guest.get_volume_info() instance.volume_used = volume_info['used'] instance.volume_total = volume_info['total'] except Exception as e: LOG.exception(e) return instance def load_server_group_info(instance, context): instance_id = instance.slave_of_id if instance.slave_of_id else instance.id server_group = srv_grp.ServerGroup.load(context, instance_id) if server_group: instance.locality = srv_grp.ServerGroup.get_locality(server_group) class BaseInstance(SimpleInstance): """Represents an instance. 
----------- | | | i --------------------- | t n | compute instance | | r s --------------------- | o t | | v a | | e n --------------------- | c | datastore/guest | | e --------------------- | | ----------- """ def __init__(self, context, db_info, server, datastore_status): """ Creates a new initialized representation of an instance composed of its state in the database and its state from Nova :param context: the request context which contains the tenant that owns this instance :param db_info: the current state of this instance as it exists in the db :param server: the current state of this instance as it exists in the Nova :param datastore_status: the current state of the datastore on this instance at it exists in the db :type context: trove.common.context.TroveContext :type db_info: trove.instance.models.DBInstance :type server: novaclient.v2.servers.Server :typdatastore_statusus: trove.instance.models.InstanceServiceStatus """ super(BaseInstance, self).__init__(context, db_info, datastore_status) self.server = server self._guest = None self._nova_client = None self._volume_client = None self._neutron_client = None self._server_group = None self._server_group_loaded = False def get_guest(self): return create_guest_client(self.context, self.db_info.id) def delete(self): def _delete_resources(): if self.is_building: raise exception.UnprocessableEntity( "Instance %s is not ready. (Status is %s)." % (self.id, self.status)) LOG.debug("Deleting instance with compute id = %s.", self.db_info.compute_instance_id) from trove.cluster.models import is_cluster_deleting if (self.db_info.cluster_id is not None and not is_cluster_deleting(self.context, self.db_info.cluster_id)): raise exception.ClusterInstanceOperationNotSupported() if self.slaves: LOG.warning("Detach replicas before deleting replica source.") raise exception.ReplicaSourceDeleteForbidden( _("Detach replicas before deleting replica source.")) self.update_db(task_status=InstanceTasks.DELETING, configuration_id=None) task_api.API(self.context).delete_instance(self.id) deltas = {'instances': -1} if self.volume_support: deltas['volumes'] = -self.volume_size return run_with_quotas(self.tenant_id, deltas, _delete_resources) def server_status_matches(self, expected_status, server=None): if not server: server = self.server return server.status.upper() in ( status.upper() for status in expected_status) def _delete_resources(self, deleted_at): """Delete the openstack resources related to an instance. Deleting the instance should not break or raise exceptions because the end users want their instances to be deleted anyway. Cloud operator should consider the way to clean up orphan resources afterwards, e.g. using the naming convention. 
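        The resources cleaned up below are the Nova server, Neutron ports
        and security groups, the DNS entry (if DNS support is enabled), the
        Nova server group, and any remaining Cinder volume.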
""" LOG.info("Starting to delete resources for instance %s", self.id) old_server = None if self.server_id: # Stop db try: old_server = self.nova_client.servers.get(self.server_id) # The server may have already been marked as 'SHUTDOWN' # but check for 'ACTIVE' in case of any race condition # We specifically don't want to attempt to stop db if # the server is in 'ERROR' or 'FAILED" state, as it will # result in a long timeout if self.server_status_matches(['ACTIVE', 'SHUTDOWN'], server=self): LOG.debug("Stopping datastore on instance %s before " "deleting any resources.", self.id) self.guest.stop_db() except Exception as e: LOG.warning("Failed to stop the database before attempting " "to delete trove instance %s, error: %s", self.id, six.text_type(e)) # Nova VM if old_server: try: LOG.info("Deleting server for instance %s", self.id) self.server.delete() except Exception as e: LOG.warning("Failed to delete compute server %s", self.server_id, six.text_type(e)) # Neutron ports (floating IP) try: ret = self.neutron_client.list_ports(name='trove-%s' % self.id) ports = ret.get("ports", []) for port in ports: LOG.info("Deleting port %s for instance %s", port["id"], self.id) neutron.delete_port(self.neutron_client, port["id"]) except Exception as e: LOG.warning("Failed to delete ports for instance %s, " "error: %s", self.id, six.text_type(e)) # Neutron security groups try: name = "%s-%s" % (CONF.trove_security_group_name_prefix, self.id) ret = self.neutron_client.list_security_groups(name=name) sgs = ret.get("security_groups", []) for sg in sgs: LOG.info("Deleting security group %s for instance %s", sg["id"], self.id) self.neutron_client.delete_security_group(sg["id"]) except Exception as e: LOG.warning("Failed to delete security groups for instance %s, " "error: %s", self.id, six.text_type(e)) # DNS resources, e.g. 
Designate try: dns_support = CONF.trove_dns_support if dns_support: dns_api = create_dns_client(self.context) dns_api.delete_instance_entry(instance_id=self.id) except Exception as e: LOG.warning("Failed to delete dns entry of instance %s, error: %s", self.id, six.text_type(e)) # Nova server group try: srv_grp.ServerGroup.delete(self.context, self.server_group) except Exception as e: LOG.warning("Failed to delete server group for %s, error: %s", self.id, six.text_type(e)) def server_is_finished(): try: server = self.nova_client.servers.get(self.server_id) if not self.server_status_matches(['SHUTDOWN', 'ACTIVE'], server=server): LOG.warning("Server %(vm_id)s entered ERROR status " "when deleting instance %(instance_id)s!", {'vm_id': self.server_id, 'instance_id': self.id}) return False except nova_exceptions.NotFound: return True if old_server: try: LOG.info("Waiting for compute server %s removal for " "instance %s", self.server_id, self.id) utils.poll_until(server_is_finished, sleep_time=2, time_out=CONF.server_delete_time_out) except exception.PollTimeOut: LOG.warning("Failed to delete instance %(instance_id)s: " "Timeout deleting compute server %(vm_id)s", {'instance_id': self.id, 'vm_id': self.server_id}) # If volume has been resized it must be manually removed try: if self.volume_id: volume = self.volume_client.volumes.get(self.volume_id) if volume.status in ["available", "error"]: LOG.info("Deleting volume %s for instance %s", self.volume_id, self.id) volume.delete() except Exception as e: LOG.warning("Failed to delete volume for instance %s, error: %s", self.id, six.text_type(e)) notification.TroveInstanceDelete( instance=self, deleted_at=timeutils.isotime(deleted_at), server=old_server ).notify() LOG.info("Finished to delete resources for instance %s", self.id) def delete_async(self): deleted_at = timeutils.utcnow() self._delete_resources(deleted_at) LOG.debug("Setting instance %s to be deleted.", self.id) self.update_db(deleted=True, deleted_at=deleted_at, task_status=InstanceTasks.NONE) self.set_servicestatus_deleted() self.set_instance_fault_deleted() if CONF.trove_security_groups_support: # Delete associated security group for backward compatibility SecurityGroup.delete_for_instance(self.db_info.id, self.context, self.db_info.region_id) @property def guest(self): if not self._guest: self._guest = self.get_guest() return self._guest @property def nova_client(self): if not self._nova_client: self._nova_client = create_nova_client( self.context, region_name=self.db_info.region_id) return self._nova_client def update_db(self, **values): self.db_info = DBInstance.find_by(id=self.id, deleted=False) for key in values: setattr(self.db_info, key, values[key]) self.db_info.save() def set_servicestatus_deleted(self): del_instance = InstanceServiceStatus.find_by(instance_id=self.id) del_instance.set_status(tr_instance.ServiceStatuses.DELETED) del_instance.save() def set_instance_fault_deleted(self): try: del_fault = DBInstanceFault.find_by(instance_id=self.id) del_fault.deleted = True del_fault.deleted_at = datetime.utcnow() del_fault.save() except exception.ModelNotFoundError: pass @property def volume_client(self): if not self._volume_client: self._volume_client = create_cinder_client( self.context, region_name=self.db_info.region_id) return self._volume_client @property def neutron_client(self): if not self._neutron_client: self._neutron_client = create_neutron_client( self.context, region_name=self.db_info.region_id) return self._neutron_client def reset_task_status(self): 
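        """Clear any transient task state by setting task_status to NONE."""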
self.update_db(task_status=InstanceTasks.NONE) @property def server_group(self): # The server group could be empty, so we need a flag to cache it if not self._server_group_loaded: self._server_group = srv_grp.ServerGroup.load(self.context, self.id) self._server_group_loaded = True return self._server_group def get_injected_files(self, datastore_manager): injected_config_location = CONF.get('injected_config_location') guest_info = CONF.get('guest_info') if ('/' in guest_info): # Set guest_info_file to exactly guest_info from the conf file. # This should be /etc/guest_info for pre-Kilo compatibility. guest_info_file = guest_info else: guest_info_file = os.path.join(injected_config_location, guest_info) files = {guest_info_file: ( "[DEFAULT]\n" "guest_id=%s\n" "datastore_manager=%s\n" "tenant_id=%s\n" % (self.id, datastore_manager, self.tenant_id))} instance_key = get_instance_encryption_key(self.id) if instance_key: files = { guest_info_file: ("%sinstance_rpc_encr_key=%s\n" % (files.get(guest_info_file), instance_key)) } if os.path.isfile(CONF.get('guest_config')): with open(CONF.get('guest_config'), "r") as f: files[os.path.join(injected_config_location, "trove-guestagent.conf")] = f.read() return files def reset_status(self): LOG.info("Resetting the status to ERROR on instance %s.", self.id) self.reset_task_status() reset_instance = InstanceServiceStatus.find_by(instance_id=self.id) reset_instance.set_status(tr_instance.ServiceStatuses.UNKNOWN) reset_instance.save() class FreshInstance(BaseInstance): @classmethod def load(cls, context, id): return load_instance(cls, context, id, needs_server=False) class BuiltInstance(BaseInstance): @classmethod def load(cls, context, id, needs_server=True): return load_instance(cls, context, id, needs_server=needs_server) class Instance(BuiltInstance): """Represents an instance. The life span of this object should be limited. Do not store them or pass them between threads. """ @classmethod def get_root_on_create(cls, datastore_manager): try: root_on_create = CONF.get(datastore_manager).root_on_create return root_on_create except NoSuchOptError: LOG.debug("root_on_create not configured for %s," " hence defaulting the value to False.", datastore_manager) return False @classmethod def _validate_remote_datastore(cls, context, region_name, flavor, datastore, datastore_version): remote_nova_client = create_nova_client(context, region_name=region_name) try: remote_flavor = remote_nova_client.flavors.get(flavor.id) if (flavor.ram != remote_flavor.ram or flavor.vcpus != remote_flavor.vcpus): raise exception.TroveError( "Flavors differ between regions" " %(local)s and %(remote)s." % {'local': CONF.service_credentials.region_name, 'remote': region_name} ) except nova_exceptions.NotFound: raise exception.TroveError( "Flavors %(flavor)s not found in region %(remote)s." % {'flavor': flavor.id, 'remote': region_name}) remote_trove_client = create_trove_client( context, region_name=region_name) try: remote_ds_ver = remote_trove_client.datastore_versions.get( datastore.name, datastore_version.name) if datastore_version.name != remote_ds_ver.name: raise exception.TroveError( "Datastore versions differ between regions " "%(local)s and %(remote)s." % {'local': CONF.service_credentials.region_name, 'remote': region_name} ) except exception.NotFound: raise exception.TroveError( "Datastore Version %(dsv)s not found in region %(remote)s." 
% {'dsv': datastore_version.name, 'remote': region_name}) glance_client = create_glance_client(context) local_image = glance_client.images.get(datastore_version.image) remote_glance_client = create_glance_client( context, region_name=region_name) remote_image = remote_glance_client.images.get( remote_ds_ver.image) if local_image.checksum != remote_image.checksum: raise exception.TroveError( "Images for Datastore %(ds)s do not match " "between regions %(local)s and %(remote)s." % {'ds': datastore.name, 'local': CONF.service_credentials.region_name, 'remote': region_name}) @classmethod def create(cls, context, name, flavor_id, image_id, databases, users, datastore, datastore_version, volume_size, backup_id, availability_zone=None, nics=None, configuration_id=None, slave_of_id=None, cluster_config=None, replica_count=None, volume_type=None, modules=None, locality=None, region_name=None, access=None): region_name = region_name or CONF.service_credentials.region_name call_args = { 'name': name, 'flavor_id': flavor_id, 'datastore': datastore.name if datastore else None, 'datastore_version': datastore_version.name, 'image_id': image_id, 'availability_zone': availability_zone, 'region_name': region_name, } # All nova flavors are permitted for a datastore-version unless one # or more entries are found in datastore_version_metadata, # in which case only those are permitted. bound_flavors = DBDatastoreVersionMetadata.find_all( datastore_version_id=datastore_version.id, key='flavor', deleted=False ) if bound_flavors.count() > 0: valid_flavors = tuple(f.value for f in bound_flavors) if flavor_id not in valid_flavors: raise exception.DatastoreFlavorAssociationNotFound( datastore=datastore.name, datastore_version=datastore_version.name, flavor_id=flavor_id) datastore_cfg = CONF.get(datastore_version.manager) client = create_nova_client(context) try: flavor = client.flavors.get(flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=flavor_id) # If a different region is specified for the instance, ensure # that the flavor and image are the same in both regions if region_name and region_name != CONF.service_credentials.region_name: cls._validate_remote_datastore(context, region_name, flavor, datastore, datastore_version) deltas = {'instances': 1} volume_support = datastore_cfg.volume_support if volume_support: call_args['volume_type'] = volume_type dvm.validate_volume_type(context, volume_type, datastore.name, datastore_version.name) call_args['volume_size'] = volume_size validate_volume_size(volume_size) deltas['volumes'] = volume_size # Instance volume should have enough space for the backup # Backup, and volume sizes are in GBs target_size = volume_size else: target_size = flavor.disk # local_storage if volume_size is not None: raise exception.VolumeNotSupported() if datastore_cfg.device_path: if flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=flavor_id) target_size = flavor.ephemeral # ephemeral_Storage if backup_id: Backup.verify_swift_auth_token(context) call_args['backup_id'] = backup_id backup_info = Backup.get_by_id(context, backup_id) if not backup_info.is_done_successfuly: raise exception.BackupNotCompleteError( backup_id=backup_id, state=backup_info.state) if backup_info.size > target_size: raise exception.BackupTooLarge( backup_size=backup_info.size, disk_size=target_size) if not backup_info.check_swift_object_exist( context, verify_checksum=CONF.verify_swift_checksum_on_restore): raise exception.BackupFileNotFound( location=backup_info.location) 
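            # NOTE(editor): at this point the backup has been verified to be
            # completed, small enough to fit the target volume/ephemeral
            # disk, and present in Swift; the last guard below rejects
            # restoring a backup taken from a different datastore
            # (e.g. a MySQL backup onto MariaDB).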
if (backup_info.datastore_version_id and backup_info.datastore.name != datastore.name): raise exception.BackupDatastoreMismatchError( datastore1=backup_info.datastore.name, datastore2=datastore.name) if slave_of_id: Backup.verify_swift_auth_token(context) if databases or users: raise exception.ReplicaCreateWithUsersDatabasesError() call_args['replica_of'] = slave_of_id call_args['replica_count'] = replica_count replication_support = datastore_cfg.replication_strategy if not replication_support: raise exception.ReplicationNotSupported( datastore=datastore.name) try: # looking for replica source replica_source = DBInstance.find_by( context, id=slave_of_id, deleted=False) if replica_source.slave_of_id: raise exception.Forbidden( _("Cannot create a replica of a replica %(id)s.") % {'id': slave_of_id}) if (CONF.verify_replica_volume_size and replica_source.volume_size > volume_size): raise exception.Forbidden( _("Replica volume size should not be smaller than" " master's, replica volume size: %(replica_size)s" " and master volume size: %(master_size)s.") % {'replica_size': volume_size, 'master_size': replica_source.volume_size}) # load the replica source status to check if # source is available load_simple_instance_server_status( context, replica_source) replica_source_instance = Instance( context, replica_source, None, InstanceServiceStatus.find_by( context, instance_id=slave_of_id)) replica_source_instance.validate_can_perform_action() except exception.ModelNotFoundError: LOG.exception( "Cannot create a replica of %(id)s " "as that instance could not be found.", {'id': slave_of_id}) raise exception.NotFound(uuid=slave_of_id) elif replica_count and replica_count != 1: raise exception.Forbidden(_( "Replica count only valid when creating replicas. Cannot " "create %(count)d instances.") % {'count': replica_count}) multi_replica = slave_of_id and replica_count and replica_count > 1 instance_count = replica_count if multi_replica else 1 if locality: call_args['locality'] = locality if not nics: nics = [] if CONF.management_networks: # Make sure management network interface is always configured after # user defined instance. 
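            # NOTE(editor): appending (rather than prepending) keeps the
            # user-supplied NICs first, so the management network does not
            # displace the instance's primary network interface.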
nics = nics + [{"net-id": net_id} for net_id in CONF.management_networks] if nics: call_args['nics'] = nics if cluster_config: call_args['cluster_id'] = cluster_config.get("id", None) if not modules: modules = [] module_ids = [mod['id'] for mod in modules] modules = module_models.Modules.load_by_ids(context, module_ids) auto_apply_modules = module_models.Modules.load_auto_apply( context, datastore.id, datastore_version.id) for aa_module in auto_apply_modules: if aa_module.id not in module_ids: modules.append(aa_module) module_models.Modules.validate( modules, datastore.id, datastore_version.id) module_list = module_views.convert_modules_to_list(modules) def _create_resources(): if cluster_config: cluster_id = cluster_config.get("id", None) shard_id = cluster_config.get("shard_id", None) instance_type = cluster_config.get("instance_type", None) else: cluster_id = shard_id = instance_type = None ids = [] names = [] root_passwords = [] root_password = None for instance_index in range(0, instance_count): db_info = DBInstance.create( name=name, flavor_id=flavor_id, tenant_id=context.project_id, volume_size=volume_size, datastore_version_id=datastore_version.id, task_status=InstanceTasks.BUILDING, configuration_id=configuration_id, slave_of_id=slave_of_id, cluster_id=cluster_id, shard_id=shard_id, type=instance_type, region_id=region_name) LOG.debug("Tenant %(tenant)s created new Trove instance " "%(db)s in region %(region)s.", {'tenant': context.project_id, 'db': db_info.id, 'region': region_name}) instance_id = db_info.id cls.add_instance_modules(context, instance_id, modules) instance_name = name ids.append(instance_id) names.append(instance_name) root_passwords.append(None) # change the name to be name + replica_number if more than one if multi_replica: replica_number = instance_index + 1 names[instance_index] += '-' + str(replica_number) setattr(db_info, 'name', names[instance_index]) db_info.save() # if a configuration group is associated with an instance, # generate an overrides dict to pass into the instance creation # method config = Configuration(context, configuration_id) overrides = config.get_configuration_overrides() service_status = InstanceServiceStatus.create( instance_id=instance_id, status=tr_instance.ServiceStatuses.NEW) if CONF.trove_dns_support: dns_client = create_dns_client(context) hostname = dns_client.determine_hostname(instance_id) db_info.hostname = hostname db_info.save() if cls.get_root_on_create( datastore_version.manager) and not backup_id: root_password = utils.generate_random_password() root_passwords[instance_index] = root_password if instance_count > 1: instance_id = ids instance_name = names root_password = root_passwords task_api.API(context).create_instance( instance_id, instance_name, flavor, image_id, databases, users, datastore_version.manager, datastore_version.packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type=volume_type, modules=module_list, locality=locality, access=access) return SimpleInstance(context, db_info, service_status, root_password, locality=locality) with notification.StartNotification(context, **call_args): return run_with_quotas(context.project_id, deltas, _create_resources) @classmethod def add_instance_modules(cls, context, instance_id, modules): for module in modules: module_models.InstanceModule.create( context, instance_id, module.id, module.md5) def get_flavor(self): return self.nova_client.flavors.get(self.flavor_id) def 
get_default_configuration_template(self): flavor = self.get_flavor() LOG.debug("Getting default config template for datastore version " "%(ds_version)s and flavor %(flavor)s.", {'ds_version': self.ds_version, 'flavor': flavor}) config = template.SingleInstanceConfigTemplate( self.ds_version, flavor, self.id) return config.render_dict() def resize_flavor(self, new_flavor_id): self.validate_can_perform_action() LOG.info("Resizing instance %(instance_id)s flavor to " "%(flavor_id)s.", {'instance_id': self.id, 'flavor_id': new_flavor_id}) if self.db_info.cluster_id is not None: raise exception.ClusterInstanceOperationNotSupported() # Validate that the old and new flavor IDs are not the same, new flavor # can be found and has ephemeral/volume support if required by the # current flavor. if self.flavor_id == new_flavor_id: raise exception.BadRequest(_("The new flavor id must be different " "than the current flavor id of '%s'.") % self.flavor_id) try: new_flavor = self.nova_client.flavors.get(new_flavor_id) except nova_exceptions.NotFound: raise exception.FlavorNotFound(uuid=new_flavor_id) old_flavor = self.nova_client.flavors.get(self.flavor_id) if self.volume_support: if new_flavor.ephemeral != 0: raise exception.LocalStorageNotSupported() elif self.device_path is not None: # ephemeral support enabled if new_flavor.ephemeral == 0: raise exception.LocalStorageNotSpecified(flavor=new_flavor_id) # Set the task to RESIZING and begin the async call before returning. self.update_db(task_status=InstanceTasks.RESIZING) LOG.debug("Instance %s set to RESIZING.", self.id) task_api.API(self.context).resize_flavor(self.id, old_flavor, new_flavor) def resize_volume(self, new_size): def _resize_resources(): self.validate_can_perform_action() LOG.info("Resizing volume of instance %s.", self.id) if self.db_info.cluster_id is not None: raise exception.ClusterInstanceOperationNotSupported() old_size = self.volume_size if int(new_size) <= old_size: raise exception.BadRequest(_("The new volume 'size' must be " "larger than the current volume " "size of '%s'.") % old_size) # Set the task to Resizing before sending off to the taskmanager self.update_db(task_status=InstanceTasks.RESIZING) task_api.API(self.context).resize_volume(new_size, self.id) if not self.volume_size: raise exception.BadRequest(_("Instance %s has no volume.") % self.id) new_size_l = int(new_size) validate_volume_size(new_size_l) return run_with_quotas(self.tenant_id, {'volumes': new_size_l - self.volume_size}, _resize_resources) def reboot(self): self.validate_can_perform_action() LOG.info("Rebooting instance %s.", self.id) if self.db_info.cluster_id is not None and not self.context.is_admin: raise exception.ClusterInstanceOperationNotSupported() self.update_db(task_status=InstanceTasks.REBOOTING) task_api.API(self.context).reboot(self.id) def restart(self): self.validate_can_perform_action() LOG.info("Restarting datastore on instance %s.", self.id) if self.db_info.cluster_id is not None and not self.context.is_admin: raise exception.ClusterInstanceOperationNotSupported() # Set our local status since Nova might not change it quick enough. # TODO(tim.simpson): Possible bad stuff can happen if this service # shuts down before it can set status to NONE. # We need a last updated time to mitigate this; # after some period of tolerance, we'll assume the # status is no longer in effect. 
self.update_db(task_status=InstanceTasks.REBOOTING) task_api.API(self.context).restart(self.id) def detach_replica(self): self.validate_can_perform_action() LOG.info("Detaching instance %s from its replication source.", self.id) if not self.slave_of_id: raise exception.BadRequest(_("Instance %s is not a replica.") % self.id) self.update_db(task_status=InstanceTasks.DETACHING) task_api.API(self.context).detach_replica(self.id) def promote_to_replica_source(self): self.validate_can_perform_action() LOG.info("Promoting instance %s to replication source.", self.id) if not self.slave_of_id: raise exception.BadRequest(_("Instance %s is not a replica.") % self.id) # Update task status of master and all slaves master = BuiltInstance.load(self.context, self.slave_of_id) for dbinfo in [master.db_info] + master.slaves: setattr(dbinfo, 'task_status', InstanceTasks.PROMOTING) dbinfo.save() task_api.API(self.context).promote_to_replica_source(self.id) def eject_replica_source(self): self.validate_can_perform_action() LOG.info("Ejecting replica source %s from its replication set.", self.id) if not self.slaves: raise exception.BadRequest(_("Instance %s is not a replica" " source.") % self.id) service = InstanceServiceStatus.find_by(instance_id=self.id) last_heartbeat_delta = timeutils.utcnow() - service.updated_at agent_expiry_interval = timedelta(seconds=CONF.agent_heartbeat_expiry) if last_heartbeat_delta < agent_expiry_interval: raise exception.BadRequest(_("Replica Source %s cannot be ejected" " as it has a current heartbeat") % self.id) # Update task status of master and all slaves for dbinfo in [self.db_info] + self.slaves: setattr(dbinfo, 'task_status', InstanceTasks.EJECTING) dbinfo.save() task_api.API(self.context).eject_replica_source(self.id) def migrate(self, host=None): self.validate_can_perform_action() LOG.info("Migrating instance id = %(instance_id)s " "to host = %(host)s.", {'instance_id': self.id, 'host': host}) self.update_db(task_status=InstanceTasks.MIGRATING) task_api.API(self.context).migrate(self.id, host) def validate_can_perform_action(self): """ Raises exception if an instance action cannot currently be performed. 
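        An action is rejected while the server is not ACTIVE/HEALTHY, while
        another task is in flight (other than RESTART_REQUIRED), while the
        datastore status forbids actions, or while a backup is running.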
""" # cases where action cannot be performed status_type = 'instance' if self.db_info.server_status not in ['ACTIVE', 'HEALTHY']: status = self.db_info.server_status elif (self.db_info.task_status != InstanceTasks.NONE and self.db_info.task_status != InstanceTasks.RESTART_REQUIRED): status_type = 'task' status = self.db_info.task_status.action elif not self.datastore_status.status.action_is_allowed: status = self.status elif Backup.running(self.id): status = InstanceStatus.BACKUP else: # action can be performed return log_fmt = ("Instance %(instance_id)s is not currently available for " "an action to be performed (%(status_type)s status was " "%(action_status)s).") exc_fmt = _("Instance %(instance_id)s is not currently available for " "an action to be performed (%(status_type)s status was " "%(action_status)s).") msg_content = { 'instance_id': self.id, 'status_type': status_type, 'action_status': status} LOG.error(log_fmt, msg_content) raise exception.UnprocessableEntity(exc_fmt % msg_content) def _validate_can_perform_assign(self): """ Raises exception if a configuration assign cannot currently be performed """ # check if the instance is not ACTIVE or has tasks status = None if self.db_info.server_status != InstanceStatus.ACTIVE: status = self.db_info.server_status elif self.db_info.task_status != InstanceTasks.NONE: status = self.db_info.task_status.action if status: raise exception.InvalidInstanceState(instance_id=self.id, status=status) def attach_configuration(self, configuration_id): LOG.debug("Attaching configuration to instance: %s", self.id) if not self.db_info.configuration_id: self._validate_can_perform_assign() LOG.debug("Attaching configuration: %s", configuration_id) config = Configuration.find(self.context, configuration_id, self.db_info.datastore_version_id) self.update_configuration(config) else: raise exception.ConfigurationAlreadyAttached( instance_id=self.id, configuration_id=self.db_info.configuration_id) def update_configuration(self, configuration): self.save_configuration(configuration) return self.apply_configuration(configuration) def save_configuration(self, configuration): """Save configuration changes on the guest. Update Trove records if successful. This method does not update runtime values. It sets the instance task to RESTART_REQUIRED. """ LOG.debug("Saving configuration on instance: %s", self.id) overrides = configuration.get_configuration_overrides() # Always put the instance into RESTART_REQUIRED state after # configuration update. The state may be released only once (and if) # the configuration is successfully applied. # This ensures that the instance will always be in a consistent state # even if the apply never executes or fails. LOG.debug("Persisting new configuration on the guest.") self.guest.update_overrides(overrides) LOG.debug("Configuration has been persisted on the guest.") # Configuration has now been persisted on the instance and can be # safely attached. Update our records to reflect this change # irrespective of results of any further operations. self.update_db(task_status=InstanceTasks.RESTART_REQUIRED, configuration_id=configuration.configuration_id) def apply_configuration(self, configuration): """Apply runtime configuration changes and release the RESTART_REQUIRED task. Apply changes only if ALL values can be applied at once. Return True if the configuration has changed. 
""" LOG.debug("Applying configuration on instance: %s", self.id) overrides = configuration.get_configuration_overrides() if not configuration.does_configuration_need_restart(): LOG.debug("Applying runtime configuration changes.") self.guest.apply_overrides(overrides) LOG.debug("Configuration has been applied.") self.update_db(task_status=InstanceTasks.NONE) return True LOG.debug( "Configuration changes include non-dynamic settings and " "will require restart to take effect.") return False def detach_configuration(self): LOG.debug("Detaching configuration from instance: %s", self.id) if self.configuration and self.configuration.id: self._validate_can_perform_assign() LOG.debug("Detaching configuration: %s", self.configuration.id) self.remove_configuration() else: LOG.debug("No configuration found on instance.") def remove_configuration(self): configuration_id = self.delete_configuration() return self.reset_configuration(configuration_id) def delete_configuration(self): """Remove configuration changes from the guest. Update Trove records if successful. This method does not update runtime values. It sets the instance task to RESTART_REQUIRED. Return ID of the removed configuration group. """ LOG.debug("Deleting configuration from instance: %s", self.id) configuration_id = self.configuration.id LOG.debug("Removing configuration from the guest.") self.guest.update_overrides({}, remove=True) LOG.debug("Configuration has been removed from the guest.") self.update_db(task_status=InstanceTasks.RESTART_REQUIRED, configuration_id=None) return configuration_id def reset_configuration(self, configuration_id): """Dynamically reset the configuration values back to their default values from the configuration template and release the RESTART_REQUIRED task. Reset the values only if the default is available for all of them and restart is not required by any. Return True if the configuration has changed. """ LOG.debug("Resetting configuration on instance: %s", self.id) if configuration_id: flavor = self.get_flavor() default_config = self._render_config_dict(flavor) current_config = Configuration(self.context, configuration_id) current_overrides = current_config.get_configuration_overrides() # Check the configuration template has defaults for all modified # values. has_defaults_for_all = all(key in default_config.keys() for key in current_overrides.keys()) if (not current_config.does_configuration_need_restart() and has_defaults_for_all): LOG.debug("Applying runtime configuration changes.") self.guest.apply_overrides( {k: v for k, v in default_config.items() if k in current_overrides}) LOG.debug("Configuration has been applied.") self.update_db(task_status=InstanceTasks.NONE) return True else: LOG.debug( "Could not revert all configuration changes dynamically. " "A restart will be required.") else: LOG.debug("There are no values to reset.") return False def _render_config_dict(self, flavor): config = template.SingleInstanceConfigTemplate( self.datastore_version, flavor, self.id) return dict(config.render_dict()) def upgrade(self, datastore_version): self.update_db(datastore_version_id=datastore_version.id, task_status=InstanceTasks.UPGRADING) task_api.API(self.context).upgrade(self.id, datastore_version.id) def create_server_list_matcher(server_list): # Returns a method which finds a server from the given list. 
def find_server(instance_id, server_id): matches = [server for server in server_list if server.id == server_id] if len(matches) == 1: return matches[0] elif len(matches) < 1: # The instance was not found in the list and # this can happen if the instance is deleted from # nova but still in trove database raise exception.ComputeInstanceNotFound( instance_id=instance_id, server_id=server_id) else: # Should never happen, but never say never. LOG.error("Server %(server)s for instance %(instance)s was " "found twice!", {'server': server_id, 'instance': instance_id}) raise exception.TroveError(uuid=instance_id) return find_server class Instances(object): DEFAULT_LIMIT = CONF.instances_page_size @staticmethod def load(context, include_clustered, instance_ids=None): def load_simple_instance(context, db_info, status, **kwargs): return SimpleInstance(context, db_info, status) if context is None: raise TypeError(_("Argument context not defined.")) client = create_nova_client(context) servers = client.servers.list() query_opts = {'tenant_id': context.project_id, 'deleted': False} if not include_clustered: query_opts['cluster_id'] = None if instance_ids: if context.is_admin: query_opts.pop('tenant_id') filters = [DBInstance.id.in_(instance_ids)] db_infos = DBInstance.find_by_filter(filters=filters, **query_opts) else: db_infos = DBInstance.find_all(**query_opts) limit = utils.pagination_limit(context.limit, Instances.DEFAULT_LIMIT) data_view = DBInstance.find_by_pagination('instances', db_infos, "foo", limit=limit, marker=context.marker) next_marker = data_view.next_page_marker find_server = create_server_list_matcher(servers) for db in db_infos: LOG.debug("Checking for db [id=%(db_id)s, " "compute_instance_id=%(instance_id)s].", {'db_id': db.id, 'instance_id': db.compute_instance_id}) ret = Instances._load_servers_status(load_simple_instance, context, data_view.collection, find_server) return ret, next_marker @staticmethod def load_all_by_cluster_id(context, cluster_id, load_servers=True): db_instances = DBInstance.find_all(cluster_id=cluster_id, deleted=False) db_insts = [] for db_instance in db_instances: try: db_inst = load_any_instance( context, db_instance.id, load_server=load_servers) db_insts.append(db_inst) except exception.NotFound: # The instance may be gone if we're in the middle of a # shrink operation, so just log and continue LOG.debug("Instance %s is no longer available, skipping.", db_instance.id) return db_insts @staticmethod def _load_servers_status(load_instance, context, db_items, find_server): ret = [] for db in db_items: server = None try: # TODO(tim.simpson): Delete when we get notifications working! if InstanceTasks.BUILDING == db.task_status: db.server_status = "BUILD" db.addresses = {} else: try: region = CONF.service_credentials.region_name if (not db.region_id or db.region_id == region): server = find_server(db.id, db.compute_instance_id) else: nova_client = create_nova_client( context, region_name=db.region_id) server = nova_client.servers.get( db.compute_instance_id) db.server_status = server.status db.addresses = server.addresses except exception.ComputeInstanceNotFound: db.server_status = "SHUTDOWN" # Fake it... db.addresses = {} # TODO(tim.simpson): End of hack. # volumes = find_volumes(server.id) datastore_status = InstanceServiceStatus.find_by( instance_id=db.id) if not datastore_status.status: # This should never happen. 
LOG.error("Server status could not be read for " "instance id(%s).", db.id) continue LOG.debug("Server api_status(%s).", datastore_status.status.api_status) except exception.ModelNotFoundError: LOG.error("Server status could not be read for " "instance id(%s).", db.id) continue ret.append(load_instance(context, db, datastore_status, server=server)) return ret class DBInstance(dbmodels.DatabaseModelBase): _data_fields = ['created', 'updated', 'name', 'hostname', 'compute_instance_id', 'task_id', 'task_description', 'task_start_time', 'volume_id', 'flavor_id', 'volume_size', 'tenant_id', 'server_status', 'deleted', 'deleted_at', 'datastore_version_id', 'configuration_id', 'slave_of_id', 'cluster_id', 'shard_id', 'type', 'region_id', 'encrypted_key'] _table_name = 'instances' def __init__(self, task_status, **kwargs): """ Creates a new persistable entity of the Trove Guest Instance for purposes of recording its current state and record of modifications :param task_status: the current state details of any activity or error that is running on this guest instance (e.g. resizing, deleting) :type task_status: trove.instance.tasks.InstanceTask """ kwargs["task_id"] = task_status.code kwargs["task_description"] = task_status.db_text kwargs["deleted"] = False if CONF.enable_secure_rpc_messaging: key = cu.generate_random_key() kwargs["encrypted_key"] = cu.encode_data(cu.encrypt_data( key, CONF.inst_rpc_key_encr_key)) LOG.debug("Generated unique RPC encryption key for " "instance. key = %s", key) else: kwargs["encrypted_key"] = None super(DBInstance, self).__init__(**kwargs) self.set_task_status(task_status) @property def key(self): if self.encrypted_key is None: return None return cu.decrypt_data(cu.decode_data(self.encrypted_key), CONF.inst_rpc_key_encr_key) def _validate(self, errors): if InstanceTask.from_code(self.task_id) is None: errors['task_id'] = "Not valid." if self.task_status is None: errors['task_status'] = "Cannot be None." def get_task_status(self): return InstanceTask.from_code(self.task_id) def set_task_status(self, value): self.task_id = value.code self.task_description = value.db_text task_status = property(get_task_status, set_task_status) class instance_encryption_key_cache(object): def __init__(self, func, lru_cache_size=10): self._table = {} self._lru = [] self._lru_cache_size = lru_cache_size self._func = func def get(self, instance_id): if instance_id in self._table: if self._lru.index(instance_id) > 0: self._lru.remove(instance_id) self._lru.insert(0, instance_id) return self._table[instance_id] else: val = self._func(instance_id) # BUG(1650518): Cleanup in the Pike release if val is None: return val # We need string anyway if isinstance(val, six.binary_type): val = encodeutils.safe_decode(val) if len(self._lru) == self._lru_cache_size: tail = self._lru.pop() del self._table[tail] self._lru.insert(0, instance_id) self._table[instance_id] = val return self._table[instance_id] def __getitem__(self, instance_id): return self.get(instance_id) def _get_instance_encryption_key(instance_id): instance = DBInstance.find_by(id=instance_id) if instance is not None: return instance.key else: raise exception.NotFound(uuid=id) _instance_encryption_key = instance_encryption_key_cache( func=_get_instance_encryption_key) def get_instance_encryption_key(instance_id): return _instance_encryption_key[instance_id] def module_instance_count(context, module_id, include_clustered=False): """Returns a summary of the instances that have applied a given module. 
We use the SQLAlchemy query object directly here as there's functionality needed that's not exposed in the trove/db/__init__.py/Query object. """ columns = [module_models.DBModule.name, module_models.DBInstanceModule.module_id, module_models.DBInstanceModule.md5, func.count(module_models.DBInstanceModule.md5), (module_models.DBInstanceModule.md5 == module_models.DBModule.md5), func.min(module_models.DBInstanceModule.updated), func.max(module_models.DBInstanceModule.updated)] filters = [module_models.DBInstanceModule.module_id == module_id, module_models.DBInstanceModule.deleted == 0] query = module_models.DBInstanceModule.query() query = query.join( module_models.DBModule, module_models.DBInstanceModule.module_id == module_models.DBModule.id) query = query.join( DBInstance, module_models.DBInstanceModule.instance_id == DBInstance.id) if not include_clustered: filters.append(DBInstance.cluster_id.is_(None)) if not context.is_admin: filters.append(DBInstance.tenant_id == context.project_id) query = query.group_by(module_models.DBInstanceModule.md5) query = query.add_columns(*columns) query = query.filter(*filters) query = query.order_by(module_models.DBInstanceModule.updated) return query.all() def persist_instance_fault(notification, event_qualifier): """This callback is registered to be fired whenever a notification is sent out. """ if "error" == event_qualifier: instance_id = notification.payload.get('instance_id') message = notification.payload.get( 'message', 'Missing notification message') details = notification.payload.get('exception', []) server_type = notification.server_type if server_type: details.insert(0, "Server type: %s\n" % server_type) save_instance_fault(instance_id, message, details) def save_instance_fault(instance_id, message, details, skip_delta=None): if instance_id: try: # Make sure it's a valid id - sometimes the error is related # to an invalid id and we can't save those DBInstance.find_by(id=instance_id, deleted=False) msg = utils.format_output(message, truncate_len=255) det = utils.format_output(details) try: fault = DBInstanceFault.find_by(instance_id=instance_id) skip = False # If we were passed in a skip_delta, only update the fault # if the old one is at least skip_delta seconds in the past if skip_delta: skip_time = fault.updated + timedelta(seconds=skip_delta) now = datetime.now() skip = now < skip_time if skip: LOG.debug( "Skipping fault message in favor of previous one") else: fault.set_info(msg, det) fault.save() except exception.ModelNotFoundError: DBInstanceFault.create( instance_id=instance_id, message=msg, details=det) except exception.ModelNotFoundError: # We don't need to save anything if the instance id isn't valid pass class DBInstanceFault(dbmodels.DatabaseModelBase): _data_fields = ['instance_id', 'message', 'details', 'created', 'updated', 'deleted', 'deleted_at'] _table_name = 'instance_faults' def __init__(self, **kwargs): super(DBInstanceFault, self).__init__(**kwargs) def set_info(self, message, details): self.message = message self.details = details class InstanceServiceStatus(dbmodels.DatabaseModelBase): _data_fields = ['instance_id', 'status_id', 'status_description', 'updated_at'] _table_name = 'service_statuses' def __init__(self, status, **kwargs): kwargs["status_id"] = status.code kwargs["status_description"] = status.description super(InstanceServiceStatus, self).__init__(**kwargs) self.set_status(status) def _validate(self, errors): if self.status is None: errors['status'] = "Cannot be None." 
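        # A non-None status must still decode to a known ServiceStatus code: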
if tr_instance.ServiceStatus.from_code(self.status_id) is None: errors['status_id'] = "Not valid." def get_status(self): """ Returns the current enumerated status of the Service running on the instance :return: a ServiceStatus reference indicating the currently stored status of the service :rtype: trove.common.instance.ServiceStatus """ return tr_instance.ServiceStatus.from_code(self.status_id) def set_status(self, value): """ Sets the status of the hosted service :param value: current state of the hosted service :type value: trove.common.instance.ServiceStatus """ self.status_id = value.code self.status_description = value.description def save(self): self['updated_at'] = timeutils.utcnow() return get_db_api().save(self) status = property(get_status, set_status) def persisted_models(): return { 'instances': DBInstance, 'instance_faults': DBInstanceFault, 'service_statuses': InstanceServiceStatus, } MYSQL_RESPONSIVE_STATUSES = [ tr_instance.ServiceStatuses.RUNNING, tr_instance.ServiceStatuses.HEALTHY ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/instance/service.py0000644000175000017500000006654400000000000021143 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
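# ---------------------------------------------------------------------------
# Editor's note: a minimal, self-contained sketch (invented names, not Trove
# code) of the skip_delta throttling that save_instance_fault() in
# trove/instance/models.py above applies: an existing fault record is only
# rewritten when the previous one is at least skip_delta seconds old.
from datetime import datetime, timedelta

def _should_skip_fault_update(last_updated, skip_delta, now=None):
    """Return True while still inside the quiet window after an update."""
    if not skip_delta:
        return False
    now = now or datetime.now()
    return now < last_updated + timedelta(seconds=skip_delta)

# E.g. a fault updated 30s ago with skip_delta=60 is kept (True), while one
# updated 90s ago is overwritten (False).
# ---------------------------------------------------------------------------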
import ipaddress from oslo_log import log as logging from oslo_utils import strutils from trove.backup.models import Backup as backup_model from trove.backup import views as backup_views import trove.common.apischema as apischema from trove.common import cfg from trove.common import clients from trove.common import exception from trove.common.i18n import _ from trove.common import neutron from trove.common import notification from trove.common.notification import StartNotification from trove.common import pagination from trove.common import policy from trove.common import utils from trove.common import wsgi from trove.datastore import models as datastore_models from trove.extensions.mysql.common import populate_users from trove.extensions.mysql.common import populate_validated_databases from trove.instance import models, views from trove.module import models as module_models from trove.module import views as module_views CONF = cfg.CONF LOG = logging.getLogger(__name__) class InstanceController(wsgi.Controller): """Controller for instance functionality.""" schemas = apischema.instance.copy() @classmethod def authorize_instance_action(cls, context, instance_rule_name, instance): policy.authorize_on_target(context, 'instance:%s' % instance_rule_name, {'tenant': instance.tenant_id}) @classmethod def get_action_schema(cls, body, action_schema): action_type = list(body.keys())[0] action_schema = action_schema.get(action_type, {}) if action_type == 'resize': # volume or flavorRef resize_action = list(body[action_type].keys())[0] action_schema = action_schema.get(resize_action, {}) return action_schema @classmethod def get_schema(cls, action, body): action_schema = super(InstanceController, cls).get_schema(action, body) if action == 'action': # resize or restart action_schema = cls.get_action_schema(body, action_schema) return action_schema def action(self, req, body, tenant_id, id): """ Handles requests that modify existing instances in some manner. Actions could include 'resize', 'restart' :param req: http request object :param body: deserialized body of the request as a dict :param tenant_id: the id of the tenant that owns the instance :param id: instance id """ LOG.debug("instance action req : '%s'\n\n", req) if not body: raise exception.BadRequest(_("Invalid request body.")) context = req.environ[wsgi.CONTEXT_KEY] _actions = { 'restart': self._action_restart, 'resize': self._action_resize, 'promote_to_replica_source': self._action_promote_to_replica_source, 'eject_replica_source': self._action_eject_replica_source, 'reset_status': self._action_reset_status, } selected_action = None action_name = None for key in body: if key in _actions: selected_action = _actions[key] action_name = key LOG.info("Performing %(action_name)s action against " "instance %(instance_id)s for tenant %(tenant_id)s, " "body: %(body)s", {'action_name': action_name, 'instance_id': id, 'tenant_id': tenant_id, 'body': body}) needs_server = True if action_name in ['reset_status']: needs_server = False instance = models.Instance.load(context, id, needs_server=needs_server) return selected_action(context, req, instance, body) def _action_restart(self, context, req, instance, body): context.notification = notification.DBaaSInstanceRestart(context, request=req) self.authorize_instance_action(context, 'restart', instance) with StartNotification(context, instance_id=instance.id): instance.restart() return wsgi.Result(None, 202) def _action_resize(self, context, req, instance, body): """ Handles 2 cases: 1.
resize volume, where the body only contains {volume: {size: x}}; 2. resize instance, where the body only contains {flavorRef: http.../2}. If the body has both, we will throw back an error. """ options = { 'volume': self._action_resize_volume, 'flavorRef': self._action_resize_flavor } selected_option = None args = None for key in options: if key in body['resize']: selected_option = options[key] args = body['resize'][key] break return selected_option(context, req, instance, args) def _action_resize_volume(self, context, req, instance, volume): context.notification = notification.DBaaSInstanceResizeVolume( context, request=req) self.authorize_instance_action(context, 'resize_volume', instance) with StartNotification(context, instance_id=instance.id, new_size=volume['size']): instance.resize_volume(volume['size']) return wsgi.Result(None, 202) def _action_resize_flavor(self, context, req, instance, flavorRef): context.notification = notification.DBaaSInstanceResizeInstance( context, request=req) self.authorize_instance_action(context, 'resize_flavor', instance) new_flavor_id = utils.get_id_from_href(flavorRef) with StartNotification(context, instance_id=instance.id, new_flavor_id=new_flavor_id): instance.resize_flavor(new_flavor_id) return wsgi.Result(None, 202) def _action_promote_to_replica_source(self, context, req, instance, body): self.authorize_instance_action( context, 'promote_to_replica_source', instance) context.notification = notification.DBaaSInstancePromote(context, request=req) with StartNotification(context, instance_id=instance.id): instance.promote_to_replica_source() return wsgi.Result(None, 202) def _action_eject_replica_source(self, context, req, instance, body): self.authorize_instance_action( context, 'eject_replica_source', instance) context.notification = notification.DBaaSInstanceEject(context, request=req) with StartNotification(context, instance_id=instance.id): instance.eject_replica_source() return wsgi.Result(None, 202) def _action_reset_status(self, context, req, instance, body): if 'force_delete' in body['reset_status']: self.authorize_instance_action(context, 'force_delete', instance) else: self.authorize_instance_action( context, 'reset_status', instance) context.notification = notification.DBaaSInstanceResetStatus( context, request=req) with StartNotification(context, instance_id=instance.id): instance.reset_status() LOG.debug("Failing backups for instance %s.", instance.id) backup_model.fail_for_instance(instance.id) return wsgi.Result(None, 202) def index(self, req, tenant_id): """Return all instances.""" LOG.info("Listing database instances for tenant '%s'", tenant_id) LOG.debug("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'instance:index') instances = self._get_instances(req, instance_view=views.InstanceView) return wsgi.Result(instances, 200) def detail(self, req, tenant_id): """Return all instances with details.""" LOG.info("Listing database instances with details for tenant '%s'", tenant_id) LOG.debug("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'instance:detail') instances = self._get_instances(req, instance_view=views.InstanceDetailView) return wsgi.Result(instances, 200) def _get_instances(self, req, instance_view): context = req.environ[wsgi.CONTEXT_KEY] clustered_q = req.GET.get('include_clustered', '').lower() include_clustered = clustered_q == 'true' instances, marker = models.Instances.load(context, include_clustered) view = views.InstancesView(instances,
item_view=instance_view, req=req) paged = pagination.SimplePaginatedDataView(req.url, 'instances', view, marker) return paged.data() def backups(self, req, tenant_id, id): """Return all backups for the specified instance.""" LOG.info("Listing backups for instance '%s'", id) LOG.debug("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) self.authorize_instance_action(context, 'backups', instance) backups, marker = backup_model.list_for_instance(context, id) view = backup_views.BackupViews(backups) paged = pagination.SimplePaginatedDataView(req.url, 'backups', view, marker) return wsgi.Result(paged.data(), 200) def show(self, req, tenant_id, id): """Return a single instance.""" LOG.info("Showing database instance '%(instance_id)s' for tenant " "'%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id}) LOG.debug("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] server = models.load_instance_with_info(models.DetailInstance, context, id) self.authorize_instance_action(context, 'show', server) return wsgi.Result( views.InstanceDetailView(server, req=req).data(), 200 ) def delete(self, req, tenant_id, id): """Delete a single instance.""" LOG.info("Deleting database instance '%(instance_id)s' for tenant " "'%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id}) LOG.debug("req : '%s'\n\n", req) context = req.environ[wsgi.CONTEXT_KEY] instance = models.load_any_instance(context, id) self.authorize_instance_action(context, 'delete', instance) context.notification = notification.DBaaSInstanceDelete( context, request=req) with StartNotification(context, instance_id=instance.id): marker = 'foo' while marker: instance_modules, marker = module_models.InstanceModules.load( context, instance_id=id) for instance_module in instance_modules: instance_module = module_models.InstanceModule.load( context, instance_module['instance_id'], instance_module['module_id']) module_models.InstanceModule.delete( context, instance_module) instance.delete() return wsgi.Result(None, 202) def _check_network_overlap(self, context, user_network): neutron_client = clients.create_neutron_client(context) user_cidrs = neutron.get_subnet_cidrs(neutron_client, user_network) mgmt_cidrs = neutron.get_mamangement_subnet_cidrs(neutron_client) LOG.debug("Cidrs of the user network: %s, cidrs of the management " "network: %s", user_cidrs, mgmt_cidrs) for user_cidr in user_cidrs: user_net = ipaddress.ip_network(user_cidr) for mgmt_cidr in mgmt_cidrs: mgmt_net = ipaddress.ip_network(mgmt_cidr) if user_net.overlaps(mgmt_net): raise exception.NetworkConflict() def create(self, req, body, tenant_id): # TODO(hub-cap): turn this into middleware LOG.info("Creating a database instance for tenant '%s'", tenant_id) LOG.debug("req : '%s'\n\n", strutils.mask_password(req)) LOG.debug("body : '%s'\n\n", strutils.mask_password(body)) context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'instance:create') context.notification = notification.DBaaSInstanceCreate(context, request=req) datastore_args = body['instance'].get('datastore', {}) datastore, datastore_version = ( datastore_models.get_datastore_version(**datastore_args)) image_id = datastore_version.image_id name = body['instance']['name'] flavor_ref = body['instance']['flavorRef'] flavor_id = utils.get_id_from_href(flavor_ref) configuration = self._configuration_parse(context, body) databases = populate_validated_databases( body['instance'].get('databases', [])) database_names = [database.get('_name', 
'') for database in databases] users = None try: users = populate_users(body['instance'].get('users', []), database_names) except ValueError as ve: raise exception.BadRequest(message=ve) modules = body['instance'].get('modules') # The following operations have their own API calls. # We need to make sure the same policies are enforced when # creating an instance. # i.e. if attaching configuration group to an existing instance is not # allowed, it should not be possible to create a new instance with the # group attached either if configuration: policy.authorize_on_tenant(context, 'instance:update') if modules: policy.authorize_on_tenant(context, 'instance:module_apply') if users: policy.authorize_on_tenant( context, 'instance:extension:user:create') if databases: policy.authorize_on_tenant( context, 'instance:extension:database:create') if 'volume' in body['instance']: volume_info = body['instance']['volume'] volume_size = int(volume_info['size']) volume_type = volume_info.get('type') else: volume_size = None volume_type = None if 'restorePoint' in body['instance']: backupRef = body['instance']['restorePoint']['backupRef'] backup_id = utils.get_id_from_href(backupRef) else: backup_id = None availability_zone = body['instance'].get('availability_zone') nics = body['instance'].get('nics', []) if len(nics) > 0: self._check_network_overlap(context, nics[0].get('net-id')) slave_of_id = body['instance'].get('replica_of', # also check for older name body['instance'].get('slave_of')) replica_count = body['instance'].get('replica_count') locality = body['instance'].get('locality') if locality: locality_domain = ['affinity', 'anti-affinity'] locality_domain_msg = ("Invalid locality '%s'. " "Must be one of ['%s']" % (locality, "', '".join(locality_domain))) if locality not in locality_domain: raise exception.BadRequest(message=locality_domain_msg) if slave_of_id: dupe_locality_msg = ( 'Cannot specify locality when adding replicas to existing ' 'master.') raise exception.BadRequest(message=dupe_locality_msg) region_name = body['instance'].get( 'region_name', CONF.service_credentials.region_name ) access = body['instance'].get('access', None) instance = models.Instance.create(context, name, flavor_id, image_id, databases, users, datastore, datastore_version, volume_size, backup_id, availability_zone, nics, configuration, slave_of_id, replica_count=replica_count, volume_type=volume_type, modules=modules, locality=locality, region_name=region_name, access=access) view = views.InstanceDetailView(instance, req=req) return wsgi.Result(view.data(), 200) def _configuration_parse(self, context, body): if 'configuration' in body['instance']: configuration_ref = body['instance']['configuration'] if configuration_ref: configuration_id = utils.get_id_from_href(configuration_ref) return configuration_id def _modify_instance(self, context, req, instance, **kwargs): if 'detach_replica' in kwargs and kwargs['detach_replica']: LOG.debug("Detaching replica from source.") context.notification = notification.DBaaSInstanceDetach( context, request=req) with StartNotification(context, instance_id=instance.id): instance.detach_replica() if 'configuration_id' in kwargs: if kwargs['configuration_id']: context.notification = ( notification.DBaaSInstanceAttachConfiguration(context, request=req)) configuration_id = kwargs['configuration_id'] with StartNotification(context, instance_id=instance.id, configuration_id=configuration_id): instance.attach_configuration(configuration_id) else: context.notification = ( 
notification.DBaaSInstanceDetachConfiguration(context, request=req)) with StartNotification(context, instance_id=instance.id): instance.detach_configuration() if 'datastore_version' in kwargs: datastore_version = datastore_models.DatastoreVersion.load( instance.datastore, kwargs['datastore_version']) context.notification = ( notification.DBaaSInstanceUpgrade(context, request=req)) with StartNotification(context, instance_id=instance.id, datastore_version_id=datastore_version.id): instance.upgrade(datastore_version) if kwargs: instance.update_db(**kwargs) def update(self, req, id, body, tenant_id): """Updates the instance to attach/detach configuration.""" LOG.info("Updating database instance '%(instance_id)s' for tenant " "'%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id}) LOG.debug("req: %s", req) LOG.debug("body: %s", body) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) self.authorize_instance_action(context, 'update', instance) # Make sure args contains a 'configuration_id' argument, args = {} args['configuration_id'] = self._configuration_parse(context, body) self._modify_instance(context, req, instance, **args) return wsgi.Result(None, 202) def edit(self, req, id, body, tenant_id): """ Updates the instance to set or unset one or more attributes. """ LOG.info("Editing instance for tenant id %s.", tenant_id) LOG.debug("req: %s", strutils.mask_password(req)) LOG.debug("body: %s", strutils.mask_password(body)) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) self.authorize_instance_action(context, 'edit', instance) args = {} args['detach_replica'] = ('replica_of' in body['instance'] or 'slave_of' in body['instance']) if 'name' in body['instance']: args['name'] = body['instance']['name'] if 'configuration' in body['instance']: args['configuration_id'] = self._configuration_parse(context, body) if 'datastore_version' in body['instance']: args['datastore_version'] = body['instance'].get( 'datastore_version') self._modify_instance(context, req, instance, **args) return wsgi.Result(None, 202) def configuration(self, req, tenant_id, id): """ Returns the default configuration template applied to the instance. 
""" LOG.info("Getting default configuration for instance %s", id) context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) self.authorize_instance_action(context, 'configuration', instance) LOG.debug("Server: %s", instance) config = instance.get_default_configuration_template() LOG.debug("Default config for instance %(instance_id)s is %(config)s", {'instance_id': id, 'config': config}) return wsgi.Result(views.DefaultConfigurationView( config).data(), 200) def guest_log_list(self, req, tenant_id, id): """Return all information about all logs for an instance.""" LOG.debug("Listing logs for tenant %s", tenant_id) context = req.environ[wsgi.CONTEXT_KEY] try: backup_model.verify_swift_auth_token(context) except exception.SwiftNotFound: raise exception.LogsNotAvailable() instance = models.Instance.load(context, id) if not instance: raise exception.NotFound(uuid=id) self.authorize_instance_action(context, 'guest_log_list', instance) client = clients.create_guest_client(context, id) guest_log_list = client.guest_log_list() return wsgi.Result({'logs': guest_log_list}, 200) def guest_log_action(self, req, body, tenant_id, id): """Processes a guest log.""" LOG.info("Processing log for tenant %s", tenant_id) context = req.environ[wsgi.CONTEXT_KEY] try: backup_model.verify_swift_auth_token(context) except exception.SwiftNotFound: raise exception.LogsNotAvailable() instance = models.Instance.load(context, id) if not instance: raise exception.NotFound(uuid=id) log_name = body['name'] enable = body.get('enable', None) disable = body.get('disable', None) publish = body.get('publish', None) discard = body.get('discard', None) if enable and disable: raise exception.BadRequest(_("Cannot enable and disable log.")) client = clients.create_guest_client(context, id) guest_log = client.guest_log_action(log_name, enable, disable, publish, discard) return wsgi.Result({'log': guest_log}, 200) def module_list(self, req, tenant_id, id): """Return information about modules on an instance.""" context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) if not instance: raise exception.NotFound(uuid=id) self.authorize_instance_action(context, 'module_list', instance) from_guest = bool(req.GET.get('from_guest', '').lower()) include_contents = bool(req.GET.get('include_contents', '').lower()) if from_guest: return self._module_list_guest( context, id, include_contents=include_contents) else: return self._module_list( context, id, include_contents=include_contents) def _module_list_guest(self, context, id, include_contents): """Return information about modules on an instance.""" client = clients.create_guest_client(context, id) result_list = client.module_list(include_contents) return wsgi.Result({'modules': result_list}, 200) def _module_list(self, context, id, include_contents): """Return information about instance modules.""" client = clients.create_guest_client(context, id) result_list = client.module_list(include_contents) return wsgi.Result({'modules': result_list}, 200) def module_apply(self, req, body, tenant_id, id): """Apply modules to an instance.""" context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) if not instance: raise exception.NotFound(uuid=id) self.authorize_instance_action(context, 'module_apply', instance) module_ids = [mod['id'] for mod in body.get('modules', [])] modules = module_models.Modules.load_by_ids(context, module_ids) module_models.Modules.validate( modules, instance.datastore.id, 
instance.datastore_version.id) module_list = module_views.convert_modules_to_list(modules) client = clients.create_guest_client(context, id) result_list = client.module_apply(module_list) models.Instance.add_instance_modules(context, id, modules) return wsgi.Result({'modules': result_list}, 200) def module_remove(self, req, tenant_id, id, module_id): """Remove module from an instance.""" context = req.environ[wsgi.CONTEXT_KEY] instance = models.Instance.load(context, id) if not instance: raise exception.NotFound(uuid=id) self.authorize_instance_action(context, 'module_remove', instance) module = module_models.Module.load(context, module_id) module_info = module_views.DetailedModuleView(module).data() client = clients.create_guest_client(context, id) client.module_remove(module_info) instance_modules = module_models.InstanceModules.load_all( context, instance_id=id, module_id=module_id) for instance_module in instance_modules: module_models.InstanceModule.delete(context, instance_module) LOG.debug("Deleted IM record %(instance_module_id)s " "(instance %(id)s, module %(module_id)s).", {'instance_module_id': instance_module.id, 'id': id, 'module_id': module_id}) return wsgi.Result(None, 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/instance/tasks.py0000644000175000017500000001232500000000000020614 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common instance status code used across Trove API. """ class InstanceTask(object): """ Stores the different kind of tasks being performed by an instance. """ # TODO(tim.simpson): Figure out someway to migrate this to the TaskManager # once that revs up. 
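# Editor's note (illustrative, not part of the original source): each
# InstanceTask constructed below registers itself in _lookup keyed by its
# integer code, so from_code() can round-trip a persisted task_id back to
# its singleton, e.g.:
#   InstanceTask.from_code(0x05) is InstanceTasks.BUILDING  # -> True
#   InstanceTask.from_code(0x99)                            # -> None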
_lookup = {} def __init__(self, code, action, db_text, is_error=False): self._code = int(code) self._action = action self._db_text = db_text self._is_error = is_error InstanceTask._lookup[self._code] = self @property def action(self): return self._action @property def code(self): return self._code @property def db_text(self): return self._db_text @property def is_error(self): return self._is_error def __eq__(self, other): if not isinstance(other, InstanceTask): return False return self._db_text == other._db_text @classmethod def from_code(cls, code): if code not in cls._lookup: return None return cls._lookup[code] def __str__(self): return "(%d %s %s)" % (self._code, self._action, self._db_text) def __repr__(self): return "InstanceTask.%s (%s)" % (self._action, self._db_text) class InstanceTasks(object): NONE = InstanceTask(0x01, 'NONE', 'No tasks for the instance.') DELETING = InstanceTask(0x02, 'DELETING', 'Deleting the instance.') REBOOTING = InstanceTask(0x03, 'REBOOTING', 'Rebooting the instance.') RESIZING = InstanceTask(0x04, 'RESIZING', 'Resizing the instance.') BUILDING = InstanceTask(0x05, 'BUILDING', 'The instance is building.') MIGRATING = InstanceTask(0x06, 'MIGRATING', 'Migrating the instance.') RESTART_REQUIRED = InstanceTask(0x07, 'RESTART_REQUIRED', 'Instance requires a restart.') PROMOTING = InstanceTask(0x08, 'PROMOTING', 'Promoting the instance to replica source.') EJECTING = InstanceTask(0x09, 'EJECTING', 'Ejecting the replica source.') LOGGING = InstanceTask(0x0a, 'LOGGING', 'Transferring guest logs.') DETACHING = InstanceTask(0x0b, 'DETACHING', 'Detaching the instance from replica source.') BUILDING_ERROR_DNS = InstanceTask(0x50, 'BUILDING', 'Build error: DNS.', is_error=True) BUILDING_ERROR_SERVER = InstanceTask(0x51, 'BUILDING', 'Build error: Server.', is_error=True) BUILDING_ERROR_VOLUME = InstanceTask(0x52, 'BUILDING', 'Build error: Volume.', is_error=True) BUILDING_ERROR_SEC_GROUP = InstanceTask(0x53, 'BUILDING', 'Build error: Secgroup ' 'or rule.', is_error=True) BUILDING_ERROR_REPLICA = InstanceTask(0x54, 'BUILDING', 'Build error: Replica.', is_error=True) PROMOTION_ERROR = InstanceTask(0x55, 'PROMOTING', 'Replica Promotion Error.', is_error=True) EJECTION_ERROR = InstanceTask(0x56, 'EJECTING', 'Replica Source Ejection Error.', is_error=True) GROWING_ERROR = InstanceTask(0x57, 'GROWING', 'Growing Cluster Error.', is_error=True) SHRINKING_ERROR = InstanceTask(0x58, 'SHRINKING', 'Shrinking Cluster Error.', is_error=True) UPGRADING = InstanceTask(0x59, 'UPGRADING', 'Upgrading the instance.') UPGRADING_ERROR = InstanceTask(0x5a, 'UPGRADING', 'Upgrading Cluster Error.', is_error=True) BUILDING_ERROR_TIMEOUT_GA = InstanceTask(0x5b, 'ERROR', 'Build error: ' 'guestagent timeout.', is_error=True) BUILDING_ERROR_PORT = InstanceTask(0x5c, 'BUILDING', 'Build error: Port.', is_error=True) # Dissuade further additions at run-time. InstanceTask.__init__ = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/instance/views.py0000644000175000017500000001731700000000000020632 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from trove.common.views import create_links from trove.common import wsgi from trove.instance import models LOG = logging.getLogger(__name__) class InstanceView(object): """Uses a SimpleInstance.""" def __init__(self, instance, req=None): self.instance = instance self.req = req self.context = req.environ[wsgi.CONTEXT_KEY] def data(self): instance_dict = { "id": self.instance.id, "name": self.instance.name, "status": self.instance.status, "links": self._build_links(), "flavor": self._build_flavor_info(), "datastore": {"type": self.instance.datastore.name, "version": self.instance.datastore_version.name}, "region": self.instance.region_name } if self.context.is_admin: instance_dict['tenant_id'] = self.instance.tenant_id if self.instance.volume_support: instance_dict['volume'] = {'size': self.instance.volume_size} if self.instance.hostname: instance_dict['hostname'] = self.instance.hostname else: ip = self.instance.get_visible_ip_addresses() if ip: instance_dict['ip'] = ip if self.instance.slave_of_id is not None: instance_dict['replica_of'] = self._build_master_info() LOG.debug(instance_dict) return {"instance": instance_dict} def _build_links(self): return create_links("instances", self.req, self.instance.id) def _build_flavor_info(self): return { "id": self.instance.flavor_id, "links": self._build_flavor_links() } def _build_flavor_links(self): return create_links("flavors", self.req, self.instance.flavor_id) def _build_master_info(self): return { "id": self.instance.slave_of_id, "links": create_links("instances", self.req, self.instance.slave_of_id) } class InstanceDetailView(InstanceView): """Works with a full-blown instance.""" def __init__(self, instance, req): super(InstanceDetailView, self).__init__(instance, req=req) def data(self): result = super(InstanceDetailView, self).data() result['instance']['created'] = self.instance.created result['instance']['updated'] = self.instance.updated result['instance']['service_status_updated'] = (self.instance. service_status_updated) result['instance']['datastore']['version'] = (self.instance. datastore_version.name) if self.instance.fault: result['instance']['fault'] = self._build_fault_info() if self.instance.slaves: result['instance']['replicas'] = self._build_slaves_info() if self.instance.configuration is not None: result['instance']['configuration'] = (self. 
_build_configuration_info()) if self.instance.locality: result['instance']['locality'] = self.instance.locality if (isinstance(self.instance, models.DetailInstance) and self.instance.volume_used): used = self.instance.volume_used if self.instance.volume_support: result['instance']['volume']['used'] = used else: # either ephemeral or root partition result['instance']['local_storage'] = {'used': used} if self.instance.root_password: result['instance']['password'] = self.instance.root_password if self.instance.cluster_id: result['instance']['cluster_id'] = self.instance.cluster_id if self.instance.shard_id: result['instance']['shard_id'] = self.instance.shard_id if self.context.is_admin: result['instance']['server_id'] = self.instance.server_id result['instance']['volume_id'] = self.instance.volume_id result['instance']['encrypted_rpc_messaging'] = ( self.instance.encrypted_rpc_messaging) return result def _build_fault_info(self): return { "message": self.instance.fault.message, "created": self.instance.fault.updated, "details": self.instance.fault.details, } def _build_slaves_info(self): data = [] for slave in self.instance.slaves: data.append({ "id": slave.id, "links": create_links("instances", self.req, slave.id) }) return data def _build_configuration_info(self): return { "id": self.instance.configuration.id, "name": self.instance.configuration.name, "links": create_links("configurations", self.req, self.instance.configuration.id) } class InstancesView(object): """Shows a list of SimpleInstance objects.""" def __init__(self, instances, item_view=InstanceView, req=None): self.instances = instances self.item_view = item_view self.req = req def data(self): data = [] # These are model instances for instance in self.instances: data.append(self.data_for_instance(instance)) return {'instances': data} def data_for_instance(self, instance): view = self.item_view(instance, req=self.req) return view.data()['instance'] class DefaultConfigurationView(object): def __init__(self, config): self.config = config def data(self): config_dict = {} for key, val in self.config: config_dict[key] = val return {"instance": {"configuration": config_dict}} class GuestLogView(object): def __init__(self, guest_log): self.guest_log = guest_log def data(self): return { 'name': self.guest_log.name, 'type': self.guest_log.type, 'status': self.guest_log.status, 'published': self.guest_log.published, 'pending': self.guest_log.pending, 'container': self.guest_log.container, 'prefix': self.guest_log.prefix, 'metafile': self.guest_log.metafile, } class GuestLogsView(object): def __init__(self, guest_logs): self.guest_logs = guest_logs def data(self): return [GuestLogView(l).data() for l in self.guest_logs] def convert_instance_count_to_list(instance_count): instance_list = [] for row in instance_count: (_, name, id, md5, count, current, min_date, max_date) = row instance_list.append( {'module_name': name, 'module_id': id, 'module_md5': md5, 'instance_count': count, 'current': current, 'min_updated_date': min_date, 'max_updated_date': max_date }) return instance_list ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/limits/0000755000175000017500000000000000000000000016607 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/limits/__init__.py0000644000175000017500000000000000000000000020706 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/limits/service.py0000644000175000017500000000266300000000000020630 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import policy from trove.common import wsgi from trove.limits import views from trove.quota.quota import QUOTAS class LimitsController(wsgi.Controller): """ Controller for accessing limits in the OpenStack API. """ def index(self, req, tenant_id): """ Return all absolute and rate limit information. """ context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'limits:index') quotas = QUOTAS.get_all_quotas_by_tenant(tenant_id) abs_limits = {k: v['hard_limit'] for k, v in quotas.items()} rate_limits = req.environ.get("trove.limits", []) return wsgi.Result(views.LimitViews(abs_limits, rate_limits).data(), 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/limits/views.py0000644000175000017500000000355000000000000020321 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
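# ---------------------------------------------------------------------------
# Editor's note: a self-contained sketch (invented values, not Trove code) of
# how LimitsController.index in limits/service.py above folds the per-tenant
# quota dict from QUOTAS.get_all_quotas_by_tenant() into the single
# "ABSOLUTE" entry that LimitViews below renders alongside the rate limits.
_example_quotas = {'instances': {'hard_limit': 10},
                   'volumes': {'hard_limit': 40}}
_example_abs_limits = {k: v['hard_limit']
                       for k, v in _example_quotas.items()}
_example_abs_view = {'verb': 'ABSOLUTE'}
for _name, _limit in _example_abs_limits.items():
    _example_abs_view['max_' + _name] = _limit
assert _example_abs_view == {'verb': 'ABSOLUTE',
                             'max_instances': 10,
                             'max_volumes': 40}
# ---------------------------------------------------------------------------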
import datetime from trove.common import timeutils class LimitView(object): def __init__(self, rate_limit): self.rate_limit = rate_limit def data(self): get_utc = datetime.datetime.utcfromtimestamp next_avail = get_utc(self.rate_limit.get("resetTime", 0)) return {"limit": { "nextAvailable": timeutils.isotime(next_avail), "remaining": self.rate_limit.get("remaining", 0), "unit": self.rate_limit.get("unit", ""), "value": self.rate_limit.get("value", ""), "verb": self.rate_limit.get("verb", ""), "uri": self.rate_limit.get("URI", ""), "regex": self.rate_limit.get("regex", "") } } class LimitViews(object): def __init__(self, abs_limits, rate_limits): self.abs_limits = abs_limits self.rate_limits = rate_limits def data(self): data = [] abs_view = dict() abs_view["verb"] = "ABSOLUTE" for resource_name, abs_limit in self.abs_limits.items(): abs_view["max_" + resource_name] = abs_limit data.append(abs_view) for l in self.rate_limits: data.append(LimitView(l).data()["limit"]) return {"limits": data} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/module/0000755000175000017500000000000000000000000016573 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/module/__init__.py0000644000175000017500000000000000000000000020672 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/module/models.py0000644000175000017500000004472100000000000020440 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
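# ---------------------------------------------------------------------------
# Editor's note: an illustrative example (invented epoch value, not Trove
# code) of the rate-limit translation LimitView in trove/limits/views.py
# above performs: the "resetTime" epoch seconds become the ISO-8601
# "nextAvailable" timestamp.
import datetime as _datetime

_example_rate_limit = {'resetTime': 1586541818, 'remaining': 5,
                       'unit': 'MINUTE', 'value': 10, 'verb': 'POST',
                       'URI': '*', 'regex': '.*'}
_next_avail = _datetime.datetime.utcfromtimestamp(
    _example_rate_limit.get('resetTime', 0))
# isoformat() stands in here for trove.common.timeutils.isotime():
print(_next_avail.isoformat())  # 2020-04-10T18:03:38
# ---------------------------------------------------------------------------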
# """Model classes that form the core of Module functionality.""" import hashlib import six from sqlalchemy.sql.expression import or_ from oslo_log import log as logging from trove.common import cfg from trove.common import crypto_utils from trove.common import exception from trove.common.i18n import _ from trove.common import timeutils from trove.common import utils from trove.datastore import models as datastore_models from trove.db import models from trove.taskmanager import api as task_api CONF = cfg.CONF LOG = logging.getLogger(__name__) class Modules(object): DEFAULT_LIMIT = CONF.modules_page_size ENCRYPT_KEY = CONF.module_aes_cbc_key VALID_MODULE_TYPES = [mt.lower() for mt in CONF.module_types] MATCH_ALL_NAME = 'all' @staticmethod def load(context, datastore=None): if context is None: raise TypeError(_("Argument context not defined.")) elif id is None: raise TypeError(_("Argument is not defined.")) query_opts = {'deleted': False} if datastore: if datastore.lower() == Modules.MATCH_ALL_NAME: datastore = None query_opts['datastore_id'] = datastore if context.is_admin: db_info = DBModule.find_all(**query_opts) if db_info.count() == 0: LOG.debug("No modules found for admin user") else: # build a query manually, since we need current tenant # plus the 'all' tenant ones query_opts['visible'] = True db_info = DBModule.query().filter_by(**query_opts) db_info = db_info.filter( or_(DBModule.tenant_id == context.project_id, DBModule.tenant_id.is_(None)) ) if db_info.count() == 0: LOG.debug("No modules found for tenant %s", context.project_id) modules = db_info.all() return modules @staticmethod def load_auto_apply(context, datastore_id, datastore_version_id): """Return all the auto-apply modules for the given criteria.""" if context is None: raise TypeError(_("Argument context not defined.")) elif id is None: raise TypeError(_("Argument is not defined.")) query_opts = {'deleted': False, 'auto_apply': True} db_info = DBModule.query().filter_by(**query_opts) db_info = Modules.add_tenant_filter(db_info, context.project_id) db_info = Modules.add_datastore_filter(db_info, datastore_id) db_info = Modules.add_ds_version_filter(db_info, datastore_version_id) if db_info.count() == 0: LOG.debug("No auto-apply modules found for tenant %s", context.project_id) modules = db_info.all() return modules @staticmethod def add_tenant_filter(query, tenant_id): return query.filter(or_(DBModule.tenant_id == tenant_id, DBModule.tenant_id.is_(None))) @staticmethod def add_datastore_filter(query, datastore_id): return query.filter(or_(DBModule.datastore_id == datastore_id, DBModule.datastore_id.is_(None))) @staticmethod def add_ds_version_filter(query, datastore_version_id): return query.filter(or_( DBModule.datastore_version_id == datastore_version_id, DBModule.datastore_version_id.is_(None))) @staticmethod def load_by_ids(context, module_ids): """Return all the modules for the given ids. Screens out the ones for other tenants, unless the user is admin. 
""" if context is None: raise TypeError(_("Argument context not defined.")) elif id is None: raise TypeError(_("Argument is not defined.")) modules = [] if module_ids: query_opts = {'deleted': False} db_info = DBModule.query().filter_by(**query_opts) if not context.is_admin: db_info = Modules.add_tenant_filter(db_info, context.project_id) db_info = db_info.filter(DBModule.id.in_(module_ids)) modules = db_info.all() return modules @staticmethod def validate(modules, datastore_id, datastore_version_id): for module in modules: if (module.datastore_id and module.datastore_id != datastore_id): reason = (_("Module '%(mod)s' cannot be applied " " (Wrong datastore '%(ds)s' - expected '%(ds2)s')") % {'mod': module.name, 'ds': module.datastore_id, 'ds2': datastore_id}) raise exception.ModuleInvalid(reason=reason) if (module.datastore_version_id and module.datastore_version_id != datastore_version_id): reason = (_("Module '%(mod)s' cannot be applied " " (Wrong datastore version '%(ver)s' " "- expected '%(ver2)s')") % {'mod': module.name, 'ver': module.datastore_version_id, 'ver2': datastore_version_id}) raise exception.ModuleInvalid(reason=reason) class Module(object): def __init__(self, context, module_id): self.context = context self.module_id = module_id @staticmethod def create(context, name, module_type, contents, description, tenant_id, datastore, datastore_version, auto_apply, visible, live_update, priority_apply, apply_order, full_access): if module_type.lower() not in Modules.VALID_MODULE_TYPES: LOG.error("Valid module types: %s", Modules.VALID_MODULE_TYPES) raise exception.ModuleTypeNotFound(module_type=module_type) Module.validate_action( context, 'create', tenant_id, auto_apply, visible, priority_apply, full_access) datastore_id, datastore_version_id = ( datastore_models.get_datastore_or_version( datastore, datastore_version)) if Module.key_exists( name, module_type, tenant_id, datastore_id, datastore_version_id): datastore_str = datastore_id or Modules.MATCH_ALL_NAME ds_version_str = datastore_version_id or Modules.MATCH_ALL_NAME raise exception.ModuleAlreadyExists( name=name, datastore=datastore_str, ds_version=ds_version_str) md5, processed_contents = Module.process_contents(contents) is_admin = context.is_admin if full_access: is_admin = 0 module = DBModule.create( name=name, type=module_type.lower(), contents=processed_contents, description=description, tenant_id=tenant_id, datastore_id=datastore_id, datastore_version_id=datastore_version_id, auto_apply=auto_apply, visible=visible, live_update=live_update, priority_apply=priority_apply, apply_order=apply_order, is_admin=is_admin, md5=md5) return module # Certain fields require admin access to create/change/delete @staticmethod def validate_action(context, action_str, tenant_id, auto_apply, visible, priority_apply, full_access): admin_options_str = None option_strs = [] if tenant_id is None: option_strs.append(_("Tenant: %s") % Modules.MATCH_ALL_NAME) if auto_apply: option_strs.append(_("Auto: %s") % auto_apply) if not visible: option_strs.append(_("Visible: %s") % visible) if priority_apply: option_strs.append(_("Priority: %s") % priority_apply) if full_access is not None: if full_access and option_strs: admin_options_str = "(" + ", ".join(option_strs) + ")" raise exception.InvalidModelError( errors=_('Cannot make module full access: %s') % admin_options_str) option_strs.append(_("Full Access: %s") % full_access) if option_strs: admin_options_str = "(" + ", ".join(option_strs) + ")" if not context.is_admin and admin_options_str: 
raise exception.ModuleAccessForbidden( action=action_str, options=admin_options_str) return admin_options_str @staticmethod def key_exists(name, module_type, tenant_id, datastore_id, datastore_version_id): try: DBModule.find_by( name=name, type=module_type, tenant_id=tenant_id, datastore_id=datastore_id, datastore_version_id=datastore_version_id, deleted=False) return True except exception.ModelNotFoundError: return False # We encrypt the contents (which should be encoded already, since it # might be in binary format) and then encode them again so they can # be stored in a text field in the Trove database. @staticmethod def process_contents(contents): md5 = contents if isinstance(md5, six.text_type): md5 = md5.encode('utf-8') md5 = hashlib.md5(md5).hexdigest() encrypted_contents = crypto_utils.encrypt_data( contents, Modules.ENCRYPT_KEY) return md5, crypto_utils.encode_data(encrypted_contents) # Do the reverse to 'deprocess' the contents @staticmethod def deprocess_contents(processed_contents): encrypted_contents = crypto_utils.decode_data(processed_contents) return crypto_utils.decrypt_data( encrypted_contents, Modules.ENCRYPT_KEY) @staticmethod def delete(context, module): Module.validate_action( context, 'delete', module.tenant_id, module.auto_apply, module.visible, module.priority_apply, None) Module.enforce_live_update(module.id, module.live_update, module.md5) module.deleted = True module.deleted_at = timeutils.utcnow() module.save() @staticmethod def enforce_live_update(module_id, live_update, md5): if not live_update: instances = DBInstanceModule.find_all( module_id=module_id, md5=md5, deleted=False).all() if instances: raise exception.ModuleAppliedToInstance() @staticmethod def load(context, module_id): module = None try: if context.is_admin: module = DBModule.find_by(id=module_id, deleted=False) else: module = DBModule.find_by( id=module_id, tenant_id=context.project_id, visible=True, deleted=False) except exception.ModelNotFoundError: # See if we have the module in the 'all' tenant section if not context.is_admin: try: module = DBModule.find_by( id=module_id, tenant_id=None, visible=True, deleted=False) except exception.ModelNotFoundError: pass # fall through to the raise below if not module: msg = _("Module with ID %s could not be found.") % module_id raise exception.ModelNotFoundError(msg) # Save the encrypted contents in case we need to put it back # when updating the record module.encrypted_contents = module.contents module.contents = Module.deprocess_contents(module.contents) return module @staticmethod def update(context, module, original_module, full_access): Module.enforce_live_update( original_module.id, original_module.live_update, original_module.md5) # we don't allow any changes to 'is_admin' modules by non-admin if original_module.is_admin and not context.is_admin: raise exception.ModuleAccessForbidden( action='update', options='(Module is an admin module)') # we don't allow any changes to admin-only attributes by non-admin admin_options = Module.validate_action( context, 'update', module.tenant_id, module.auto_apply, module.visible, module.priority_apply, full_access) # make sure we set the is_admin flag, but only if it was # originally is_admin or we changed an admin option module.is_admin = original_module.is_admin or ( 1 if admin_options else 0) # but we turn it on/off if full_access is specified if full_access is not None: module.is_admin = 0 if full_access else 1 ds_id, ds_ver_id = datastore_models.get_datastore_or_version( module.datastore_id, 
module.datastore_version_id) if module.contents != original_module.contents: md5, processed_contents = Module.process_contents(module.contents) module.md5 = md5 module.contents = processed_contents elif hasattr(original_module, 'encrypted_contents'): # on load the contents may have been decrypted, so # we need to put the encrypted contents back before we update module.contents = original_module.encrypted_contents if module.datastore_id: module.datastore_id = ds_id if module.datastore_version_id: module.datastore_version_id = ds_ver_id module.updated = timeutils.utcnow() DBModule.save(module) @staticmethod def reapply(context, id, md5, include_clustered, batch_size, batch_delay, force): task_api.API(context).reapply_module( id, md5, include_clustered, batch_size, batch_delay, force) class InstanceModules(object): @staticmethod def load(context, instance_id=None, module_id=None, md5=None): db_info = InstanceModules.load_all( context, instance_id=instance_id, module_id=module_id, md5=md5) if db_info.count() == 0: LOG.debug("No instance module records found") limit = utils.pagination_limit( context.limit, Modules.DEFAULT_LIMIT) data_view = DBInstanceModule.find_by_pagination( 'modules', db_info, 'foo', limit=limit, marker=context.marker) next_marker = data_view.next_page_marker return data_view.collection, next_marker @staticmethod def load_all(context, instance_id=None, module_id=None, md5=None): query_opts = {'deleted': False} if instance_id: query_opts['instance_id'] = instance_id if module_id: query_opts['module_id'] = module_id if md5: query_opts['md5'] = md5 return DBInstanceModule.find_all(**query_opts) class InstanceModule(object): def __init__(self, context, instance_id, module_id): self.context = context self.instance_id = instance_id self.module_id = module_id @staticmethod def create(context, instance_id, module_id, md5): instance_module = None # First mark any 'old' records as deleted and/or update the # current one. old_ims = InstanceModules.load_all( context, instance_id=instance_id, module_id=module_id) for old_im in old_ims: if old_im.md5 == md5 and not instance_module: instance_module = old_im InstanceModule.update(context, instance_module) else: if old_im.md5 == md5 and instance_module: LOG.debug("Found dupe IM record %(old_im)s; marking as " "deleted (instance %(instance_id)s, " "module %(module_id)s).", {'old_im': old_im.id, 'instance_id': instance_id, 'module_id': module_id}) else: LOG.debug("Deleting IM record %(old_im)s (instance " "%(instance_id)s, module %(module_id)s).", {'old_im': old_im.id, 'instance_id': instance_id, 'module_id': module_id}) InstanceModule.delete(context, old_im) # If we don't have an instance module, it means we need to create # a new one. 
if not instance_module: instance_module = DBInstanceModule.create( instance_id=instance_id, module_id=module_id, md5=md5) return instance_module @staticmethod def delete(context, instance_module): instance_module.deleted = True instance_module.deleted_at = timeutils.utcnow() instance_module.save() @staticmethod def load(context, instance_id, module_id, deleted=False): instance_module = None try: instance_module = DBInstanceModule.find_by( instance_id=instance_id, module_id=module_id, deleted=deleted) except exception.ModelNotFoundError: pass return instance_module @staticmethod def update(context, instance_module): instance_module.updated = timeutils.utcnow() DBInstanceModule.save(instance_module) class DBInstanceModule(models.DatabaseModelBase): _data_fields = [ 'instance_id', 'module_id', 'md5', 'created', 'updated', 'deleted', 'deleted_at'] _table_name = 'instance_modules' class DBModule(models.DatabaseModelBase): _data_fields = [ 'name', 'type', 'contents', 'description', 'tenant_id', 'datastore_id', 'datastore_version_id', 'auto_apply', 'visible', 'live_update', 'md5', 'created', 'updated', 'deleted', 'deleted_at', 'priority_apply', 'apply_order', 'is_admin'] _table_name = 'modules' def persisted_models(): return {'modules': DBModule, 'instance_modules': DBInstanceModule} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/module/service.py0000644000175000017500000002322500000000000020611 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import copy from oslo_log import log as logging import trove.common.apischema as apischema from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common import pagination from trove.common import policy from trove.common import wsgi from trove.datastore import models as datastore_models from trove.instance import models as instance_models from trove.instance import views as instance_views from trove.module import models from trove.module import views CONF = cfg.CONF LOG = logging.getLogger(__name__) class ModuleController(wsgi.Controller): schemas = apischema.module @classmethod def authorize_module_action(cls, context, module_rule_name, module): """If a module is not owned by any particular tenant just check that the current tenant is allowed to perform the action. 
""" if module.tenant_id is not None: policy.authorize_on_target(context, 'module:%s' % module_rule_name, {'tenant': module.tenant_id}) else: policy.authorize_on_tenant(context, 'module:%s' % module_rule_name) def index(self, req, tenant_id): context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'module:index') datastore = req.GET.get('datastore', '') if datastore and datastore.lower() != models.Modules.MATCH_ALL_NAME: ds, ds_ver = datastore_models.get_datastore_version( type=datastore) datastore = ds.id modules = models.Modules.load(context, datastore=datastore) view = views.ModulesView(modules) return wsgi.Result(view.data(), 200) def show(self, req, tenant_id, id): LOG.info("Showing module %s.", id) context = req.environ[wsgi.CONTEXT_KEY] module = models.Module.load(context, id) self.authorize_module_action(context, 'show', module) module.instance_count = len(models.InstanceModules.load( context, module_id=module.id, md5=module.md5)) return wsgi.Result( views.DetailedModuleView(module).data(), 200) def create(self, req, body, tenant_id): name = body['module']['name'] LOG.info("Creating module '%s'", name) context = req.environ[wsgi.CONTEXT_KEY] policy.authorize_on_tenant(context, 'module:create') module_type = body['module']['module_type'] contents = body['module']['contents'] description = body['module'].get('description') all_tenants = body['module'].get('all_tenants', 0) module_tenant_id = None if all_tenants else tenant_id datastore = body['module'].get('datastore', {}).get('type', None) ds_version = body['module'].get('datastore', {}).get('version', None) auto_apply = body['module'].get('auto_apply', 0) visible = body['module'].get('visible', 1) live_update = body['module'].get('live_update', 0) priority_apply = body['module'].get('priority_apply', 0) apply_order = body['module'].get('apply_order', 5) full_access = body['module'].get('full_access', None) module = models.Module.create( context, name, module_type, contents, description, module_tenant_id, datastore, ds_version, auto_apply, visible, live_update, priority_apply, apply_order, full_access) view_data = views.DetailedModuleView(module) return wsgi.Result(view_data.data(), 200) def delete(self, req, tenant_id, id): LOG.info("Deleting module %s.", id) context = req.environ[wsgi.CONTEXT_KEY] module = models.Module.load(context, id) self.authorize_module_action(context, 'delete', module) models.Module.delete(context, module) return wsgi.Result(None, 200) def update(self, req, body, tenant_id, id): LOG.info("Updating module %s.", id) context = req.environ[wsgi.CONTEXT_KEY] module = models.Module.load(context, id) self.authorize_module_action(context, 'update', module) original_module = copy.deepcopy(module) if 'name' in body['module']: module.name = body['module']['name'] if 'module_type' in body['module']: module.type = body['module']['module_type'] if 'contents' in body['module']: module.contents = body['module']['contents'] if 'description' in body['module']: module.description = body['module']['description'] if 'all_tenants' in body['module']: module.tenant_id = (None if body['module']['all_tenants'] else tenant_id) ds_changed = False ds_ver_changed = False if 'datastore' in body['module']: if 'type' in body['module']['datastore']: module.datastore_id = body['module']['datastore']['type'] ds_changed = True if 'version' in body['module']['datastore']: module.datastore_version_id = ( body['module']['datastore']['version']) ds_ver_changed = True if 'all_datastores' in body['module']: if ds_changed: raise 
exception.ModuleInvalid( reason=_('You cannot set a datastore and specify ' '--all_datastores')) module.datastore_id = None if 'all_datastore_versions' in body['module']: if ds_ver_changed: raise exception.ModuleInvalid( reason=_('You cannot set a datastore version and specify ' '--all_datastore_versions')) module.datastore_version_id = None if 'auto_apply' in body['module']: module.auto_apply = body['module']['auto_apply'] if 'visible' in body['module']: module.visible = body['module']['visible'] if 'live_update' in body['module']: module.live_update = body['module']['live_update'] if 'priority_apply' in body['module']: module.priority_apply = body['module']['priority_apply'] if 'apply_order' in body['module']: module.apply_order = body['module']['apply_order'] full_access = None if 'full_access' in body['module']: full_access = body['module']['full_access'] models.Module.update(context, module, original_module, full_access) view_data = views.DetailedModuleView(module) return wsgi.Result(view_data.data(), 200) def instances(self, req, tenant_id, id): LOG.info("Getting instances for module %s.", id) context = req.environ[wsgi.CONTEXT_KEY] module = models.Module.load(context, id) self.authorize_module_action(context, 'instances', module) count_only = req.GET.get('count_only', '').lower() == 'true' include_clustered = ( req.GET.get('include_clustered', '').lower() == 'true') if count_only: instance_count = instance_models.module_instance_count( context, id, include_clustered=include_clustered) result_list = { 'instances': instance_views.convert_instance_count_to_list(instance_count)} else: instance_modules, marker = models.InstanceModules.load( context, module_id=id) if instance_modules: instance_ids = [inst_mod.instance_id for inst_mod in instance_modules] instances, marker = instance_models.Instances.load( context, include_clustered, instance_ids=instance_ids) else: instances = [] marker = None view = instance_views.InstancesView(instances, req=req) result_list = pagination.SimplePaginatedDataView( req.url, 'instances', view, marker).data() return wsgi.Result(result_list, 200) def reapply(self, req, body, tenant_id, id): LOG.info("Reapplying module %s to all instances.", id) context = req.environ[wsgi.CONTEXT_KEY] md5 = None if 'md5' in body['reapply']: md5 = body['reapply']['md5'] include_clustered = None if 'include_clustered' in body['reapply']: include_clustered = body['reapply']['include_clustered'] if 'batch_size' in body['reapply']: batch_size = body['reapply']['batch_size'] else: batch_size = CONF.module_reapply_max_batch_size if 'batch_delay' in body['reapply']: batch_delay = body['reapply']['batch_delay'] else: batch_delay = CONF.module_reapply_min_batch_delay force = None if 'force' in body['reapply']: force = body['reapply']['force'] module = models.Module.load(context, id) self.authorize_module_action(context, 'reapply', module) models.Module.reapply(context, id, md5, include_clustered, batch_size, batch_delay, force) return wsgi.Result(None, 202) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/module/views.py0000644000175000017500000001016200000000000020302 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.datastore import models as datastore_models from trove.module import models class ModuleView(object): def __init__(self, module): self.module = module def data(self): module_dict = dict( id=self.module.id, name=self.module.name, type=self.module.type, description=self.module.description, tenant_id=self.module.tenant_id, datastore_id=self.module.datastore_id, datastore_version_id=self.module.datastore_version_id, auto_apply=bool(self.module.auto_apply), priority_apply=bool(self.module.priority_apply), apply_order=self.module.apply_order, is_admin=bool(self.module.is_admin), md5=self.module.md5, visible=bool(self.module.visible), created=self.module.created, updated=self.module.updated) # add extra data to make results more legible if self.module.tenant_id: # This should be the tenant name, but until we figure out where # to get it from, use the tenant_id tenant = self.module.tenant_id else: tenant = models.Modules.MATCH_ALL_NAME module_dict["tenant"] = tenant datastore = self.module.datastore_id datastore_version = self.module.datastore_version_id if datastore: if datastore_version: ds, ds_ver = ( datastore_models.get_datastore_version( type=datastore, version=datastore_version)) datastore = ds.name datastore_version = ds_ver.name else: ds = datastore_models.Datastore.load(datastore) datastore = ds.name datastore_version = models.Modules.MATCH_ALL_NAME else: datastore = models.Modules.MATCH_ALL_NAME datastore_version = models.Modules.MATCH_ALL_NAME module_dict["datastore"] = datastore module_dict["datastore_version"] = datastore_version return {"module": module_dict} class ModulesView(object): def __init__(self, modules): self.modules = modules def data(self): data = [] for module in self.modules: data.append(self.data_for_module(module)) return {"modules": data} def data_for_module(self, module): view = ModuleView(module) return view.data()['module'] class DetailedModuleView(ModuleView): def __init__(self, module): super(DetailedModuleView, self).__init__(module) def data(self, include_contents=False): return_value = super(DetailedModuleView, self).data() module_dict = return_value["module"] module_dict["live_update"] = bool(self.module.live_update) if hasattr(self.module, 'instance_count'): module_dict["instance_count"] = self.module.instance_count if include_contents: if not hasattr(self.module, 'encrypted_contents'): self.module.encrypted_contents = self.module.contents self.module.contents = models.Module.deprocess_contents( self.module.contents) module_dict['contents'] = self.module.contents return {"module": module_dict} def convert_modules_to_list(modules): module_list = [] for module in modules: module_info = DetailedModuleView(module).data(include_contents=True) module_list.append(module_info) return module_list ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/network/0000755000175000017500000000000000000000000016777 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 
trove-12.1.0.dev92/trove/network/__init__.py0000644000175000017500000000000000000000000021076 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/network/base.py0000644000175000017500000000230600000000000020264 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import six @six.add_metaclass(abc.ABCMeta) class NetworkDriver(object): """Base Network Driver class to abstract the network driver used.""" @abc.abstractmethod def get_sec_group_by_id(self, group_id): """ Returns security group with given group_id """ @abc.abstractmethod def delete_security_group(self, sec_group_id): """Deletes the security group by given ID.""" @abc.abstractmethod def delete_security_group_rule(self, sec_group_rule_id): """Deletes the rule by given ID.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/network/neutron.py0000644000175000017500000000425100000000000021045 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
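# NeutronDriver below is a concrete implementation of the NetworkDriver
# ABC from trove/network/base.py. A hedged usage sketch (the constructor
# arguments come from this file; 'RegionOne' is a placeholder value):
#
#     driver = NeutronDriver(context, 'RegionOne')
#     group = driver.get_sec_group_by_id(group_id)
#     driver.delete_security_group(sec_group_id)
#
# Every method converts NeutronClientException into a Trove-level
# exception, so callers deal only with Trove exceptions.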
from neutronclient.common import exceptions as neutron_exceptions from oslo_log import log as logging from trove.common import clients from trove.common import exception from trove.network import base LOG = logging.getLogger(__name__) class NeutronDriver(base.NetworkDriver): def __init__(self, context, region_name): try: self.client = clients.create_neutron_client(context, region_name) except neutron_exceptions.NeutronClientException as e: raise exception.TroveError(str(e)) def get_sec_group_by_id(self, group_id): try: return self.client.show_security_group(security_group=group_id) except neutron_exceptions.NeutronClientException as e: LOG.exception('Failed to get remote security group') raise exception.TroveError(str(e)) def delete_security_group(self, sec_group_id): try: self.client.delete_security_group(security_group=sec_group_id) except neutron_exceptions.NeutronClientException as e: LOG.exception('Failed to delete remote security group') raise exception.SecurityGroupDeletionError(str(e)) def delete_security_group_rule(self, sec_group_rule_id): try: self.client.delete_security_group_rule( security_group_rule=sec_group_rule_id) except neutron_exceptions.NeutronClientException as e: LOG.exception('Failed to delete rule to remote security group') raise exception.SecurityGroupRuleDeletionError(str(e)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/network/nova.py0000644000175000017500000000410200000000000020311 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
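# NovaNetwork is the nova-network twin of trove/network/neutron.py: the
# same NetworkDriver interface, backed by novaclient security groups. The
# shared error-translation idiom (copied from the methods below) is:
#
#     try:
#         self.client.security_groups.get(group_id)
#     except nova_exceptions.ClientException as e:
#         LOG.exception('Failed to get remote security group')
#         raise exception.TroveError(str(e))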
# from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.common import clients from trove.common import exception from trove.network import base LOG = logging.getLogger(__name__) class NovaNetwork(base.NetworkDriver): def __init__(self, context, region_name): try: self.client = clients.create_nova_client( context, region_name) except nova_exceptions.ClientException as e: raise exception.TroveError(str(e)) def get_sec_group_by_id(self, group_id): try: return self.client.security_groups.get(group_id) except nova_exceptions.ClientException as e: LOG.exception('Failed to get remote security group') raise exception.TroveError(str(e)) def delete_security_group(self, sec_group_id): try: self.client.security_groups.delete(sec_group_id) except nova_exceptions.ClientException as e: LOG.exception('Failed to delete remote security group') raise exception.SecurityGroupDeletionError(str(e)) def delete_security_group_rule(self, sec_group_rule_id): try: self.client.security_group_rules.delete(sec_group_rule_id) except nova_exceptions.ClientException as e: LOG.exception('Failed to delete rule to remote security group') raise exception.SecurityGroupRuleDeletionError(str(e)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/quota/0000755000175000017500000000000000000000000016437 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/quota/__init__.py0000644000175000017500000000000000000000000020536 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/quota/models.py0000644000175000017500000000606700000000000020305 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
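# Two notes on the Quota model defined below:
#
# 1. The __init__ defaults id=utils.generate_uuid(),
#    created=timeutils.utcnow() and update=timeutils.utcnow() are evaluated
#    once, at import time, as is standard for Python default arguments, so
#    every Quota built with the defaults in a given process shares the same
#    id and timestamps. Callers needing fresh values must pass them
#    explicitly.
# 2. _data_fields lists 'updated', while __init__ stores the value as
#    self.update; the two attribute names do not match.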
from trove.common import cfg from trove.common import timeutils from trove.common import utils from trove.db import models as dbmodels CONF = cfg.CONF def enum(**enums): return type('Enum', (), enums) class Quota(dbmodels.DatabaseModelBase): """Defines the base model class for a quota.""" _data_fields = ['created', 'updated', 'tenant_id', 'resource', 'hard_limit'] _table_name = 'quotas' def __init__(self, tenant_id, resource, hard_limit, id=utils.generate_uuid(), created=timeutils.utcnow(), update=timeutils.utcnow()): self.tenant_id = tenant_id self.resource = resource self.hard_limit = hard_limit self.id = id self.created = created self.update = update class QuotaUsage(dbmodels.DatabaseModelBase): """Defines the quota usage for a tenant.""" _data_fields = ['created', 'updated', 'tenant_id', 'in_use', 'reserved', 'resource'] _table_name = 'quota_usages' class Reservation(dbmodels.DatabaseModelBase): """Defines the reservation for a quota.""" _data_fields = ['created', 'updated', 'usage_id', 'delta', 'status'] _table_name = 'reservations' Statuses = enum(NEW='New', RESERVED='Reserved', COMMITTED='Committed', ROLLEDBACK='Rolled Back') def persisted_models(): return { 'quotas': Quota, 'quota_usages': QuotaUsage, 'reservations': Reservation, } class Resource(object): """Describe a single resource for quota checking.""" INSTANCES = 'instances' VOLUMES = 'volumes' BACKUPS = 'backups' def __init__(self, name, flag=None): """ Initializes a Resource. :param name: The name of the resource, i.e., "volumes". :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ self.name = name self.flag = flag def __str__(self): return self.name def __hash__(self): return hash(self.name) def __eq__(self, other): return (isinstance(other, Resource) and self.name == other.name and self.flag == other.flag) @property def default(self): """Return the default value of the quota.""" return CONF[self.flag] if self.flag is not None else -1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/quota/quota.py0000644000175000017500000003213400000000000020145 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for DB instances and resources.""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils import six from trove.common import exception from trove.quota.models import Quota from trove.quota.models import QuotaUsage from trove.quota.models import Reservation from trove.quota.models import Resource LOG = logging.getLogger(__name__) CONF = cfg.CONF class DbQuotaDriver(object): """ Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. 
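    A short usage sketch (the resource registry comes from the bottom of
    this module; the tenant id is an illustrative placeholder):

        driver = DbQuotaDriver(resources)
        quota = driver.get_quota_by_tenant('tenant-a', Resource.INSTANCES)

    When no quota row exists for the tenant, the method falls back to a
    Quota built from the resource's configured default.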
""" def __init__(self, resources): self.resources = resources def get_quota_by_tenant(self, tenant_id, resource): """Get a specific quota by tenant.""" quotas = Quota.find_all(tenant_id=tenant_id, resource=resource).all() if len(quotas) == 0: return Quota(tenant_id, resource, self.resources[resource].default) return quotas[0] def get_all_quotas_by_tenant(self, tenant_id, resources): """ Retrieve the quotas for the given tenant. :param resources: A list of the registered resource to get. :param tenant_id: The ID of the tenant to return quotas for. """ all_quotas = Quota.find_all(tenant_id=tenant_id).all() result_quotas = {quota.resource: quota for quota in all_quotas if quota.resource in resources} if len(result_quotas) != len(resources): for resource in resources: # Not in the DB, return default value if resource not in result_quotas: quota = Quota(tenant_id, resource, self.resources[resource].default) result_quotas[resource] = quota return result_quotas def get_quota_usage_by_tenant(self, tenant_id, resource): """Get a specific quota usage by tenant.""" quotas = QuotaUsage.find_all(tenant_id=tenant_id, resource=resource).all() if len(quotas) == 0: return QuotaUsage.create(tenant_id=tenant_id, in_use=0, reserved=0, resource=resource) return quotas[0] def get_all_quota_usages_by_tenant(self, tenant_id, resources): """ Retrieve the quota usagess for the given tenant. :param tenant_id: The ID of the tenant to return quotas for. :param resources: A list of the registered resources to get. """ all_usages = QuotaUsage.find_all(tenant_id=tenant_id).all() result_usages = {usage.resource: usage for usage in all_usages if usage.resource in resources} if len(result_usages) != len(resources): for resource in resources: # Not in the DB, return default value if resource not in result_usages: usage = QuotaUsage.create(tenant_id=tenant_id, in_use=0, reserved=0, resource=resource) result_usages[resource] = usage return result_usages def get_defaults(self, resources): """Given a list of resources, retrieve the default quotas. :param resources: A list of the registered resources. """ quotas = {} for resource in resources.values(): quotas[resource.name] = resource.default return quotas def check_quotas(self, tenant_id, resources, deltas): """Check quotas for a tenant. This method checks quotas against current usage, reserved resources and the desired deltas. If any of the proposed values is over the defined quota, an QuotaExceeded exception will be raised with the sorted list of the resources which are too high. :param tenant_id: The ID of the tenant reserving the resources. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. """ unregistered_resources = [delta for delta in deltas if delta not in resources] if unregistered_resources: raise exception.QuotaResourceUnknown( unknown=unregistered_resources) quotas = self.get_all_quotas_by_tenant(tenant_id, deltas.keys()) quota_usages = self.get_all_quota_usages_by_tenant(tenant_id, deltas.keys()) overs = [resource for resource in deltas if (int(deltas[resource]) > 0 and quotas[resource].hard_limit >= 0 and (quota_usages[resource].in_use + quota_usages[resource].reserved + int(deltas[resource])) > quotas[resource].hard_limit)] if overs: raise exception.QuotaExceeded(overs=sorted(overs)) def reserve(self, tenant_id, resources, deltas): """Check quotas and reserve resources for a tenant. This method checks quotas against current usage, reserved resources and the desired deltas. 
If any of the proposed values is over the defined quota, an QuotaExceeded exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation objects which were created. :param tenant_id: The ID of the tenant reserving the resources. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. """ self.check_quotas(tenant_id, resources, deltas) quota_usages = self.get_all_quota_usages_by_tenant(tenant_id, deltas.keys()) reservations = [] for resource in sorted(deltas): reserved = deltas[resource] usage = quota_usages[resource] usage.reserved += reserved usage.save() resv = Reservation.create(usage_id=usage.id, delta=reserved, status=Reservation.Statuses.RESERVED) reservations.append(resv) return reservations def commit(self, reservations): """Commit reservations. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. """ for reservation in reservations: usage = QuotaUsage.find_by(id=reservation.usage_id) usage.in_use += reservation.delta if usage.in_use < 0: usage.in_use = 0 usage.reserved -= reservation.delta reservation.status = Reservation.Statuses.COMMITTED usage.save() reservation.save() def rollback(self, reservations): """Roll back reservations. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. """ for reservation in reservations: usage = QuotaUsage.find_by(id=reservation.usage_id) usage.reserved -= reservation.delta reservation.status = Reservation.Statuses.ROLLEDBACK usage.save() reservation.save() class QuotaEngine(object): """Represent the set of recognized quotas.""" def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._resources = {} if not quota_driver_class: quota_driver_class = CONF.quota_driver if isinstance(quota_driver_class, six.string_types): quota_driver_class = importutils.import_object(quota_driver_class, self._resources) self._driver = quota_driver_class def __contains__(self, resource): return resource in self._resources def register_resource(self, resource): """Register a resource.""" self._resources[resource.name] = resource def register_resources(self, resources): """Register a dictionary of resources.""" for resource in resources: self.register_resource(resource) def get_quota_by_tenant(self, tenant_id, resource): """Get a specific quota by tenant.""" return self._driver.get_quota_by_tenant(tenant_id, resource) def get_quota_usage(self, quota): """Get the usage for a quota.""" return self._driver.get_quota_usage_by_tenant(quota.tenant_id, quota.resource) def get_defaults(self): """Retrieve the default quotas.""" return self._driver.get_defaults(self._resources) def get_all_quotas_by_tenant(self, tenant_id): """Retrieve the quotas for the given tenant. :param tenant_id: The ID of the tenant to return quotas for. """ return self._driver.get_all_quotas_by_tenant(tenant_id, self._resources) def get_all_quota_usages_by_tenant(self, tenant_id): """Retrieve the quota usages for the given tenant. :param tenant_id: The ID of the tenant to return quota usages for. """ return self._driver.get_all_quota_usages_by_tenant(tenant_id, self._resources) def check_quotas(self, tenant_id, **deltas): self._driver.check_quotas(tenant_id, self._resources, deltas) def reserve(self, tenant_id, **deltas): """Check quotas and reserve resources. 
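        A hedged example of the full reserve/commit/rollback cycle, using
        the module-level QUOTAS engine and the resource names registered at
        the bottom of this file (see also run_with_quotas() there):

            reservations = QUOTAS.reserve(tenant_id, instances=1, volumes=2)
            try:
                ...  # do the work that consumes the resources
            except Exception:
                QUOTAS.rollback(reservations)
                raise
            else:
                QUOTAS.commit(reservations)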
For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. The deltas are given as keyword arguments, and current usage and other reservations are factored into the quota check. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an QuotaExceeded exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param tenant_id: The ID of the tenant to reserve quotas for. """ reservations = self._driver.reserve(tenant_id, self._resources, deltas) LOG.debug("Created reservations %(reservations)s", {'reservations': reservations}) return reservations def commit(self, reservations): """Commit reservations. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. """ try: self._driver.commit(reservations) except Exception: LOG.exception("Failed to commit reservations " "%(reservations)s", {'reservations': reservations}) def rollback(self, reservations): """Roll back reservations. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. """ try: self._driver.rollback(reservations) except Exception: LOG.exception("Failed to roll back reservations " "%(reservations)s", {'reservations': reservations}) @property def resources(self): return sorted(self._resources.keys()) QUOTAS = QuotaEngine() ''' Define all kind of resources here ''' resources = [Resource(Resource.INSTANCES, 'max_instances_per_tenant'), Resource(Resource.BACKUPS, 'max_backups_per_tenant'), Resource(Resource.VOLUMES, 'max_volumes_per_tenant')] QUOTAS.register_resources(resources) def run_with_quotas(tenant_id, deltas, f): """Quota wrapper.""" reservations = QUOTAS.reserve(tenant_id, **deltas) result = None try: result = f() except Exception: QUOTAS.rollback(reservations) raise else: QUOTAS.commit(reservations) return result def check_quotas(tenant_id, deltas): QUOTAS.check_quotas(tenant_id, **deltas) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/rpc.py0000644000175000017500000001016600000000000016450 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
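# A sketch of the intended module lifecycle, taken from the function
# definitions below (CONF stands for the loaded oslo.config object):
#
#     rpc.init(CONF)                    # build TRANSPORT/NOTIFIER globals
#     client = rpc.get_client(target, key)
#     server = rpc.get_server(target, endpoints, key)
#     notifier = rpc.get_notifier(service='taskmanager')
#     ...
#     rpc.cleanup()                     # tear the globals back down
#
# get_client() and get_server() assert that init() has already run, so the
# ordering above matters.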
# NOTE(esp): This code was taken from nova __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'get_client', 'get_server', 'get_notifier', ] from oslo_config import cfg import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher import trove.common.exception from trove.common.rpc import secure_serializer as ssz from trove.common.rpc import serializer as sz CONF = cfg.CONF TRANSPORT = None NOTIFICATION_TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ trove.common.exception.__name__, ] EXTRA_EXMODS = [] def init(conf): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = messaging.get_rpc_transport(conf, allowed_remote_exmods=exmods) NOTIFICATION_TRANSPORT = messaging.get_notification_transport( conf, allowed_remote_exmods=exmods) serializer = sz.TroveRequestContextSerializer( messaging.JsonPayloadSerializer()) NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER assert TRANSPORT is not None assert NOTIFICATION_TRANSPORT is not None assert NOTIFIER is not None TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS def get_transport_url(url_str=None): return messaging.TransportURL.parse(CONF, url_str) def get_client(target, key, version_cap=None, serializer=None, secure_serializer=ssz.SecureSerializer): assert TRANSPORT is not None # BUG(1650518): Cleanup in the Pike release # uncomment this (following) line in the pike release # assert key is not None serializer = secure_serializer( sz.TroveRequestContextSerializer(serializer), key) return messaging.RPCClient(TRANSPORT, target, version_cap=version_cap, serializer=serializer) def get_server(target, endpoints, key, serializer=None, secure_serializer=ssz.SecureSerializer): assert TRANSPORT is not None # Thread module is not monkeypatched if remote debugging is enabled. # Using eventlet executor without monkepatching thread module will # lead to unpredictable results. 
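    # In other words: when remote debugging is enabled, the code below
    # selects the "blocking" executor so that un-monkeypatched threads
    # behave predictably; otherwise it uses the default "eventlet"
    # executor.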
from trove.common import debug_utils debug_utils.setup() executor = "blocking" if debug_utils.enabled() else "eventlet" # BUG(1650518): Cleanup in the Pike release # uncomment this (following) line in the pike release # assert key is not None serializer = secure_serializer( sz.TroveRequestContextSerializer(serializer), key) return messaging.get_rpc_server( TRANSPORT, target, endpoints, executor=executor, serializer=serializer, access_policy=dispatcher.DefaultRPCAccessPolicy) def get_notifier(service=None, host=None, publisher_id=None): assert NOTIFIER is not None if not publisher_id: publisher_id = "%s.%s" % (service, host or CONF.host) return NOTIFIER.prepare(publisher_id=publisher_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/taskmanager/0000755000175000017500000000000000000000000017603 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/taskmanager/__init__.py0000644000175000017500000000000000000000000021702 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/taskmanager/api.py0000644000175000017500000002565200000000000020740 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Routes all the requests to the task manager. """ from oslo_log import log as logging import oslo_messaging as messaging from trove.common import cfg from trove.common import exception from trove.common.i18n import _ from trove.common.notification import NotificationCastWrapper from trove.common.strategies.cluster import strategy from trove.guestagent import models as agent_models from trove import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class API(object): """API for interacting with the task manager. API version history: * 1.0 - Initial version. 
When updating this API, also update API_LATEST_VERSION """ # API_LATEST_VERSION should bump the minor number each time # a method signature is added or changed API_LATEST_VERSION = '1.0' # API_BASE_VERSION should only change on major version upgrade API_BASE_VERSION = '1.0' VERSION_ALIASES = { 'icehouse': '1.0', 'juno': '1.0', 'kilo': '1.0', 'liberty': '1.0', 'mitaka': '1.0', 'newton': '1.0', 'latest': API_LATEST_VERSION } def __init__(self, context): self.context = context super(API, self).__init__() version_cap = self.VERSION_ALIASES.get( CONF.upgrade_levels.taskmanager, CONF.upgrade_levels.taskmanager) target = messaging.Target(topic=CONF.taskmanager_queue, version=version_cap) self.client = self.get_client(target, version_cap) def _cast(self, method_name, version, **kwargs): LOG.debug("Casting %s", method_name) with NotificationCastWrapper(self.context, 'taskmanager'): cctxt = self.client.prepare(version=version) cctxt.cast(self.context, method_name, **kwargs) def get_client(self, target, version_cap, serializer=None): if CONF.enable_secure_rpc_messaging: key = CONF.taskmanager_rpc_encr_key else: key = None return rpc.get_client(target, key=key, version_cap=version_cap, serializer=serializer) def _transform_obj(self, obj_ref): # Turn the object into a dictionary and remove the mgr if "__dict__" in dir(obj_ref): obj_dict = obj_ref.__dict__ # We assume manager contains a object due to the *clients if obj_dict.get('manager'): del obj_dict['manager'] return obj_dict raise ValueError(_("Could not transform %s") % obj_ref) def _delete_heartbeat(self, instance_id): agent_heart_beat = agent_models.AgentHeartBeat() try: heartbeat = agent_heart_beat.find_by_instance_id(instance_id) heartbeat.delete() except exception.ModelNotFoundError as e: LOG.error(e.message) def resize_volume(self, new_size, instance_id): LOG.debug("Making async call to resize volume for instance: %s", instance_id) version = self.API_BASE_VERSION self._cast("resize_volume", version=version, new_size=new_size, instance_id=instance_id) def resize_flavor(self, instance_id, old_flavor, new_flavor): LOG.debug("Making async call to resize flavor for instance: %s", instance_id) version = self.API_BASE_VERSION self._cast("resize_flavor", version=version, instance_id=instance_id, old_flavor=self._transform_obj(old_flavor), new_flavor=self._transform_obj(new_flavor)) def reboot(self, instance_id): LOG.debug("Making async call to reboot instance: %s", instance_id) version = self.API_BASE_VERSION self._cast("reboot", version=version, instance_id=instance_id) def restart(self, instance_id): LOG.debug("Making async call to restart instance: %s", instance_id) version = self.API_BASE_VERSION self._cast("restart", version=version, instance_id=instance_id) def detach_replica(self, instance_id): LOG.debug("Making async call to detach replica: %s", instance_id) version = self.API_BASE_VERSION self._cast("detach_replica", version=version, instance_id=instance_id) def promote_to_replica_source(self, instance_id): LOG.debug("Making async call to promote replica to source: %s", instance_id) version = self.API_BASE_VERSION self._cast("promote_to_replica_source", version=version, instance_id=instance_id) def eject_replica_source(self, instance_id): LOG.debug("Making async call to eject replica source: %s", instance_id) version = self.API_BASE_VERSION self._cast("eject_replica_source", version=version, instance_id=instance_id) def migrate(self, instance_id, host): LOG.debug("Making async call to migrate instance: %s", instance_id) version = 
self.API_BASE_VERSION self._cast("migrate", version=version, instance_id=instance_id, host=host) def delete_instance(self, instance_id): LOG.debug("Making async call to delete instance: %s", instance_id) version = self.API_BASE_VERSION self._cast("delete_instance", version=version, instance_id=instance_id) def create_backup(self, backup_info, instance_id): LOG.debug("Making async call to create a backup for instance: %s", instance_id) version = self.API_BASE_VERSION self._cast("create_backup", version=version, backup_info=backup_info, instance_id=instance_id) def delete_backup(self, backup_id): LOG.debug("Making async call to delete backup: %s", backup_id) version = self.API_BASE_VERSION self._cast("delete_backup", version=version, backup_id=backup_id) def create_instance(self, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id=None, availability_zone=None, root_password=None, nics=None, overrides=None, slave_of_id=None, cluster_config=None, volume_type=None, modules=None, locality=None, access=None): LOG.debug("Making async call to create instance %s ", instance_id) version = self.API_BASE_VERSION self._cast("create_instance", version=version, instance_id=instance_id, name=name, flavor=self._transform_obj(flavor), image_id=image_id, databases=databases, users=users, datastore_manager=datastore_manager, packages=packages, volume_size=volume_size, backup_id=backup_id, availability_zone=availability_zone, root_password=root_password, nics=nics, overrides=overrides, slave_of_id=slave_of_id, cluster_config=cluster_config, volume_type=volume_type, modules=modules, locality=locality, access=access) def create_cluster(self, cluster_id): LOG.debug("Making async call to create cluster %s ", cluster_id) version = self.API_BASE_VERSION self._cast("create_cluster", version=version, cluster_id=cluster_id) def grow_cluster(self, cluster_id, new_instance_ids): LOG.debug("Making async call to grow cluster %s ", cluster_id) version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "grow_cluster", cluster_id=cluster_id, new_instance_ids=new_instance_ids) def shrink_cluster(self, cluster_id, instance_ids): LOG.debug("Making async call to shrink cluster %s ", cluster_id) version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "shrink_cluster", cluster_id=cluster_id, instance_ids=instance_ids) def delete_cluster(self, cluster_id): LOG.debug("Making async call to delete cluster %s ", cluster_id) version = self.API_BASE_VERSION self._cast("delete_cluster", version=version, cluster_id=cluster_id) def upgrade(self, instance_id, datastore_version_id): LOG.debug("Making async call to upgrade guest to datastore " "version %s ", datastore_version_id) version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "upgrade", instance_id=instance_id, datastore_version_id=datastore_version_id) def restart_cluster(self, cluster_id): LOG.debug("Making async call to restart cluster %s ", cluster_id) version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "restart_cluster", cluster_id=cluster_id) def upgrade_cluster(self, cluster_id, datastore_version_id): LOG.debug("Making async call to upgrade guest to datastore " "version %s ", datastore_version_id) version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "upgrade_cluster", cluster_id=cluster_id, 
datastore_version_id=datastore_version_id) def reapply_module(self, module_id, md5, include_clustered, batch_size, batch_delay, force): LOG.debug("Making async call to reapply module %s", module_id) version = self.API_BASE_VERSION cctxt = self.client.prepare(version=version) cctxt.cast(self.context, "reapply_module", module_id=module_id, md5=md5, include_clustered=include_clustered, batch_size=batch_size, batch_delay=batch_delay, force=force) def load(context, manager=None): if manager: task_manager_api_class = (strategy.load_taskmanager_strategy(manager) .task_manager_api_class) else: task_manager_api_class = API return task_manager_api_class(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/taskmanager/manager.py0000644000175000017500000006101100000000000021566 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_service import periodic_task from oslo_utils import importutils from trove.backup.models import Backup import trove.common.cfg as cfg from trove.common import clients from trove.common.context import TroveContext from trove.common import exception from trove.common.exception import ReplicationSlaveAttachError from trove.common.exception import TroveError from trove.common.i18n import _ from trove.common.notification import DBaaSQuotas, EndNotification from trove.common import server_group as srv_grp from trove.common.strategies.cluster import strategy from trove.datastore.models import DatastoreVersion import trove.extensions.mgmt.instances.models as mgmtmodels from trove.instance.tasks import InstanceTasks from trove.taskmanager import models from trove.taskmanager.models import FreshInstanceTasks, BuiltInstanceTasks from trove.quota.quota import QUOTAS LOG = logging.getLogger(__name__) CONF = cfg.CONF class Manager(periodic_task.PeriodicTasks): def __init__(self): super(Manager, self).__init__(CONF) self.admin_context = TroveContext( user=CONF.service_credentials.username, tenant=CONF.service_credentials.project_id, user_domain_name=CONF.service_credentials.user_domain_name) if CONF.exists_notification_transformer: self.exists_transformer = importutils.import_object( CONF.exists_notification_transformer, context=self.admin_context) def resize_volume(self, context, instance_id, new_size): with EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.resize_volume(new_size) def resize_flavor(self, context, instance_id, old_flavor, new_flavor): with EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.resize_flavor(old_flavor, new_flavor) def reboot(self, context, instance_id): with EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.reboot() def restart(self, context, instance_id): with 
EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.restart() def detach_replica(self, context, instance_id): with EndNotification(context): slave = models.BuiltInstanceTasks.load(context, instance_id) master_id = slave.slave_of_id master = models.BuiltInstanceTasks.load(context, master_id) slave.detach_replica(master) def _set_task_status(self, instances, status): for instance in instances: setattr(instance.db_info, 'task_status', status) instance.db_info.save() def promote_to_replica_source(self, context, instance_id): # TODO(atomic77) Promote and eject need to be able to handle the case # where a datastore like Postgresql needs to treat the slave to be # promoted differently from the old master and the slaves which will # be simply reassigned to a new master. See: # https://bugs.launchpad.net/trove/+bug/1553339 def _promote_to_replica_source(old_master, master_candidate, replica_models): # First, we transition from the old master to new as quickly as # possible to minimize the scope of unrecoverable error # NOTE(zhaochao): we cannot reattach the old master to the new # one immediately after the new master is up, because for MariaDB # the other replicas are still connecting to the old master, and # during reattaching the old master as a slave, new GTID may be # created and synced to the replicas. After that, when attaching # the replicas to the new master, 'START SLAVE' will fail by # 'fatal error 1236' if the binlog of the replica diverged from # the new master. So the proper order should be: # -1. make the old master read only (and detach floating ips) # -2. make sure the new master is up-to-date # -3. detach the new master from the old one # -4. enable the new master (and attach floating ips) # -5. attach the other replicas to the new master # -6. attach the old master to the new one # (and attach floating ips) # -7. demote the old master # What we changed here is the order of the 6th step, previously # this step took place right after step 4, which causes failures # with MariaDB replications. old_master.make_read_only(True) master_ips = old_master.detach_public_ips() slave_ips = master_candidate.detach_public_ips() latest_txn_id = old_master.get_latest_txn_id() master_candidate.wait_for_txn(latest_txn_id) master_candidate.detach_replica(old_master, for_failover=True) master_candidate.enable_as_master() master_candidate.attach_public_ips(master_ips) master_candidate.make_read_only(False) # At this point, should something go wrong, there # should be a working master with some number of working slaves, # and possibly some number of "orphaned" slaves exception_replicas = [] error_messages = "" for replica in replica_models: try: if replica.id != master_candidate.id: replica.detach_replica(old_master, for_failover=True) replica.attach_replica(master_candidate) except exception.TroveError as ex: log_fmt = ("Unable to migrate replica %(slave)s from " "old replica source %(old_master)s to " "new source %(new_master)s on promote.") exc_fmt = _("Unable to migrate replica %(slave)s from " "old replica source %(old_master)s to " "new source %(new_master)s on promote.") msg_content = { "slave": replica.id, "old_master": old_master.id, "new_master": master_candidate.id} LOG.exception(log_fmt, msg_content) exception_replicas.append(replica) error_messages += "%s (%s)\n" % ( exc_fmt % msg_content, ex) # dealing with the old master after all the other replicas # has been migrated. 
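            # Steps 6 and 7 from the ordering described in the NOTE above:
            # reattach the old master as a replica of the promoted node,
            # then demote it. Failures here are collected into
            # exception_replicas rather than aborting the whole promote.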
old_master.attach_replica(master_candidate) old_master.attach_public_ips(slave_ips) try: old_master.demote_replication_master() except Exception as ex: log_fmt = "Exception demoting old replica source %s." exc_fmt = _("Exception demoting old replica source %s.") LOG.exception(log_fmt, old_master.id) exception_replicas.append(old_master) error_messages += "%s (%s)\n" % ( exc_fmt % old_master.id, ex) self._set_task_status([old_master] + replica_models, InstanceTasks.NONE) if exception_replicas: self._set_task_status(exception_replicas, InstanceTasks.PROMOTION_ERROR) msg = (_("promote-to-replica-source %(id)s: The following " "replicas may not have been switched: %(replicas)s:" "\n%(err)s") % {"id": master_candidate.id, "replicas": [repl.id for repl in exception_replicas], "err": error_messages}) raise ReplicationSlaveAttachError(msg) with EndNotification(context): master_candidate = BuiltInstanceTasks.load(context, instance_id) old_master = BuiltInstanceTasks.load(context, master_candidate.slave_of_id) replicas = [] for replica_dbinfo in old_master.slaves: if replica_dbinfo.id == instance_id: replica = master_candidate else: replica = BuiltInstanceTasks.load(context, replica_dbinfo.id) replicas.append(replica) try: _promote_to_replica_source(old_master, master_candidate, replicas) except ReplicationSlaveAttachError: raise except Exception: self._set_task_status([old_master] + replicas, InstanceTasks.PROMOTION_ERROR) raise # pulled out to facilitate testing def _get_replica_txns(self, replica_models): return [[repl] + repl.get_last_txn() for repl in replica_models] def _most_current_replica(self, old_master, replica_models): last_txns = self._get_replica_txns(replica_models) master_ids = [txn[1] for txn in last_txns if txn[1]] if len(set(master_ids)) > 1: raise TroveError(_("Replicas of %s not all replicating" " from same master") % old_master.id) return sorted(last_txns, key=lambda x: x[2], reverse=True)[0][0] def eject_replica_source(self, context, instance_id): def _eject_replica_source(old_master, replica_models): master_candidate = self._most_current_replica(old_master, replica_models) master_ips = old_master.detach_public_ips() slave_ips = master_candidate.detach_public_ips() master_candidate.detach_replica(old_master, for_failover=True) master_candidate.enable_as_master() master_candidate.attach_public_ips(master_ips) master_candidate.make_read_only(False) old_master.attach_public_ips(slave_ips) exception_replicas = [] error_messages = "" for replica in replica_models: try: if replica.id != master_candidate.id: replica.detach_replica(old_master, for_failover=True) replica.attach_replica(master_candidate) except exception.TroveError as ex: log_fmt = ("Unable to migrate replica %(slave)s from " "old replica source %(old_master)s to " "new source %(new_master)s on eject.") exc_fmt = _("Unable to migrate replica %(slave)s from " "old replica source %(old_master)s to " "new source %(new_master)s on eject.") msg_content = { "slave": replica.id, "old_master": old_master.id, "new_master": master_candidate.id} LOG.exception(log_fmt, msg_content) exception_replicas.append(replica) error_messages += "%s (%s)\n" % ( exc_fmt % msg_content, ex) self._set_task_status([old_master] + replica_models, InstanceTasks.NONE) if exception_replicas: self._set_task_status(exception_replicas, InstanceTasks.EJECTION_ERROR) msg = (_("eject-replica-source %(id)s: The following " "replicas may not have been switched: %(replicas)s:" "\n%(err)s") % {"id": master_candidate.id, "replicas": [repl.id for repl in 
exception_replicas], "err": error_messages}) raise ReplicationSlaveAttachError(msg) with EndNotification(context): master = BuiltInstanceTasks.load(context, instance_id) replicas = [BuiltInstanceTasks.load(context, dbinfo.id) for dbinfo in master.slaves] try: _eject_replica_source(master, replicas) except ReplicationSlaveAttachError: raise except Exception: self._set_task_status([master] + replicas, InstanceTasks.EJECTION_ERROR) raise def migrate(self, context, instance_id, host): with EndNotification(context): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.migrate(host) def delete_instance(self, context, instance_id): with EndNotification(context): try: instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.delete_async() except exception.UnprocessableEntity: instance_tasks = models.FreshInstanceTasks.load(context, instance_id) instance_tasks.delete_async() def delete_backup(self, context, backup_id): with EndNotification(context): models.BackupTasks.delete_backup(context, backup_id) def create_backup(self, context, backup_info, instance_id): with EndNotification(context, backup_id=backup_info['id']): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) instance_tasks.create_backup(backup_info) def _create_replication_slave(self, context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, availability_zone, root_password, nics, overrides, slave_of_id, backup_id, volume_type, modules): if type(instance_id) in [list]: ids = instance_id root_passwords = root_password else: ids = [instance_id] root_passwords = [root_password] replica_number = 0 replica_backup_id = backup_id replica_backup_created = False replicas = [] master_instance_tasks = BuiltInstanceTasks.load(context, slave_of_id) server_group = master_instance_tasks.server_group scheduler_hints = srv_grp.ServerGroup.convert_to_hint(server_group) LOG.debug("Using scheduler hints %s for creating instance %s", scheduler_hints, instance_id) try: for replica_index in range(0, len(ids)): try: replica_number += 1 LOG.debug("Creating replica %(num)d of %(count)d.", {'num': replica_number, 'count': len(ids)}) instance_tasks = FreshInstanceTasks.load( context, ids[replica_index]) snapshot = instance_tasks.get_replication_master_snapshot( context, slave_of_id, flavor, replica_backup_id, replica_number=replica_number) replica_backup_id = snapshot['dataset']['snapshot_id'] replica_backup_created = (replica_backup_id is not None) instance_tasks.create_instance( flavor, image_id, databases, users, datastore_manager, packages, volume_size, replica_backup_id, availability_zone, root_passwords[replica_index], nics, overrides, None, snapshot, volume_type, modules, scheduler_hints) replicas.append(instance_tasks) except Exception: # if it's the first replica, then we shouldn't continue LOG.exception( "Could not create replica %(num)d of %(count)d.", {'num': replica_number, 'count': len(ids)}) if replica_number == 1: raise for replica in replicas: replica.wait_for_instance(CONF.restore_usage_timeout, flavor) finally: if replica_backup_created: Backup.delete(context, replica_backup_id) def _create_instance(self, context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type, modules, locality, access=None): if slave_of_id: self._create_replication_slave(context, instance_id, name, 
flavor, image_id, databases, users, datastore_manager, packages, volume_size, availability_zone, root_password, nics, overrides, slave_of_id, backup_id, volume_type, modules) else: if type(instance_id) in [list]: raise AttributeError(_( "Cannot create multiple non-replica instances.")) scheduler_hints = srv_grp.ServerGroup.build_scheduler_hint( context, locality, instance_id ) LOG.debug("Using scheduler hints %s for creating instance %s", scheduler_hints, instance_id) instance_tasks = FreshInstanceTasks.load(context, instance_id) instance_tasks.create_instance( flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, cluster_config, None, volume_type, modules, scheduler_hints, access=access ) timeout = (CONF.restore_usage_timeout if backup_id else CONF.usage_timeout) instance_tasks.wait_for_instance(timeout, flavor) def create_instance(self, context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type, modules, locality, access=None): with EndNotification(context, instance_id=(instance_id[0] if isinstance(instance_id, list) else instance_id)): self._create_instance(context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type, modules, locality, access=access) def upgrade(self, context, instance_id, datastore_version_id): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) datastore_version = DatastoreVersion.load_by_uuid(datastore_version_id) with EndNotification(context): instance_tasks.upgrade(datastore_version) def create_cluster(self, context, cluster_id): with EndNotification(context, cluster_id=cluster_id): cluster_tasks = models.load_cluster_tasks(context, cluster_id) cluster_tasks.create_cluster(context, cluster_id) def grow_cluster(self, context, cluster_id, new_instance_ids): with EndNotification(context, cluster_id=cluster_id, instance_ids=new_instance_ids): cluster_tasks = models.load_cluster_tasks(context, cluster_id) cluster_tasks.grow_cluster(context, cluster_id, new_instance_ids) def shrink_cluster(self, context, cluster_id, instance_ids): with EndNotification(context, cluster_id=cluster_id, instance_ids=instance_ids): cluster_tasks = models.load_cluster_tasks(context, cluster_id) cluster_tasks.shrink_cluster(context, cluster_id, instance_ids) def restart_cluster(self, context, cluster_id): cluster_tasks = models.load_cluster_tasks(context, cluster_id) cluster_tasks.restart_cluster(context, cluster_id) def upgrade_cluster(self, context, cluster_id, datastore_version_id): datastore_version = DatastoreVersion.load_by_uuid(datastore_version_id) cluster_tasks = models.load_cluster_tasks(context, cluster_id) cluster_tasks.upgrade_cluster(context, cluster_id, datastore_version) def delete_cluster(self, context, cluster_id): with EndNotification(context): cluster_tasks = models.load_cluster_tasks(context, cluster_id) cluster_tasks.delete_cluster(context, cluster_id) def reapply_module(self, context, module_id, md5, include_clustered, batch_size, batch_delay, force): models.ModuleTasks.reapply_module( context, module_id, md5, include_clustered, batch_size, batch_delay, force) if CONF.exists_notification_transformer: @periodic_task.periodic_task def publish_exists_event(self, context): """ 
        Push this into Instance Tasks to fetch a report/collection.

        :param context: currently None, as specified in the bin script
        """
        mgmtmodels.publish_exist_events(self.exists_transformer,
                                        self.admin_context)

    if CONF.quota_notification_interval:
        @periodic_task.periodic_task(spacing=CONF.quota_notification_interval)
        def publish_quota_notifications(self, context):
            nova_client = clients.create_nova_client(self.admin_context)
            for tenant in nova_client.tenants.list():
                for quota in QUOTAS.get_all_quotas_by_tenant(tenant.id):
                    usage = QUOTAS.get_quota_usage(quota)
                    DBaaSQuotas(self.admin_context, quota, usage).notify()

    def __getattr__(self, name):
        """We should only get here if Python couldn't find a "real" method."""

        def raise_error(msg):
            raise AttributeError(msg)

        manager, sep, method = name.partition('_')
        if not manager:
            raise_error('Cannot derive manager from attribute name "%s"'
                        % name)
        task_strategy = strategy.load_taskmanager_strategy(manager)
        if not task_strategy:
            raise_error('No task manager strategy for manager "%s"' % manager)
        if method not in task_strategy.task_manager_manager_actions:
            raise_error('No method "%s" for task manager strategy for manager'
                        ' "%s"' % (method, manager))
        return task_strategy.task_manager_manager_actions.get(method)

trove-12.1.0.dev92/trove/taskmanager/models.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
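# NOTE (editorial illustration, not part of the original module): the
# Manager.__getattr__ above turns undefined attribute lookups into datastore
# strategy dispatch. A call such as manager.mongodb_add_shard(...) is split
# on the first underscore into ('mongodb', '_', 'add_shard'), the 'mongodb'
# taskmanager strategy is loaded, and its task_manager_manager_actions dict
# supplies the callable. A minimal sketch of the lookup ('add_shard' is a
# hypothetical action name used only for illustration):
#
#     task_strategy = strategy.load_taskmanager_strategy('mongodb')
#     action = task_strategy.task_manager_manager_actions['add_shard']
#     action(context=..., cluster_id=...)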
import copy import os.path import time import traceback from cinderclient import exceptions as cinder_exceptions from eventlet import greenthread from eventlet.timeout import Timeout from oslo_log import log as logging from oslo_utils import netutils from swiftclient.client import ClientException from trove.backup import models as bkup_models from trove.backup.models import Backup from trove.backup.models import DBBackup from trove.backup.state import BackupState from trove.cluster.models import Cluster from trove.cluster.models import DBCluster from trove.cluster import tasks from trove.common import cfg from trove.common import clients from trove.common.clients import create_cinder_client from trove.common.clients import create_dns_client from trove.common.clients import create_guest_client from trove.common import crypto_utils as cu from trove.common import exception from trove.common.exception import BackupCreationError from trove.common.exception import GuestError from trove.common.exception import GuestTimeout from trove.common.exception import InvalidModelError from trove.common.exception import PollTimeOut from trove.common.exception import TroveError from trove.common.exception import VolumeCreationFailure from trove.common.i18n import _ from trove.common import instance as rd_instance from trove.common.instance import ServiceStatuses from trove.common import neutron from trove.common.notification import ( DBaaSInstanceRestart, DBaaSInstanceUpgrade, EndNotification, StartNotification, TroveInstanceCreate, TroveInstanceModifyVolume, TroveInstanceModifyFlavor) from trove.common.strategies.cluster import strategy from trove.common import template from trove.common import timeutils from trove.common import utils from trove.common.utils import try_recover from trove.extensions.mysql import models as mysql_models from trove.instance import models as inst_models from trove.instance.models import BuiltInstance from trove.instance.models import DBInstance from trove.instance.models import FreshInstance from trove.instance.models import Instance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceStatus from trove.instance.tasks import InstanceTasks from trove.module import models as module_models from trove.module import views as module_views from trove.quota.quota import run_with_quotas from trove import rpc LOG = logging.getLogger(__name__) CONF = cfg.CONF class NotifyMixin(object): """Notification Mixin This adds the ability to send usage events to an Instance object. 
""" def _get_service_id(self, datastore_manager, id_map): if datastore_manager in id_map: datastore_manager_id = id_map[datastore_manager] else: datastore_manager_id = cfg.UNKNOWN_SERVICE_ID LOG.error("Datastore ID for Manager (%s) is not configured", datastore_manager) return datastore_manager_id def send_usage_event(self, event_type, **kwargs): event_type = 'trove.instance.%s' % event_type publisher_id = CONF.host # Grab the instance size from the kwargs or from the nova client instance_size = kwargs.pop('instance_size', None) flavor = self.nova_client.flavors.get(self.flavor_id) server = kwargs.pop('server', None) if server is None: server = self.nova_client.servers.get(self.server_id) az = getattr(server, 'OS-EXT-AZ:availability_zone', None) # Default payload created_time = timeutils.isotime(self.db_info.created) payload = { 'availability_zone': az, 'created_at': created_time, 'name': self.name, 'instance_id': self.id, 'instance_name': self.name, 'instance_size': instance_size or flavor.ram, 'instance_type': flavor.name, 'instance_type_id': flavor.id, 'launched_at': created_time, 'nova_instance_id': self.server_id, 'region': CONF.region, 'state_description': self.status, 'state': self.status, 'tenant_id': self.tenant_id, 'user_id': self.context.user, } if CONF.get(self.datastore_version.manager).volume_support: payload.update({ 'volume_size': self.volume_size, 'nova_volume_id': self.volume_id }) payload['service_id'] = self._get_service_id( self.datastore_version.manager, CONF.notification_service_id) # Update payload with all other kwargs payload.update(kwargs) LOG.debug('Sending event: %(event_type)s, %(payload)s', {'event_type': event_type, 'payload': payload}) notifier = rpc.get_notifier( service="taskmanager", publisher_id=publisher_id) notifier.info(self.context, event_type, payload) class ConfigurationMixin(object): """Configuration Mixin Configuration related tasks for instances and resizes. 
""" def _render_config(self, flavor): config = template.SingleInstanceConfigTemplate( self.datastore_version, flavor, self.id) config.render() return config def _render_replica_source_config(self, flavor): config = template.ReplicaSourceConfigTemplate( self.datastore_version, flavor, self.id) config.render() return config def _render_replica_config(self, flavor): config = template.ReplicaConfigTemplate( self.datastore_version, flavor, self.id) config.render() return config def _render_config_dict(self, flavor): config = template.SingleInstanceConfigTemplate( self.datastore_version, flavor, self.id) ret = config.render_dict() LOG.debug("the default template dict of mysqld section: %s", ret) return ret class ClusterTasks(Cluster): def update_statuses_on_failure(self, cluster_id, shard_id=None, status=None): if CONF.update_status_on_fail: if shard_id: db_instances = DBInstance.find_all(cluster_id=cluster_id, shard_id=shard_id).all() else: db_instances = DBInstance.find_all( cluster_id=cluster_id).all() for db_instance in db_instances: db_instance.set_task_status( status or InstanceTasks.BUILDING_ERROR_SERVER) db_instance.save() @classmethod def get_ip(cls, instance): return instance.get_visible_ip_addresses()[0] def _all_instances_ready(self, instance_ids, cluster_id, shard_id=None): """Wait for all instances to get READY.""" return self._all_instances_acquire_status( instance_ids, cluster_id, shard_id, ServiceStatuses.INSTANCE_READY, fast_fail_statuses=[ServiceStatuses.FAILED, ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT]) def _all_instances_shutdown(self, instance_ids, cluster_id, shard_id=None): """Wait for all instances to go SHUTDOWN.""" return self._all_instances_acquire_status( instance_ids, cluster_id, shard_id, ServiceStatuses.SHUTDOWN, fast_fail_statuses=[ServiceStatuses.FAILED, ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT]) def _all_instances_running(self, instance_ids, cluster_id, shard_id=None): """Wait for all instances to become ACTIVE.""" return self._all_instances_acquire_status( instance_ids, cluster_id, shard_id, ServiceStatuses.RUNNING, fast_fail_statuses=[ServiceStatuses.FAILED, ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT]) def _all_instances_acquire_status( self, instance_ids, cluster_id, shard_id, expected_status, fast_fail_statuses=None): def _is_fast_fail_status(status): return ((fast_fail_statuses is not None) and ((status == fast_fail_statuses) or (status in fast_fail_statuses))) def _all_have_status(ids): for instance_id in ids: status = InstanceServiceStatus.find_by( instance_id=instance_id).get_status() task_status = DBInstance.find_by( id=instance_id).get_task_status() if (_is_fast_fail_status(status) or (task_status == InstanceTasks.BUILDING_ERROR_SERVER)): # if one has failed, no need to continue polling LOG.debug("Instance %(id)s has acquired a fast-fail " "status %(status)s and" " task_status %(task_status)s.", {'id': instance_id, 'status': status, 'task_status': task_status}) return True if status != expected_status: # if one is not in the expected state, continue polling LOG.debug("Instance %(id)s was %(status)s.", {'id': instance_id, 'status': status}) return False return True def _instance_ids_with_failures(ids): LOG.debug("Checking for service failures on instances: %s", ids) failed_instance_ids = [] for instance_id in ids: status = InstanceServiceStatus.find_by( instance_id=instance_id).get_status() task_status = DBInstance.find_by( id=instance_id).get_task_status() if (_is_fast_fail_status(status) or (task_status == InstanceTasks.BUILDING_ERROR_SERVER)): 
                    failed_instance_ids.append(instance_id)
            return failed_instance_ids

        LOG.debug("Polling until all instances acquire %(expected)s "
                  "status: %(ids)s",
                  {'expected': expected_status, 'ids': instance_ids})
        try:
            utils.poll_until(lambda: instance_ids,
                             lambda ids: _all_have_status(ids),
                             sleep_time=CONF.usage_sleep_time,
                             time_out=CONF.usage_timeout)
        except PollTimeOut:
            LOG.exception("Timed out while waiting for all instances "
                          "to become %s.", expected_status)
            self.update_statuses_on_failure(cluster_id, shard_id)
            return False

        failed_ids = _instance_ids_with_failures(instance_ids)
        if failed_ids:
            LOG.error("Some instances failed: %s", failed_ids)
            self.update_statuses_on_failure(cluster_id, shard_id)
            return False

        LOG.debug("All instances have acquired the expected status %s.",
                  expected_status)
        return True

    def delete_cluster(self, context, cluster_id):
        LOG.debug("Begin delete_cluster for id: %s", cluster_id)

        def all_instances_marked_deleted():
            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()
            return len(db_instances) == 0

        try:
            utils.poll_until(all_instances_marked_deleted,
                             sleep_time=2,
                             time_out=CONF.cluster_delete_time_out)
        except PollTimeOut:
            LOG.error("Timed out while waiting for instances to be marked "
                      "as deleted.")
            return
        LOG.debug("Setting cluster %s as deleted.", cluster_id)
        cluster = DBCluster.find_by(id=cluster_id)
        cluster.deleted = True
        cluster.deleted_at = timeutils.utcnow()
        cluster.task_status = tasks.ClusterTasks.NONE
        cluster.save()
        LOG.debug("End delete_cluster for id: %s", cluster_id)

    def rolling_restart_cluster(self, context, cluster_id, delay_sec=0):
        LOG.debug("Begin rolling cluster restart for id: %s", cluster_id)

        def _restart_cluster_instance(instance):
            LOG.debug("Restarting instance with id: %s", instance.id)
            context.notification = (
                DBaaSInstanceRestart(context, **request_info))
            with StartNotification(context, instance_id=instance.id):
                with EndNotification(context):
                    instance.update_db(task_status=InstanceTasks.REBOOTING)
                    instance.restart()

        timeout = Timeout(CONF.cluster_usage_timeout)
        cluster_notification = context.notification
        request_info = cluster_notification.serialize(context)
        try:
            node_db_inst = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()
            for index, db_inst in enumerate(node_db_inst):
                if index > 0:
                    LOG.debug(
                        "Waiting (%ds) for restarted nodes to rejoin the "
                        "cluster before proceeding.", delay_sec)
                    time.sleep(delay_sec)
                instance = BuiltInstanceTasks.load(context, db_inst.id)
                _restart_cluster_instance(instance)
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception("Timeout for restarting cluster.")
            raise
        except Exception:
            LOG.exception("Error restarting cluster %s.", cluster_id)
            raise
        finally:
            context.notification = cluster_notification
            timeout.cancel()
            self.reset_task()
        LOG.debug("End rolling restart for id: %s.", cluster_id)

    def rolling_upgrade_cluster(self, context, cluster_id,
                                datastore_version, ordering_function=None):
        LOG.debug("Begin rolling cluster upgrade for id: %s.", cluster_id)

        def _upgrade_cluster_instance(instance):
            LOG.debug("Upgrading instance with id: %s.", instance.id)
            context.notification = (
                DBaaSInstanceUpgrade(context, **request_info))
            with StartNotification(
                    context, instance_id=instance.id,
                    datastore_version_id=datastore_version.id):
                with EndNotification(context):
                    instance.update_db(
                        datastore_version_id=datastore_version.id,
                        task_status=InstanceTasks.UPGRADING)
                    instance.upgrade(datastore_version)

        timeout = Timeout(CONF.cluster_usage_timeout)
        cluster_notification = context.notification
        request_info = cluster_notification.serialize(context)
        try:
            instances = []
            for db_inst in DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all():
                instance = BuiltInstanceTasks.load(
                    context, db_inst.id)
                instances.append(instance)
            if ordering_function is not None:
                instances.sort(key=ordering_function)
            for instance in instances:
                _upgrade_cluster_instance(instance)
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception("Timeout for upgrading cluster.")
            self.update_statuses_on_failure(
                cluster_id, status=InstanceTasks.UPGRADING_ERROR)
        except Exception:
            LOG.exception("Error upgrading cluster %s.", cluster_id)
            self.update_statuses_on_failure(
                cluster_id, status=InstanceTasks.UPGRADING_ERROR)
        finally:
            context.notification = cluster_notification
            timeout.cancel()
        LOG.debug("End upgrade_cluster for id: %s.", cluster_id)


class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
    """FreshInstanceTasks contains the tasks related to an instance that is
    not yet associated with a compute server.
    """

    def wait_for_instance(self, timeout, flavor):
        # Make sure the service becomes active before sending a usage
        # record to avoid over billing a customer for an instance that
        # fails to build properly.
        error_message = ''
        error_details = ''
        try:
            LOG.info("Waiting for instance %s to be up and running, with "
                     "timeout %ss", self.id, timeout)
            utils.poll_until(self._service_is_active,
                             sleep_time=CONF.usage_sleep_time,
                             time_out=timeout)
            LOG.info("Created instance %s successfully.", self.id)
            TroveInstanceCreate(instance=self,
                                instance_size=flavor['ram']).notify()
        except (TroveError, PollTimeOut) as ex:
            LOG.error("Failed to create instance %s, error: %s.",
                      self.id, str(ex))
            self.update_statuses_on_time_out()
            error_message = "%s" % ex
            error_details = traceback.format_exc()
        except Exception as ex:
            LOG.error("Failed to send usage create-event for instance %s, "
                      "error: %s", self.id, str(ex))
            error_message = "%s" % ex
            error_details = traceback.format_exc()
        finally:
            if error_message:
                inst_models.save_instance_fault(
                    self.id, error_message, error_details,
                    skip_delta=CONF.usage_sleep_time + 1
                )

    def _create_port(self, network, security_groups, is_mgmt=False,
                     is_public=False):
        name = 'trove-%s' % self.id
        type = 'Management' if is_mgmt else 'User'
        description = '%s port for trove instance %s' % (type, self.id)
        try:
            port_id = neutron.create_port(
                self.neutron_client, name,
                description, network,
                security_groups,
                is_public=is_public
            )
        except Exception:
            error = ("Failed to create %s port for instance %s"
                     % (type, self.id))
            LOG.exception(error)
            self.update_db(
                task_status=inst_models.InstanceTasks.BUILDING_ERROR_PORT
            )
            raise TroveError(message=error)
        return port_id

    def _prepare_networks_for_instance(self, datastore_manager, nics,
                                       access=None):
        """Prepare the networks for the trove instance.

        'nics' contains the requested networks; the management network,
        if configured, always comes last.
        """
        LOG.info("Preparing networks for the instance %s", self.id)
        security_group = None
        networks = copy.deepcopy(nics)
        access = access or {}

        if CONF.trove_security_groups_support:
            security_group = self._create_secgroup(
                datastore_manager,
                access.get('allowed_cidrs', [])
            )
            LOG.info(
                "Security group %s created for instance %s",
                security_group, self.id
            )

        # Create management port
        if CONF.management_networks:
            port_sgs = [security_group] if security_group else []
            if len(CONF.management_security_groups) > 0:
                port_sgs = CONF.management_security_groups
            # The management network is always the last one
            networks.pop(-1)
            port_id = self._create_port(
                CONF.management_networks[-1],
                port_sgs,
                is_mgmt=True
            )
            LOG.info("Management port %s created for instance: %s", port_id,
                     self.id)
            networks.append({"port-id": port_id})

        # Create port in the user defined network, associate floating IP if
        # needed
        if len(networks) > 1 or not CONF.management_networks:
            network = networks.pop(0).get("net-id")
            port_sgs = [security_group] if security_group else []
            port_id = self._create_port(
                network,
                port_sgs,
                is_mgmt=False,
                is_public=access.get('is_public', False)
            )
            LOG.info("User port %s created for instance %s", port_id,
                     self.id)
            networks.insert(0, {"port-id": port_id})

        LOG.info(
            "Finished preparing networks for the instance %s, networks: %s",
            self.id, networks
        )
        return networks

    def create_instance(self, flavor, image_id, databases, users,
                        datastore_manager, packages, volume_size,
                        backup_id, availability_zone, root_password,
                        nics, overrides, cluster_config, snapshot,
                        volume_type, modules, scheduler_hints, access=None):
        """Create trove instance.

        It is the caller's responsibility to ensure that
        FreshInstanceTasks.wait_for_instance is called after
        create_instance to ensure that the proper usage event gets sent.
        """
        LOG.info(
            "Creating instance %s, nics: %s, access: %s",
            self.id, nics, access
        )

        networks = self._prepare_networks_for_instance(
            datastore_manager, nics, access=access
        )
        files = self.get_injected_files(datastore_manager)
        cinder_volume_type = volume_type or CONF.cinder_volume_type
        volume_info = self._create_server_volume(
            flavor['id'], image_id, datastore_manager, volume_size,
            availability_zone, networks, files, cinder_volume_type,
            scheduler_hints
        )

        config = self._render_config(flavor)

        backup_info = None
        if backup_id is not None:
            backup = bkup_models.Backup.get_by_id(self.context, backup_id)
            backup_info = {'id': backup_id,
                           'instance_id': backup.instance_id,
                           'location': backup.location,
                           'type': backup.backup_type,
                           'checksum': backup.checksum,
                           }
        self._guest_prepare(flavor['ram'], volume_info, packages,
                            databases, users, backup_info,
                            config.config_contents, root_password,
                            overrides, cluster_config, snapshot, modules)

        if root_password:
            self.report_root_enabled()

        if not self.db_info.task_status.is_error:
            self.reset_task_status()

        # When DNS is supported, we attempt to add this after the
        # instance is prepared. Otherwise, if DNS fails, instances
        # end up in a poorer state and there's no tooling around
        # re-sending the prepare call; retrying DNS is much easier.
try: self._create_dns_entry() except Exception as e: log_fmt = "Error creating DNS entry for instance: %s" exc_fmt = _("Error creating DNS entry for instance: %s") err = inst_models.InstanceTasks.BUILDING_ERROR_DNS self._log_and_raise(e, log_fmt, exc_fmt, self.id, err) def attach_replication_slave(self, snapshot, flavor): LOG.debug("Calling attach_replication_slave for %s.", self.id) try: replica_config = self._render_replica_config(flavor) self.guest.attach_replication_slave(snapshot, replica_config.config_contents) except GuestError as e: log_fmt = "Error attaching instance %s as replica." exc_fmt = _("Error attaching instance %s as replica.") err = inst_models.InstanceTasks.BUILDING_ERROR_REPLICA self._log_and_raise(e, log_fmt, exc_fmt, self.id, err) def get_replication_master_snapshot(self, context, slave_of_id, flavor, backup_id=None, replica_number=1): # First check to see if we need to take a backup master = BuiltInstanceTasks.load(context, slave_of_id) backup_required = master.backup_required_for_replication() if backup_required: # if we aren't passed in a backup id, look it up to possibly do # an incremental backup, thus saving time if not backup_id: backup = Backup.get_last_completed( context, slave_of_id, include_incremental=True) if backup: backup_id = backup.id else: LOG.debug('Will skip replication master backup') snapshot_info = { 'name': "Replication snapshot for %s" % self.id, 'description': "Backup image used to initialize " "replication slave", 'instance_id': slave_of_id, 'parent_id': backup_id, 'tenant_id': self.tenant_id, 'state': BackupState.NEW, 'datastore_version_id': self.datastore_version.id, 'deleted': False, 'replica_number': replica_number, } replica_backup_id = None if backup_required: # Only do a backup if it's the first replica if replica_number == 1: try: db_info = DBBackup.create(**snapshot_info) replica_backup_id = db_info.id except InvalidModelError: log_fmt = ("Unable to create replication snapshot record " "for instance: %s") exc_fmt = _("Unable to create replication snapshot record " "for instance: %s") LOG.exception(log_fmt, self.id) raise BackupCreationError(exc_fmt % self.id) if backup_id: # Look up the parent backup info or fail early if not # found or if the user does not have access to the parent. 
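                    # NOTE (editorial illustration, not part of the original
                    # module): when a completed parent backup exists, the
                    # snapshot_info grows a 'parent' entry roughly like
                    #     {'parent': {'location': 'swift://.../backup.tar.gz',
                    #                 'checksum': 'd41d8cd9...'}}
                    # which lets the guest take an incremental backup instead
                    # of a full one (the values here are placeholders).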
_parent = Backup.get_by_id(context, backup_id) parent = { 'location': _parent.location, 'checksum': _parent.checksum, } snapshot_info.update({ 'parent': parent, }) else: # we've been passed in the actual replica backup id, # so just use it replica_backup_id = backup_id try: snapshot_info.update({ 'id': replica_backup_id, 'datastore': master.datastore.name, 'datastore_version': master.datastore_version.name, }) snapshot = master.get_replication_snapshot( snapshot_info, flavor=master.flavor_id) snapshot.update({ 'config': self._render_replica_config(flavor).config_contents }) return snapshot except Exception as e_create: create_log_fmt = ( "Error creating replication snapshot from " "instance %(source)s for new replica %(replica)s.") create_exc_fmt = ( "Error creating replication snapshot from " "instance %(source)s for new replica %(replica)s.") create_fmt_content = { 'source': slave_of_id, 'replica': self.id } err = inst_models.InstanceTasks.BUILDING_ERROR_REPLICA e_create_fault = create_log_fmt % create_fmt_content e_create_stack = traceback.format_exc() # we persist fault details to source instance inst_models.save_instance_fault(slave_of_id, e_create_fault, e_create_stack) # if the delete of the 'bad' backup fails, it'll mask the # create exception, so we trap it here try: # Only try to delete the backup if it's the first replica if replica_number == 1 and backup_required: Backup.delete(context, replica_backup_id) except Exception as e_delete: LOG.error(create_log_fmt, create_fmt_content) # Make sure we log any unexpected errors from the create if not isinstance(e_create, TroveError): LOG.exception(e_create) delete_log_fmt = ( "An error occurred while deleting a bad " "replication snapshot from instance %(source)s.") delete_exc_fmt = _( "An error occurred while deleting a bad " "replication snapshot from instance %(source)s.") # we've already logged the create exception, so we'll raise # the delete (otherwise the create will be logged twice) self._log_and_raise(e_delete, delete_log_fmt, delete_exc_fmt, {'source': slave_of_id}, err) # the delete worked, so just log the original problem with create self._log_and_raise(e_create, create_log_fmt, create_exc_fmt, create_fmt_content, err) def report_root_enabled(self): mysql_models.RootHistory.create(self.context, self.id) def update_statuses_on_time_out(self): if CONF.update_status_on_fail: # Updating service status service = InstanceServiceStatus.find_by(instance_id=self.id) service.set_status(ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT) service.save() LOG.error( "Service status: %s, service error description: %s", ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT.api_status, ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT.description ) # Updating instance status db_info = DBInstance.find_by(id=self.id, deleted=False) db_info.set_task_status(InstanceTasks.BUILDING_ERROR_TIMEOUT_GA) db_info.save() LOG.error( "Trove instance status: %s, Trove instance status " "description: %s", InstanceTasks.BUILDING_ERROR_TIMEOUT_GA.action, InstanceTasks.BUILDING_ERROR_TIMEOUT_GA.db_text ) def _service_is_active(self): """ Check that the database guest is active. This function is meant to be called with poll_until to check that the guest is alive before sending a 'create' message. This prevents over billing a customer for an instance that they can never use. Returns: boolean if the service is active. Raises: TroveError if the service is in a failure state. 
""" service = InstanceServiceStatus.find_by(instance_id=self.id) status = service.get_status() if (status == rd_instance.ServiceStatuses.RUNNING or status == rd_instance.ServiceStatuses.INSTANCE_READY or status == rd_instance.ServiceStatuses.HEALTHY): return True elif status not in [rd_instance.ServiceStatuses.NEW, rd_instance.ServiceStatuses.BUILDING, rd_instance.ServiceStatuses.UNKNOWN, rd_instance.ServiceStatuses.DELETED]: raise TroveError(_("Service not active, status: %s") % status) c_id = self.db_info.compute_instance_id try: server = self.nova_client.servers.get(c_id) except Exception as e: raise TroveError( _("Failed to get server %(server)s for instance %(instance)s, " "error: %(error)s"), server=c_id, instance=self.id, error=str(e) ) server_status = server.status if server_status in [InstanceStatus.ERROR, InstanceStatus.FAILED]: server_fault_message = 'No fault found' try: server_fault_message = server.fault.get('message', 'Unknown') except AttributeError: pass raise TroveError( _("Server not active, status: %(status)s, fault message: " "%(srv_msg)s") % {'status': server_status, 'srv_msg': server_fault_message} ) return False def _build_sg_rules_mapping(self, rule_ports): final = [] cidr = CONF.trove_security_group_rule_cidr for port_or_range in set(rule_ports): from_, to_ = port_or_range[0], port_or_range[-1] final.append({'cidr': cidr, 'from_': str(from_), 'to_': str(to_)}) return final def _create_server_volume(self, flavor_id, image_id, datastore_manager, volume_size, availability_zone, nics, files, volume_type, scheduler_hints): LOG.debug("Begin _create_server_volume for id: %s", self.id) server = None volume_info = self._build_volume_info(datastore_manager, volume_size=volume_size, volume_type=volume_type) block_device_mapping_v2 = volume_info['block_device'] try: server = self._create_server( flavor_id, image_id, datastore_manager, block_device_mapping_v2, availability_zone, nics, files, scheduler_hints ) server_id = server.id # Save server ID. self.update_db(compute_instance_id=server_id) except Exception as e: log_fmt = "Failed to create server for instance %s" exc_fmt = _("Failed to create server for instance %s") err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER self._log_and_raise(e, log_fmt, exc_fmt, self.id, err) LOG.debug("End _create_server_volume for id: %s", self.id) return volume_info def _build_volume_info(self, datastore_manager, volume_size=None, volume_type=None): volume_info = None volume_support = self.volume_support device_path = self.device_path mount_point = CONF.get(datastore_manager).mount_point LOG.debug("trove volume support = %s", volume_support) if volume_support: try: volume_info = self._create_volume( volume_size, volume_type, datastore_manager) except Exception as e: log_fmt = "Failed to create volume for instance %s" exc_fmt = _("Failed to create volume for instance %s") err = inst_models.InstanceTasks.BUILDING_ERROR_VOLUME self._log_and_raise(e, log_fmt, exc_fmt, self.id, err) else: LOG.debug("device_path = %(path)s\n" "mount_point = %(point)s", { "path": device_path, "point": mount_point }) volume_info = { 'block_device': None, 'device_path': device_path, 'mount_point': mount_point, } return volume_info # We remove all translations for messages logging execpet those for # exception raising. And we cannot use _(xxxx) instead of _("xxxx") # because of H701 PEP8 checking. So we pass log format , exception # format, and format content in and do translations only if needed. 
def _log_and_raise(self, exc, log_fmt, exc_fmt, fmt_content, task_status): LOG.error("%(message)s\n%(exc)s\n%(trace)s", {"message": log_fmt % fmt_content, "exc": exc, "trace": traceback.format_exc()}) self.update_db(task_status=task_status) exc_message = '\n%s' % exc if exc else '' full_message = "%s%s" % (exc_fmt % fmt_content, exc_message) raise TroveError(message=full_message) def _create_volume(self, volume_size, volume_type, datastore_manager): LOG.debug("Begin _create_volume for id: %s", self.id) volume_client = create_cinder_client(self.context, self.region_name) volume_desc = ("datastore volume for %s" % self.id) volume_ref = volume_client.volumes.create( volume_size, name="trove-%s" % self.id, description=volume_desc, volume_type=volume_type) # Record the volume ID in case something goes wrong. self.update_db(volume_id=volume_ref.id) utils.poll_until( lambda: volume_client.volumes.get(volume_ref.id), lambda v_ref: v_ref.status in ['available', 'error'], sleep_time=2, time_out=CONF.volume_time_out) v_ref = volume_client.volumes.get(volume_ref.id) if v_ref.status in ['error']: raise VolumeCreationFailure() LOG.debug("End _create_volume for id: %s", self.id) return self._build_volume(v_ref, datastore_manager) def _build_volume(self, v_ref, datastore_manager): LOG.debug("Created volume %s", v_ref) # TODO(zhaochao): from Liberty, Nova libvirt driver does not honor # user-supplied device name anymore, so we may need find a new # method to make sure the volume is correctly mounted inside the # guest, please refer to the 'intermezzo-problem-with-device-names' # section of Nova user referrence at: # https://docs.openstack.org/nova/latest/user/block-device-mapping.html bdm = CONF.block_device_mapping # use Nova block_device_mapping_v2, referrence: # https://docs.openstack.org/api-ref/compute/#create-server # setting the delete_on_terminate instance to true=1 block_device_v2 = [{ "uuid": v_ref.id, "source_type": "volume", "destination_type": "volume", "device_name": bdm, "volume_size": v_ref.size, "delete_on_termination": True }] created_volumes = [{'id': v_ref.id, 'size': v_ref.size}] device_path = self.device_path mount_point = CONF.get(datastore_manager).mount_point LOG.debug("block_device = %(device)s\n" "volume = %(volume)s\n" "device_path = %(path)s\n" "mount_point = %(point)s", {"device": block_device_v2, "volume": created_volumes, "path": device_path, "point": mount_point}) volume_info = {'block_device': block_device_v2, 'device_path': device_path, 'mount_point': mount_point} return volume_info def _prepare_userdata(self, datastore_manager): userdata = None cloudinit = os.path.join(CONF.get('cloudinit_location'), "%s.cloudinit" % datastore_manager) if os.path.isfile(cloudinit): with open(cloudinit, "r") as f: userdata = f.read() return userdata def _create_server(self, flavor_id, image_id, datastore_manager, block_device_mapping_v2, availability_zone, nics, files={}, scheduler_hints=None): userdata = self._prepare_userdata(datastore_manager) name = self.hostname or self.name bdmap_v2 = block_device_mapping_v2 config_drive = CONF.use_nova_server_config_drive key_name = CONF.nova_keypair server = self.nova_client.servers.create( name, image_id, flavor_id, key_name=key_name, nics=nics, block_device_mapping_v2=bdmap_v2, files=files, userdata=userdata, availability_zone=availability_zone, config_drive=config_drive, scheduler_hints=scheduler_hints, ) LOG.debug("Created new compute instance %(server_id)s " "for database instance %(id)s", {'server_id': server.id, 'id': self.id}) return 
server def _guest_prepare(self, flavor_ram, volume_info, packages, databases, users, backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None, modules=None): LOG.debug("Entering guest_prepare") # Now wait for the response from the create to do additional work self.guest.prepare(flavor_ram, packages, databases, users, device_path=volume_info['device_path'], mount_point=volume_info['mount_point'], backup_info=backup_info, config_contents=config_contents, root_password=root_password, overrides=overrides, cluster_config=cluster_config, snapshot=snapshot, modules=modules) def _create_dns_entry(self): dns_support = CONF.trove_dns_support LOG.debug("trove dns support = %s", dns_support) if dns_support: LOG.debug("%(gt)s: Creating dns entry for instance: %(id)s", {'gt': greenthread.getcurrent(), 'id': self.id}) dns_client = create_dns_client(self.context) def get_server(): c_id = self.db_info.compute_instance_id return self.nova_client.servers.get(c_id) def ip_is_available(server): LOG.debug("Polling for ip addresses: $%s ", server.addresses) if server.addresses != {}: return True elif (server.addresses == {} and server.status != InstanceStatus.ERROR): return False elif (server.addresses == {} and server.status == InstanceStatus.ERROR): LOG.error("Failed to create DNS entry for instance " "%(instance)s. Server status was " "%(status)s).", {'instance': self.id, 'status': server.status}) raise TroveError(status=server.status) utils.poll_until(get_server, ip_is_available, sleep_time=1, time_out=CONF.dns_time_out) server = self.nova_client.servers.get( self.db_info.compute_instance_id) self.db_info.addresses = server.addresses LOG.debug("Creating dns entry...") ip = self.dns_ip_address if not ip: raise TroveError(_("Failed to create DNS entry for instance " "%s. No IP available.") % self.id) dns_client.create_instance_entry(self.id, ip) LOG.debug("Successfully created DNS entry for instance: %s", self.id) else: LOG.debug("%(gt)s: DNS not enabled for instance: %(id)s", {'gt': greenthread.getcurrent(), 'id': self.id}) def _create_secgroup(self, datastore_manager, allowed_cidrs): name = "%s-%s" % (CONF.trove_security_group_name_prefix, self.id) try: sg_id = neutron.create_security_group( self.neutron_client, name, self.id ) if not allowed_cidrs: allowed_cidrs = [CONF.trove_security_group_rule_cidr] tcp_ports = CONF.get(datastore_manager).tcp_ports udp_ports = CONF.get(datastore_manager).udp_ports neutron.create_security_group_rule( self.neutron_client, sg_id, 'tcp', tcp_ports, allowed_cidrs ) neutron.create_security_group_rule( self.neutron_client, sg_id, 'udp', udp_ports, allowed_cidrs ) except Exception: message = ("Failed to create security group for instance %s" % self.id) LOG.exception(message) self.update_db( task_status=inst_models.InstanceTasks.BUILDING_ERROR_SEC_GROUP ) raise TroveError(message=message) return sg_id class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin): """ BuiltInstanceTasks contains the tasks related an instance that already associated with a compute server. 
""" def resize_volume(self, new_size): LOG.info("Resizing volume for instance %(instance_id)s from " "%(old_size)s GB to %(new_size)s GB.", {'instance_id': self.id, 'old_size': self.volume_size, 'new_size': new_size}) action = ResizeVolumeAction(self, self.volume_size, new_size) action.execute() LOG.info("Resized volume for instance %s successfully.", self.id) def resize_flavor(self, old_flavor, new_flavor): LOG.info("Resizing instance %(instance_id)s from flavor " "%(old_flavor)s to %(new_flavor)s.", {'instance_id': self.id, 'old_flavor': old_flavor['id'], 'new_flavor': new_flavor['id']}) action = ResizeAction(self, old_flavor, new_flavor) action.execute() LOG.info("Resized instance %s successfully.", self.id) def migrate(self, host): LOG.info("Initiating migration to host %s.", host) action = MigrateAction(self, host) action.execute() def create_backup(self, backup_info): LOG.info("Initiating backup for instance %s, backup_info: %s", self.id, backup_info) self.guest.create_backup(backup_info) def backup_required_for_replication(self): LOG.debug("Check if replication backup is required for instance %s.", self.id) return self.guest.backup_required_for_replication() def get_replication_snapshot(self, snapshot_info, flavor): def _get_replication_snapshot(): LOG.debug("Calling get_replication_snapshot on %s.", self.id) try: rep_source_config = self._render_replica_source_config(flavor) result = self.guest.get_replication_snapshot( snapshot_info, rep_source_config.config_contents) LOG.info("Finnished getting replication snapshot for " "instance %s", self.id) return result except Exception: LOG.exception("Failed to get replication snapshot from %s.", self.id) raise return run_with_quotas(self.context.project_id, {'backups': 1}, _get_replication_snapshot) def detach_replica(self, master, for_failover=False): LOG.debug("Calling detach_replica on %s", self.id) try: self.guest.detach_replica(for_failover) self.update_db(slave_of_id=None) self.slave_list = None except (GuestError, GuestTimeout): LOG.exception("Failed to detach replica %s.", self.id) raise finally: if not for_failover: self.reset_task_status() def attach_replica(self, master): LOG.debug("Calling attach_replica on %s", self.id) try: replica_info = master.guest.get_replica_context() flavor = self.nova_client.flavors.get(self.flavor_id) slave_config = self._render_replica_config(flavor).config_contents self.guest.attach_replica(replica_info, slave_config) self.update_db(slave_of_id=master.id) self.slave_list = None except (GuestError, GuestTimeout): LOG.exception("Failed to attach replica %s.", self.id) raise def make_read_only(self, read_only): LOG.debug("Calling make_read_only on %s", self.id) self.guest.make_read_only(read_only) def _get_floating_ips(self): """Returns floating ips as a dict indexed by the ip.""" floating_ips = {} network_floating_ips = self.neutron_client.list_floatingips() for ip in network_floating_ips.get('floatingips'): floating_ips.update( {ip.get('floating_ip_address'): ip.get('id')}) LOG.debug("In _get_floating_ips(), returning %s", floating_ips) return floating_ips def detach_public_ips(self): LOG.debug("Begin detach_public_ips for instance %s", self.id) removed_ips = [] floating_ips = self._get_floating_ips() for ip in self.get_visible_ip_addresses(): if ip in floating_ips: fip_id = floating_ips[ip] self.neutron_client.update_floatingip( fip_id, {'floatingip': {'port_id': None}}) removed_ips.append(fip_id) return removed_ips def attach_public_ips(self, ips): LOG.debug("Begin attach_public_ips for instance 
%s", self.id) server_id = self.db_info.compute_instance_id # NOTE(zhaochao): in Nova's addFloatingIp, the new floating ip will # always be associated with the first IPv4 fixed address of the Nova # instance, we're doing the same thing here, after add_floating_ip is # removed from novaclient. server_ports = (self.neutron_client.list_ports(device_id=server_id) .get('ports')) fixed_address, port_id = next( (fixed_ip['ip_address'], port['id']) for port in server_ports for fixed_ip in port.get('fixed_ips') if netutils.is_valid_ipv4(fixed_ip['ip_address'])) for fip_id in ips: self.neutron_client.update_floatingip( fip_id, {'floatingip': { 'port_id': port_id, 'fixed_ip_address': fixed_address}}) def enable_as_master(self): LOG.debug("Calling enable_as_master on %s", self.id) flavor = self.nova_client.flavors.get(self.flavor_id) replica_source_config = self._render_replica_source_config(flavor) self.update_db(slave_of_id=None) self.slave_list = None self.guest.enable_as_master(replica_source_config.config_contents) def get_last_txn(self): LOG.debug("Calling get_last_txn on %s", self.id) return self.guest.get_last_txn() def get_latest_txn_id(self): LOG.debug("Calling get_latest_txn_id on %s", self.id) return self.guest.get_latest_txn_id() def wait_for_txn(self, txn): LOG.debug("Calling wait_for_txn on %s", self.id) if txn: self.guest.wait_for_txn(txn) def cleanup_source_on_replica_detach(self, replica_info): LOG.debug("Calling cleanup_source_on_replica_detach on %s", self.id) self.guest.cleanup_source_on_replica_detach(replica_info) def demote_replication_master(self): LOG.debug("Calling demote_replication_master on %s", self.id) self.guest.demote_replication_master() def reboot(self): try: LOG.debug("Stopping datastore on instance %s.", self.id) try: self.guest.stop_db() except (exception.GuestError, exception.GuestTimeout) as e: # Acceptable to be here if db was already in crashed state # Also we check guest state before issuing reboot LOG.debug(str(e)) LOG.info("Rebooting instance %s.", self.id) self.server.reboot() def update_server_info(): self.refresh_compute_server_info() return self.server_status_matches(['ACTIVE']) utils.poll_until( update_server_info, sleep_time=3, time_out=CONF.reboot_time_out) # Set the status to PAUSED. The guest agent will reset the status # when the reboot completes and MySQL is running. 
            self.set_datastore_status_to_paused()
            LOG.info("Rebooted instance %s successfully.", self.id)
        except Exception as e:
            LOG.error("Failed to reboot instance %(id)s: %(e)s",
                      {'id': self.id, 'e': str(e)})
        finally:
            self.reset_task_status()

    def restart(self):
        LOG.info("Initiating datastore restart on instance %s.", self.id)
        try:
            self.guest.restart()
        except GuestError:
            LOG.error("Failed to initiate datastore restart on instance "
                      "%s.", self.id)
        finally:
            self.reset_task_status()

    def guest_log_list(self):
        LOG.info("Retrieving guest log list for instance %s.", self.id)
        try:
            return self.guest.guest_log_list()
        except GuestError:
            LOG.error("Failed to retrieve guest log list for instance "
                      "%s.", self.id)
        finally:
            self.reset_task_status()

    def guest_log_action(self, log_name, enable, disable, publish, discard):
        LOG.info("Processing guest log for instance %s.", self.id)
        try:
            return self.guest.guest_log_action(log_name, enable, disable,
                                               publish, discard)
        except GuestError:
            LOG.error("Failed to process guest log for instance %s.",
                      self.id)
        finally:
            self.reset_task_status()

    def refresh_compute_server_info(self):
        """Refreshes the compute server field."""
        server = self.nova_client.servers.get(self.server.id)
        self.server = server

    def _refresh_datastore_status(self):
        """Gets the latest instance service status from the datastore and
        updates the reference held by this BuiltInstanceTask.
        """
        self.datastore_status = InstanceServiceStatus.find_by(
            instance_id=self.id)

    def set_datastore_status_to_paused(self):
        """Updates the InstanceServiceStatus for this BuiltInstance to
        PAUSED. This does not change the reference held by this
        BuiltInstanceTask.
        """
        datastore_status = InstanceServiceStatus.find_by(instance_id=self.id)
        datastore_status.status = rd_instance.ServiceStatuses.PAUSED
        datastore_status.save()

    def upgrade(self, datastore_version):
        LOG.info("Upgrading instance %s to new datastore version %s",
                 self.id, datastore_version)

        def server_finished_rebuilding():
            self.refresh_compute_server_info()
            return not self.server_status_matches(['REBUILD'])

        try:
            upgrade_info = self.guest.pre_upgrade()

            if self.volume_id:
                volume = self.volume_client.volumes.get(self.volume_id)
                volume_device = self._fix_device_path(
                    volume.attachments[0]['device'])
                if volume:
                    upgrade_info['device'] = volume_device

            # BUG(1650518): Cleanup in the Pike release. Some instances
            # that we will be upgrading will predate the secure serializer
            # and will have no instance_key entries. If this is one of
            # those instances, make a key. That will make it appear in
            # the injected files that are generated next. From this
            # point, and until the guest comes up, attempting to send
            # messages to it will fail because the RPC framework will
            # encrypt messages to a guest which potentially doesn't
            # have the code to handle it.
            if CONF.enable_secure_rpc_messaging and (
                    self.db_info.encrypted_key is None):
                encrypted_key = cu.encode_data(cu.encrypt_data(
                    cu.generate_random_key(),
                    CONF.inst_rpc_key_encr_key))
                self.update_db(encrypted_key=encrypted_key)
                LOG.debug("Generated unique RPC encryption key for "
                          "instance = %(id)s, key = %(key)s",
                          {'id': self.id, 'key': encrypted_key})

            injected_files = self.get_injected_files(
                datastore_version.manager)
            LOG.debug("Rebuilding instance %(instance)s with image "
                      "%(image)s.",
                      {'instance': self,
                       'image': datastore_version.image_id})
            self.server.rebuild(datastore_version.image_id,
                                files=injected_files)
            utils.poll_until(
                server_finished_rebuilding,
                sleep_time=5, time_out=600)
            if not self.server_status_matches(['ACTIVE']):
                raise TroveError(_("Instance %(instance)s failed to "
                                   "upgrade to %(datastore_version)s"),
                                 instance=self,
                                 datastore_version=datastore_version)
            LOG.info('Finished rebuilding server for instance %s', self.id)

            self.guest.post_upgrade(upgrade_info)
            self.reset_task_status()
            LOG.info("Finished upgrading instance %s to new datastore "
                     "version %s", self.id, datastore_version)
        except Exception as e:
            LOG.exception(e)
            err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
            self.update_db(task_status=err)
            raise e

    # Some cinder drivers appear to return "vdb" instead of "/dev/vdb".
    # We need to account for that.
    def _fix_device_path(self, device):
        if device.startswith("/dev"):
            return device
        else:
            return "/dev/%s" % device


class BackupTasks(object):

    @classmethod
    def _parse_manifest(cls, manifest):
        # manifest is in the format 'container/prefix'
        # where prefix can be 'path' or 'lots/of/paths'
        try:
            container_index = manifest.index('/')
            prefix_index = container_index + 1
        except ValueError:
            return None, None
        container = manifest[:container_index]
        prefix = manifest[prefix_index:]
        return container, prefix

    @classmethod
    def delete_files_from_swift(cls, context, filename):
        container = CONF.backup_swift_container
        client = clients.create_swift_client(context)
        obj = client.head_object(container, filename)
        if 'x-static-large-object' in obj:
            # Static large object
            LOG.debug("Deleting large object file: %(cont)s/%(filename)s",
                      {'cont': container, 'filename': filename})
            client.delete_object(container, filename,
                                 query_string='multipart-manifest=delete')
        else:
            # Single object
            LOG.debug("Deleting object file: %(cont)s/%(filename)s",
                      {'cont': container, 'filename': filename})
            client.delete_object(container, filename)

    @classmethod
    def delete_backup(cls, context, backup_id):
        """Delete backup from swift."""
        LOG.info("Deleting backup %s.", backup_id)
        backup = bkup_models.Backup.get_by_id(context, backup_id)
        try:
            filename = backup.filename
            if filename:
                BackupTasks.delete_files_from_swift(context, filename)
        except ValueError:
            backup.delete()
        except ClientException as e:
            if e.http_status == 404:
                # Backup already deleted in swift
                backup.delete()
            else:
                LOG.exception("Error occurred when deleting from swift.
" "Details: %s", e) backup.state = bkup_models.BackupState.DELETE_FAILED backup.save() raise TroveError(_("Failed to delete swift object for backup " "%s.") % backup_id) else: backup.delete() LOG.info("Deleted backup %s successfully.", backup_id) class ModuleTasks(object): @classmethod def reapply_module(cls, context, module_id, md5, include_clustered, batch_size, batch_delay, force): """Reapply module.""" LOG.info("Reapplying module %s.", module_id) batch_size = batch_size or CONF.module_reapply_max_batch_size batch_delay = batch_delay or CONF.module_reapply_min_batch_delay # Don't let non-admin bypass the safeguards if not context.is_admin: batch_size = min(batch_size, CONF.module_reapply_max_batch_size) batch_delay = max(batch_delay, CONF.module_reapply_min_batch_delay) modules = module_models.Modules.load_by_ids(context, [module_id]) current_md5 = modules[0].md5 LOG.debug("MD5: %(md5)s Force: %(f)s.", {'md5': md5, 'f': force}) # Process all the instances instance_modules = module_models.InstanceModules.load_all( context, module_id=module_id, md5=md5) total_count = instance_modules.count() reapply_count = 0 skipped_count = 0 if instance_modules: module_list = module_views.convert_modules_to_list(modules) for instance_module in instance_modules: instance_id = instance_module.instance_id if (instance_module.md5 != current_md5 or force) and ( not md5 or md5 == instance_module.md5): instance = BuiltInstanceTasks.load(context, instance_id, needs_server=False) if instance and ( include_clustered or not instance.cluster_id): try: module_models.Modules.validate( modules, instance.datastore.id, instance.datastore_version.id) client = create_guest_client(context, instance_id) client.module_apply(module_list) Instance.add_instance_modules( context, instance_id, modules) reapply_count += 1 except exception.ModuleInvalid as ex: LOG.info("Skipping: %s", ex) skipped_count += 1 # Sleep if we've fired off too many in a row. if (batch_size and not reapply_count % batch_size and (reapply_count + skipped_count) < total_count): LOG.debug("Applied module to %(cnt)d of %(total)d " "instances - sleeping for %(batch)ds", {'cnt': reapply_count, 'total': total_count, 'batch': batch_delay}) time.sleep(batch_delay) else: LOG.debug("Instance '%s' not found or doesn't match " "criteria, skipping reapply.", instance_id) skipped_count += 1 else: LOG.debug("Instance '%s' does not match " "criteria, skipping reapply.", instance_id) skipped_count += 1 LOG.info("Reapplied module to %(num)d instances " "(skipped %(skip)d).", {'num': reapply_count, 'skip': skipped_count}) class ResizeVolumeAction(object): """Performs volume resize action.""" def __init__(self, instance, old_size, new_size): self.instance = instance self.old_size = int(old_size) self.new_size = int(new_size) def get_mount_point(self): mount_point = CONF.get( self.instance.datastore_version.manager).mount_point return mount_point def get_device_path(self): return self.instance.device_path def _fail(self, orig_func): LOG.exception("%(func)s encountered an error when " "attempting to resize the volume for " "instance %(id)s. Setting service " "status to failed.", {'func': orig_func.__name__, 'id': self.instance.id}) service = InstanceServiceStatus.find_by(instance_id=self.instance.id) service.set_status(ServiceStatuses.FAILED) service.save() def _recover_restart(self, orig_func): LOG.exception("%(func)s encountered an error when attempting to " "resize the volume for instance %(id)s. 
Trying to " "recover by restarting the " "guest.", {'func': orig_func.__name__, 'id': self.instance.id}) self.instance.restart() def _recover_mount_restart(self, orig_func): LOG.exception("%(func)s encountered an error when attempting to " "resize the volume for instance %(id)s. Trying to " "recover by mounting the volume and then restarting " "the guest.", {'func': orig_func.__name__, 'id': self.instance.id}) self._mount_volume() self.instance.restart() def _recover_full(self, orig_func): LOG.exception("%(func)s encountered an error when attempting to " "resize the volume for instance %(id)s. Trying to " "recover by attaching and" " mounting the volume and then restarting the " "guest.", {'func': orig_func.__name__, 'id': self.instance.id}) self._attach_volume() self._mount_volume() self.instance.restart() def _stop_db(self): LOG.debug("Instance %s calling stop_db.", self.instance.id) self.instance.guest.stop_db() @try_recover def _unmount_volume(self): LOG.debug("Unmounting the volume on instance %(id)s", { 'id': self.instance.id}) mount_point = self.get_mount_point() device_path = self.get_device_path() self.instance.guest.unmount_volume(device_path=device_path, mount_point=mount_point) LOG.debug("Successfully unmounted the volume %(vol_id)s for " "instance %(id)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _detach_volume(self): LOG.debug("Detach volume %(vol_id)s from instance %(id)s", { 'vol_id': self.instance.volume_id, 'id': self.instance.id}) self.instance.nova_client.volumes.delete_server_volume( self.instance.server.id, self.instance.volume_id) def volume_available(): volume = self.instance.volume_client.volumes.get( self.instance.volume_id) return volume.status == 'available' utils.poll_until(volume_available, sleep_time=2, time_out=CONF.volume_time_out) LOG.debug("Successfully detached volume %(vol_id)s from instance " "%(id)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _attach_volume(self): device_path = self.get_device_path() LOG.debug("Attach volume %(vol_id)s to instance %(id)s at " "%(dev)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id, 'dev': device_path}) self.instance.nova_client.volumes.create_server_volume( self.instance.server.id, self.instance.volume_id, device_path) def volume_in_use(): volume = self.instance.volume_client.volumes.get( self.instance.volume_id) return volume.status == 'in-use' utils.poll_until(volume_in_use, sleep_time=2, time_out=CONF.volume_time_out) LOG.debug("Successfully attached volume %(vol_id)s to instance " "%(id)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _resize_fs(self): LOG.debug("Resizing the filesystem for instance %(id)s", { 'id': self.instance.id}) mount_point = self.get_mount_point() device_path = self.get_device_path() self.instance.guest.resize_fs(device_path=device_path, mount_point=mount_point) LOG.debug("Successfully resized volume %(vol_id)s filesystem for " "instance %(id)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _mount_volume(self): LOG.debug("Mount the volume on instance %(id)s", { 'id': self.instance.id}) mount_point = self.get_mount_point() device_path = self.get_device_path() self.instance.guest.mount_volume(device_path=device_path, mount_point=mount_point) LOG.debug("Successfully mounted the volume %(vol_id)s on instance " "%(id)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id}) @try_recover def _extend(self): LOG.debug("Extending volume 
%(vol_id)s for instance %(id)s to " "size %(size)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id, 'size': self.new_size}) self.instance.volume_client.volumes.extend(self.instance.volume_id, self.new_size) LOG.debug("Successfully extended the volume %(vol_id)s for instance " "%(id)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id}) def _verify_extend(self): try: volume = self.instance.volume_client.volumes.get( self.instance.volume_id) if not volume: msg = (_('Failed to get volume %(vol_id)s') % { 'vol_id': self.instance.volume_id}) raise cinder_exceptions.ClientException(msg) def volume_is_new_size(): volume = self.instance.volume_client.volumes.get( self.instance.volume_id) return volume.size == self.new_size utils.poll_until(volume_is_new_size, sleep_time=2, time_out=CONF.volume_time_out) self.instance.update_db(volume_size=self.new_size) except PollTimeOut: LOG.exception("Timeout trying to extend the volume %(vol_id)s " "for instance %(id)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id}) volume = self.instance.volume_client.volumes.get( self.instance.volume_id) if volume.status == 'extending': self._fail(self._verify_extend) elif volume.size != self.new_size: self.instance.update_db(volume_size=volume.size) self._recover_full(self._verify_extend) raise except Exception: LOG.exception("Error encountered trying to verify extend for " "the volume %(vol_id)s for instance %(id)s", {'vol_id': self.instance.volume_id, 'id': self.instance.id}) self._recover_full(self._verify_extend) raise def _resize_active_volume(self): LOG.debug("Begin _resize_active_volume for id: %(id)s", { 'id': self.instance.id}) self._stop_db() self._unmount_volume(recover_func=self._recover_restart) self._detach_volume(recover_func=self._recover_mount_restart) self._extend(recover_func=self._recover_full) self._verify_extend() # if anything fails after this point, recovery is futile self._attach_volume(recover_func=self._fail) self._resize_fs(recover_func=self._fail) self._mount_volume(recover_func=self._fail) self.instance.restart() LOG.debug("End _resize_active_volume for id: %(id)s", { 'id': self.instance.id}) def execute(self): LOG.debug("%(gt)s: Resizing instance %(id)s volume for server " "%(server_id)s from %(old_volume_size)s to " "%(new_size)r GB", {'gt': greenthread.getcurrent(), 'id': self.instance.id, 'server_id': self.instance.server.id, 'old_volume_size': self.old_size, 'new_size': self.new_size}) if self.instance.server.status in [InstanceStatus.ACTIVE, InstanceStatus.HEALTHY]: self._resize_active_volume() self.instance.reset_task_status() # send usage event for size reported by cinder volume = self.instance.volume_client.volumes.get( self.instance.volume_id) launched_time = timeutils.isotime(self.instance.updated) modified_time = timeutils.isotime(self.instance.updated) TroveInstanceModifyVolume(instance=self.instance, old_volume_size=self.old_size, launched_at=launched_time, modify_at=modified_time, volume_size=volume.size, ).notify() else: self.instance.reset_task_status() msg = ( "Failed to resize instance %(id)s volume for server " "%(server_id)s. The instance must be in state %(state)s " "not %(inst_state)s." 
% { 'id': self.instance.id, 'server_id': self.instance.server.id, 'state': [InstanceStatus.ACTIVE, InstanceStatus.HEALTHY], 'inst_state': self.instance.server.status } ) raise TroveError(msg) class ResizeActionBase(object): """Base class for executing a resize action.""" def __init__(self, instance): """ Creates a new resize action for a given instance :param instance: reference to existing instance that will be resized :type instance: trove.taskmanager.models.BuiltInstanceTasks """ self.instance = instance def _assert_guest_is_ok(self): # The guest will never set the status to PAUSED. self.instance.set_datastore_status_to_paused() # Now we wait until it sets it to anything at all, # so we know it's alive. utils.poll_until( self._guest_is_awake, sleep_time=2, time_out=CONF.resize_time_out) def _assert_nova_status_is_ok(self): # Make sure Nova thinks things went well. if not self.instance.server_status_matches(["VERIFY_RESIZE"]): msg = "Migration failed! status=%(act_status)s and " \ "not %(exp_status)s" % { "act_status": self.instance.server.status, "exp_status": 'VERIFY_RESIZE'} raise TroveError(msg) def _assert_datastore_is_ok(self): # Tell the guest to turn on datastore, and ensure the status becomes # RUNNING. self._start_datastore() utils.poll_until( self._datastore_is_online, sleep_time=2, time_out=CONF.resize_time_out) def _assert_datastore_is_offline(self): # Tell the guest to turn off MySQL, and ensure the status becomes # SHUTDOWN. self.instance.guest.stop_db(do_not_start_on_reboot=True) utils.poll_until( self._datastore_is_offline, sleep_time=2, time_out=CONF.resize_time_out) def _assert_processes_are_ok(self): """Checks the procs; if anything is wrong, reverts the operation.""" # Tell the guest to turn back on, and make sure it can start. 
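# The "guest is ok" check below relies on a sentinel handshake, visible in
# _assert_guest_is_ok() above: the task manager writes PAUSED into the
# datastore status (a value the guest agent itself never reports), then
# polls until the guest's heartbeat overwrites it with anything else,
# proving the agent survived the resize. A minimal sketch of the same
# idea, using a hypothetical helper name (wait_for_agent) around the real
# calls used in this module:
#
#     def wait_for_agent(instance, timeout):
#         instance.set_datastore_status_to_paused()  # sentinel only we write
#         utils.poll_until(
#             lambda: not instance.datastore_status_matches(
#                 rd_instance.ServiceStatuses.PAUSED),
#             sleep_time=2, time_out=timeout)  # any change => agent is alive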
self._assert_guest_is_ok() LOG.debug("Nova guest is ok.") self._assert_datastore_is_ok() LOG.debug("Datastore is ok.") def _confirm_nova_action(self): LOG.debug("Instance %s calling Compute confirm resize...", self.instance.id) self.instance.server.confirm_resize() def _datastore_is_online(self): self.instance._refresh_datastore_status() return self.instance.is_datastore_running def _datastore_is_offline(self): self.instance._refresh_datastore_status() return (self.instance.datastore_status_matches( rd_instance.ServiceStatuses.SHUTDOWN)) def _revert_nova_action(self): LOG.debug("Instance %s calling Compute revert resize...", self.instance.id) self.instance.server.revert_resize() def execute(self): """Initiates the action.""" try: LOG.debug("Instance %s calling stop_db...", self.instance.id) self._assert_datastore_is_offline() self._perform_nova_action() finally: if self.instance.db_info.task_status != ( inst_models.InstanceTasks.NONE): self.instance.reset_task_status() def _guest_is_awake(self): self.instance._refresh_datastore_status() return not self.instance.datastore_status_matches( rd_instance.ServiceStatuses.PAUSED) def _perform_nova_action(self): """Calls Nova to resize or migrate an instance, and confirms.""" LOG.debug("Begin resize method _perform_nova_action instance: %s", self.instance.id) need_to_revert = False try: LOG.debug("Initiating nova action") self._initiate_nova_action() LOG.debug("Waiting for nova action") self._wait_for_nova_action() LOG.debug("Asserting nova status is ok") self._assert_nova_status_is_ok() need_to_revert = True LOG.debug("* * * REVERT BARRIER PASSED * * *") LOG.debug("Asserting nova action success") self._assert_nova_action_was_successful() LOG.debug("Asserting processes are OK") self._assert_processes_are_ok() LOG.debug("Confirming nova action") self._confirm_nova_action() except Exception: LOG.exception("Exception during nova action.") if need_to_revert: LOG.error("Reverting action for instance %s", self.instance.id) self._revert_nova_action() self._wait_for_revert_nova_action() if self.instance.server_status_matches(['ACTIVE']): LOG.error("Restarting datastore.") self.instance.guest.restart() else: LOG.error("Cannot restart datastore because " "Nova server status is not ACTIVE") LOG.error("Error resizing instance %s.", self.instance.id) raise LOG.debug("Recording success") self._record_action_success() LOG.debug("End resize method _perform_nova_action instance: %s", self.instance.id) def _wait_for_nova_action(self): # Wait for the flavor to change. def update_server_info(): self.instance.refresh_compute_server_info() return not self.instance.server_status_matches(['RESIZE']) utils.poll_until( update_server_info, sleep_time=2, time_out=CONF.resize_time_out) def _wait_for_revert_nova_action(self): # Wait for the server to return to ACTIVE after revert. def update_server_info(): self.instance.refresh_compute_server_info() return self.instance.server_status_matches(['ACTIVE']) utils.poll_until( update_server_info, sleep_time=2, time_out=CONF.revert_time_out) class ResizeAction(ResizeActionBase): def __init__(self, instance, old_flavor, new_flavor): """ :type instance: trove.taskmanager.models.BuiltInstanceTasks :type old_flavor: dict :type new_flavor: dict """ super(ResizeAction, self).__init__(instance) self.old_flavor = old_flavor self.new_flavor = new_flavor self.new_flavor_id = new_flavor['id'] def _assert_nova_action_was_successful(self): # Do check to make sure the status and flavor id are correct. 
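# Note: both sides of the comparison below are cast to str() because the
# flavor id reported back by Nova can be an integer or a string depending
# on how the flavor was created, while the id Trove carries around is a
# string; comparing the raw values directly could fail on type alone.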
if str(self.instance.server.flavor['id']) != str(self.new_flavor_id): msg = "Assertion failed! flavor_id=%s and not %s" \ % (self.instance.server.flavor['id'], self.new_flavor_id) raise TroveError(msg) def _initiate_nova_action(self): self.instance.server.resize(self.new_flavor_id) def _revert_nova_action(self): LOG.debug("Instance %s calling Compute revert resize... " "Repairing config.", self.instance.id) try: config = self.instance._render_config(self.old_flavor) config = {'config_contents': config.config_contents} self.instance.guest.reset_configuration(config) except GuestTimeout: LOG.exception("Error sending reset_configuration call.") LOG.debug("Reverting resize.") super(ResizeAction, self)._revert_nova_action() def _record_action_success(self): LOG.debug("Updating instance %(id)s to flavor_id %(flavor_id)s.", {'id': self.instance.id, 'flavor_id': self.new_flavor_id}) self.instance.update_db(flavor_id=self.new_flavor_id, task_status=inst_models.InstanceTasks.NONE) update_time = timeutils.isotime(self.instance.updated) TroveInstanceModifyFlavor(instance=self.instance, old_instance_size=self.old_flavor['ram'], instance_size=self.new_flavor['ram'], launched_at=update_time, modify_at=update_time, server=self.instance.server).notify() def _start_datastore(self): config = self.instance._render_config(self.new_flavor) self.instance.guest.start_db_with_conf_changes(config.config_contents) class MigrateAction(ResizeActionBase): def __init__(self, instance, host=None): super(MigrateAction, self).__init__(instance) self.instance = instance self.host = host def _assert_nova_action_was_successful(self): LOG.debug("Currently no assertions for a Migrate Action") def _initiate_nova_action(self): LOG.debug("Migrating instance %(instance)s without flavor change ...\n" "Forcing migration to host(%(host)s)", {"instance": self.instance.id, "host": self.host}) self.instance.server.migrate(force_host=self.host) def _record_action_success(self): LOG.debug("Successfully finished Migration to " "%(hostname)s: %(id)s", {'hostname': self.instance.hostname, 'id': self.instance.id}) def _start_datastore(self): self.instance.guest.restart() def load_cluster_tasks(context, cluster_id): manager = Cluster.manager_from_cluster_id(context, cluster_id) strat = strategy.load_taskmanager_strategy(manager) task_manager_cluster_tasks_class = strat.task_manager_cluster_tasks_class return ClusterTasks.load(context, cluster_id, task_manager_cluster_tasks_class) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/taskmanager/service.py0000644000175000017500000000140100000000000021611 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class TaskService(object): """Task Manager interface.""" def app_factory(global_conf, **local_conf): return TaskService() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.6641088 trove-12.1.0.dev92/trove/templates/0000755000175000017500000000000000000000000017304 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/templates/cassandra/0000755000175000017500000000000000000000000021243 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/cassandra/config.template0000644000175000017500000011415300000000000024252 0ustar00coreycorey00000000000000# Cassandra storage config YAML # NOTE: # See http://wiki.apache.org/cassandra/StorageConfiguration for # full explanations of configuration directives # /NOTE # The name of the cluster. This is mainly used to prevent machines in # one logical cluster from joining another. cluster_name: 'Test Cluster' # This defines the number of tokens randomly assigned to this node on the ring # The more tokens, relative to other nodes, the larger the proportion of data # that this node will store. You probably want all nodes to have the same number # of tokens assuming they have equal hardware capability. # # If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, # and will use the initial_token as described below. # # Specifying initial_token will override this setting on the node's initial start, # on subsequent starts, this setting will apply even if initial token is set. # # If you already have a cluster with 1 token per node, and wish to migrate to # multiple tokens per node, see http://wiki.apache.org/cassandra/Operations num_tokens: 256 # initial_token allows you to specify tokens manually. While you can use # it with # vnodes (num_tokens > 1, above) -- in which case you should provide a # comma-separated list -- it's primarily used when adding nodes # to legacy clusters # that do not have vnodes enabled. # initial_token: # See http://wiki.apache.org/cassandra/HintedHandoff # May either be "true" or "false" to enable globally, or contain a list # of data centers to enable per-datacenter. # hinted_handoff_enabled: DC1,DC2 hinted_handoff_enabled: true # this defines the maximum amount of time a dead host will have hints # generated. After it has been dead this long, new hints for it will not be # created until it has been seen alive and gone down again. max_hint_window_in_ms: 10800000 # 3 hours # Maximum throttle in KBs per second, per delivery thread. This will be # reduced proportionally to the number of nodes in the cluster. (If there # are two nodes in the cluster, each delivery thread will use the maximum # rate; if there are three, each will throttle to half of the maximum, # since we expect two nodes to be delivering hints simultaneously.) hinted_handoff_throttle_in_kb: 1024 # Number of threads with which to deliver hints; # Consider increasing this number when you have multi-dc deployments, since # cross-dc handoff tends to be slower max_hints_delivery_threads: 2 # Maximum throttle in KBs per second, total. This will be # reduced proportionally to the number of nodes in the cluster. 
batchlog_replay_throttle_in_kb: 1024 # Authentication backend, implementing IAuthenticator; used to identify users # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, # PasswordAuthenticator}. # # - AllowAllAuthenticator performs no checks - set it to disable authentication. # - PasswordAuthenticator relies on username/password pairs to authenticate # users. It keeps usernames and hashed passwords in system_auth.credentials table. # Please increase system_auth keyspace replication factor if you use this authenticator. # # Authenticator is required to support Trove user functions. authenticator: org.apache.cassandra.auth.PasswordAuthenticator # Authorization backend, implementing IAuthorizer; used to limit access/provide permissions # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, # CassandraAuthorizer}. # # - AllowAllAuthorizer allows any action to any user - set it to disable authorization. # - CassandraAuthorizer stores permissions in system_auth.permissions table. Please # increase system_auth keyspace replication factor if you use this authorizer. # # Authorizer is required to support Trove user functions. authorizer: org.apache.cassandra.auth.CassandraAuthorizer # Validity period for permissions cache (fetching permissions can be an # expensive operation depending on the authorizer, CassandraAuthorizer is # one example). Defaults to 2000, set to 0 to disable. # Will be disabled automatically for AllowAllAuthorizer. permissions_validity_in_ms: 2000 # Refresh interval for permissions cache (if enabled). # After this interval, cache entries become eligible for refresh. Upon next # access, an async reload is scheduled and the old value returned until it # completes. If permissions_validity_in_ms is non-zero, then this must be # also. # Defaults to the same value as permissions_validity_in_ms. # permissions_update_interval_in_ms: 1000 # The partitioner is responsible for distributing groups of rows (by # partition key) across nodes in the cluster. You should leave this # alone for new clusters. The partitioner can NOT be changed without # reloading all data, so when upgrading you should set this to the # same partitioner you were already using. # # Besides Murmur3Partitioner, partitioners included for backwards # compatibility include RandomPartitioner, ByteOrderedPartitioner, and # OrderPreservingPartitioner. # partitioner: org.apache.cassandra.dht.Murmur3Partitioner # Directories where Cassandra should store data on disk. Cassandra # will spread data evenly across them, subject to the granularity of # the configured compaction strategy. # If not set, the default directory is $CASSANDRA_HOME/data/data. data_file_directories: - /var/lib/cassandra/data # commit log. when running on magnetic HDD, this should be a # separate spindle than the data directories. # If not set, the default directory is $CASSANDRA_HOME/data/commitlog. commitlog_directory: /var/lib/cassandra/commitlog # policy for data disk failures: # die: shut down gossip and client transports and kill the JVM for any fs errors or # single-sstable errors, so the node can be replaced. # stop_paranoid: shut down gossip and client transports even for single-sstable errors, # kill the JVM for errors during startup. # stop: shut down gossip and client transports, leaving the node effectively dead, but # can still be inspected via JMX, kill the JVM for errors during startup. 
# best_effort: stop using the failed disk and respond to requests based on
#              remaining available sstables. This means you WILL see obsolete
#              data at CL.ONE!
# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
disk_failure_policy: stop

# policy for commit disk failures:
# die: shut down gossip and Thrift and kill the JVM, so the node can be replaced.
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
#       can still be inspected via JMX.
# stop_commit: shutdown the commit log, letting writes collect but
#              continuing to service reads, as in pre-2.0.5 Cassandra
# ignore: ignore fatal errors and let the batches fail
commit_failure_policy: stop

# Maximum size of the key cache in memory.
#
# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
# minimum, sometimes more. The key cache is fairly tiny for the amount of
# time it saves, so it's worthwhile to use it at large numbers.
# The row cache saves even more time, but must contain the entire row,
# so it is extremely space-intensive. It's best to only use the
# row cache if you have hot rows or static rows.
#
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
key_cache_size_in_mb:

# Duration in seconds after which Cassandra should
# save the key cache. Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and are relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 14400 or 4 hours.
key_cache_save_period: 14400

# Number of keys from the key cache to save
# Disabled by default, meaning all keys are going to be saved
# key_cache_keys_to_save: 100

# Maximum size of the row cache in memory.
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is 0, to disable row caching.
row_cache_size_in_mb: 0

# Duration in seconds after which Cassandra should
# save the row cache. Caches are saved to saved_caches_directory as specified
# in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and are relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 0 to disable saving the row cache.
row_cache_save_period: 0

# Number of keys from the row cache to save
# Disabled by default, meaning all keys are going to be saved
# row_cache_keys_to_save: 100

# Maximum size of the counter cache in memory.
#
# Counter cache helps to reduce counter locks' contention for hot counter cells.
# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before
# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration
# of the lock hold, helping with hot counter cell updates, but will not allow skipping
# the read entirely. Only the local (clock, count) tuple of a counter cell is kept
# in memory, not the whole counter, so it's relatively cheap.
#
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache.
# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache.
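# Worked example of the "auto" sizing formulas above (illustrative numbers,
# not a recommendation): on a node with an 8192 MB heap, the counter cache
# would default to min(2.5% of 8192 MB, 50 MB) = min(204.8 MB, 50 MB) = 50 MB,
# and the key cache above would default to min(5% of 8192 MB, 100 MB) = 100 MB.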
counter_cache_size_in_mb:

# Duration in seconds after which Cassandra should
# save the counter cache (keys only). Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
# Default is 7200 or 2 hours.
counter_cache_save_period: 7200

# Number of keys from the counter cache to save
# Disabled by default, meaning all keys are going to be saved
# counter_cache_keys_to_save: 100

# The off-heap memory allocator. Affects storage engine metadata as
# well as caches. Experiments show that JEMalloc saves some memory
# compared to the native GCC allocator (i.e., JEMalloc is more
# fragmentation-resistant).
#
# Supported values are: NativeAllocator, JEMallocAllocator
#
# If you intend to use JEMallocAllocator you have to install JEMalloc as library and
# modify cassandra-env.sh as directed in the file.
#
# Defaults to NativeAllocator
# memory_allocator: NativeAllocator

# saved caches
# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches.
saved_caches_directory: /var/lib/cassandra/saved_caches

# commitlog_sync may be either "periodic" or "batch."
#
# When in batch mode, Cassandra won't ack writes until the commit log
# has been fsynced to disk. It will wait
# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
# This window should be kept short because the writer threads will
# be unable to do extra work while waiting. (You may need to increase
# concurrent_writes for the same reason.)
#
# commitlog_sync: batch
# commitlog_sync_batch_window_in_ms: 2
#
# the other option is "periodic" where writes may be acked immediately
# and the CommitLog is simply synced every commitlog_sync_period_in_ms
# milliseconds.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000

# The size of the individual commitlog file segments. A commitlog
# segment may be archived, deleted, or recycled once all the data
# in it (potentially from each columnfamily in the system) has been
# flushed to sstables.
#
# The default size is 32, which is almost always fine, but if you are
# archiving commitlog segments (see commitlog_archiving.properties),
# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
commitlog_segment_size_in_mb: 32

# Reuse commit log files when possible. The default is false, and this
# feature will be removed entirely in future versions of Cassandra.
#commitlog_segment_recycling: false

# any class that implements the SeedProvider interface and has a
# constructor that takes a Map<String, String> of parameters will do.
seed_provider:
    # Addresses of hosts that are deemed contact points.
    # Cassandra nodes use this list of hosts to find each other and learn
    # the topology of the ring. You must change this if you are running
    # multiple nodes!
    - class_name: org.apache.cassandra.locator.SimpleSeedProvider
      parameters:
          # seeds is actually a comma-delimited list of addresses.
          # Ex: "<ip1>,<ip2>,<ip3>"
          - seeds: "127.0.0.1"

# For workloads with more data than can fit in memory, Cassandra's
# bottleneck will be reads that need to fetch data from
# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
# order to allow the operations to enqueue low enough in the stack
# that the OS and drives can reorder them. Same applies to
# "concurrent_counter_writes", since counter writes read the current
# values before incrementing and writing them back.
#
# On the other hand, since writes are almost never IO bound, the ideal
# number of "concurrent_writes" is dependent on the number of cores in
# your system; (8 * number_of_cores) is a good rule of thumb.
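# Worked example of the sizing rules above (hypothetical host with 2 data
# drives and 8 cores, purely illustrative; the template below ships
# conservative defaults of 32):
#   concurrent_reads:          16 * 2 drives = 32
#   concurrent_counter_writes: 16 * 2 drives = 32
#   concurrent_writes:          8 * 8 cores  = 64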
concurrent_reads: 32
concurrent_writes: 32
concurrent_counter_writes: 32

# Total memory to use for sstable-reading buffers. Defaults to
# the smaller of 1/4 of heap or 512MB.
# file_cache_size_in_mb: 512

# Total permitted memory to use for memtables. Cassandra will stop
# accepting writes when the limit is exceeded until a flush completes,
# and will trigger a flush based on memtable_cleanup_threshold
# If omitted, Cassandra will set both to 1/4 the size of the heap.
# memtable_heap_space_in_mb: 2048
# memtable_offheap_space_in_mb: 2048

# Ratio of occupied non-flushing memtable size to total permitted size
# that will trigger a flush of the largest memtable. Larger mct will
# mean larger flushes and hence less compaction, but also less concurrent
# flush activity which can make it difficult to keep your disks fed
# under heavy write load.
#
# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1)
# memtable_cleanup_threshold: 0.11

# Specify the way Cassandra allocates and manages memtable memory.
# Options are:
#   heap_buffers:    on heap nio buffers
#   offheap_buffers: off heap (direct) nio buffers
#   offheap_objects: native memory, eliminating nio buffer heap overhead
memtable_allocation_type: heap_buffers

# Total space to use for commitlogs. Since commitlog segments are
# mmapped, and hence use up address space, the default size is 32
# on 32-bit JVMs, and 8192 on 64-bit JVMs.
#
# If space gets above this value (it will round up to the next nearest
# segment multiple), Cassandra will flush every dirty CF in the oldest
# segment and remove it. So a small total commitlog space will tend
# to cause more flush activity on less-active columnfamilies.
# commitlog_total_space_in_mb: 8192

# This sets the number of memtable flush writer threads. These will
# be blocked by disk io, and each one will hold a memtable in memory
# while blocked.
#
# memtable_flush_writers defaults to the smaller of (number of disks,
# number of cores), with a minimum of 2 and a maximum of 8.
#
# If your data directories are backed by SSD, you should increase this
# to the number of cores.
#memtable_flush_writers: 8

# A fixed memory pool size in MB for SSTable index summaries. If left
# empty, this will default to 5% of the heap size. If the memory usage of
# all index summaries exceeds this limit, SSTables with low read rates will
# shrink their index summaries in order to meet this limit. However, this
# is a best-effort process. In extreme conditions Cassandra may need to use
# more than this amount of memory.
index_summary_capacity_in_mb:

# How frequently index summaries should be resampled. This is done
# periodically to redistribute memory from the fixed-size pool to sstables
# proportional to their recent read rates. Setting to -1 will disable this
# process, leaving existing index summaries at their current sampling level.
index_summary_resize_interval_in_minutes: 60

# Whether to, when doing sequential writing, fsync() at intervals in
# order to force the operating system to flush the dirty
# buffers. Enable this to avoid sudden dirty buffer flushing from
# impacting read latencies. Almost always a good idea on SSDs; not
# necessarily on platters.
trickle_fsync: false
trickle_fsync_interval_in_kb: 10240

# TCP port, for commands and data
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
storage_port: 7000

# SSL port, for encrypted communication. Unused unless enabled in
# encryption_options
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
ssl_storage_port: 7001

# Address or interface to bind to and tell other Cassandra nodes to connect to.
# You _must_ change this if you want multiple nodes to be able to communicate!
#
# Set listen_address OR listen_interface, not both. Interfaces must correspond
# to a single address, IP aliasing is not supported.
#
# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
# will always do the Right Thing _if_ the node is properly configured
# (hostname, name resolution, etc), and the Right Thing is to use the
# address associated with the hostname (it might not be).
#
# Setting listen_address to 0.0.0.0 is always wrong.
#
# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
listen_address: 127.0.0.1
# listen_interface: eth0
# listen_interface_prefer_ipv6: false

# Address to broadcast to other Cassandra nodes
# Leaving this blank will set it to the same value as listen_address
# broadcast_address: 1.2.3.4

# Internode authentication backend, implementing IInternodeAuthenticator;
# used to allow/disallow connections from peer nodes.
# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator

# Whether to start the native transport server.
# Please note that the address on which the native transport is bound is the
# same as the rpc_address. The port however is different and specified below.
#
# The Python driver uses the binary protocol and requires this to be enabled.
start_native_transport: true
# port for the CQL native transport to listen for clients on
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
native_transport_port: 9042
# The maximum threads for handling requests when the native transport is used.
# This is similar to rpc_max_threads though the default differs slightly (and
# there is no native_transport_min_threads, idle threads will always be stopped
# after 30 seconds).
# native_transport_max_threads: 128
#
# The maximum size of allowed frame. Frame (requests) larger than this will
# be rejected as invalid. The default is 256MB.
# native_transport_max_frame_size_in_mb: 256

# The maximum number of concurrent client connections.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections: -1

# The maximum number of concurrent client connections per source ip.
# The default is -1, which means unlimited.
# native_transport_max_concurrent_connections_per_ip: -1

# Whether to start the thrift rpc server.
start_rpc: true

# The address or interface to bind the Thrift RPC service and native transport
# server to.
#
# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
# to a single address, IP aliasing is not supported.
#
# Leaving rpc_address blank has the same effect as on listen_address
# (i.e. it will be based on the configured hostname of the node).
#
# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
# set broadcast_rpc_address to a value other than 0.0.0.0.
#
# For security reasons, you should not expose this port to the internet. Firewall it if needed.
#
# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
rpc_address: 0.0.0.0
# rpc_interface: eth1
# rpc_interface_prefer_ipv6: false

# port for Thrift to listen for clients on
rpc_port: 9160

# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
# be set to 0.0.0.0. If left blank, this will be set to the value of
# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
# be set.
broadcast_rpc_address: 127.0.0.1

# enable or disable keepalive on rpc/native connections
rpc_keepalive: true

# Cassandra provides two out-of-the-box options for the RPC Server:
#
# sync  -> One thread per thrift connection. For a very large number of clients, memory
#          will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
#          per thread, and that will correspond to your use of virtual memory (but physical memory
#          may be limited depending on use of stack space).
#
# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
#          asynchronously using a small number of threads that does not vary with the number
#          of thrift clients (and thus scales well to many clients). The rpc requests are still
#          synchronous (one thread per active request). If hsha is selected then it is essential
#          that rpc_max_threads is changed from the default value of unlimited.
#
# The default is sync because on Windows hsha is about 30% slower. On Linux,
# sync/hsha performance is about the same, with hsha of course using less memory.
#
# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
# of an o.a.c.t.TServerFactory that can create an instance of it.
rpc_server_type: sync

# Uncomment rpc_min|max_thread to set request pool size limits.
#
# Regardless of your choice of RPC server (see above), the number of maximum requests in the
# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
# RPC server, it also dictates the number of clients that can be connected at all).
#
# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048

# uncomment to set socket buffer sizes on rpc connections
# rpc_send_buff_size_in_bytes:
# rpc_recv_buff_size_in_bytes:

# Uncomment to set socket buffer size for internode communication
# Note that when setting this, the buffer size is limited by net.core.wmem_max
# and when not setting it, it is defined by net.ipv4.tcp_wmem
# See:
# /proc/sys/net/core/wmem_max
# /proc/sys/net/core/rmem_max
# /proc/sys/net/ipv4/tcp_wmem
# /proc/sys/net/ipv4/tcp_rmem
# and: man tcp
# internode_send_buff_size_in_bytes:
# internode_recv_buff_size_in_bytes:

# Frame size for thrift (maximum message length).
thrift_framed_transport_size_in_mb: 15

# Set to true to have Cassandra create a hard link to each sstable
# flushed or streamed locally in a backups/ subdirectory of the
# keyspace data. Removing these links is the operator's
# responsibility.
incremental_backups: false

# Whether or not to take a snapshot before each compaction. Be
# careful using this option, since Cassandra won't clean up the
# snapshots for you. Mostly useful if you're paranoid when there
# is a data format change.
#
# Trove currently does not provide any support for guestagent-local snapshots.
# see comment on 'auto_snapshot' below.
snapshot_before_compaction: false

# Whether or not a snapshot is taken of the data before keyspace truncation
# or dropping of column families. The STRONGLY advised default of true
# should be used to provide data safety. If you set this flag to false, you will
# lose data on truncation or drop.
#
# Trove currently does not provide any support for guestagent-local snapshots.
# They may be used internally and/or removed (!) by backup implementations.
# The operator would be also required to have remote access to the filesystem.
auto_snapshot: false

# When executing a scan, within or across a partition, we need to keep the
# tombstones seen in memory so we can return them to the coordinator, which
# will use them to make sure other replicas also know about the deleted rows.
# With workloads that generate a lot of tombstones, this can cause performance
# problems and even exhaust the server heap.
# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
# Adjust the thresholds here if you understand the dangers and want to
# scan more tombstones anyway. These thresholds may also be adjusted at runtime
# using the StorageService mbean.
tombstone_warn_threshold: 1000
tombstone_failure_threshold: 100000

# Granularity of the collation index of rows within a partition.
# Increase if your rows are large, or if you have a very large
# number of rows per partition. The competing goals are these:
#   1) a smaller granularity means more index entries are generated
#      and looking up rows within the partition by collation column
#      is faster
#   2) but, Cassandra will keep the collation index in memory for hot
#      rows (as part of the key cache), so a larger granularity means
#      you can cache more hot rows
column_index_size_in_kb: 64

# Log WARN on any batch size exceeding this value. 5kb per batch by default.
# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
batch_size_warn_threshold_in_kb: 5

# Number of simultaneous compactions to allow, NOT including
# validation "compactions" for anti-entropy repair. Simultaneous
# compactions can help preserve read performance in a mixed read/write
# workload, by mitigating the tendency of small sstables to accumulate
# during a single long running compaction. The default is usually
# fine and if you experience problems with compaction running too
# slowly or too fast, you should look at
# compaction_throughput_mb_per_sec first.
#
# concurrent_compactors defaults to the smaller of (number of disks,
# number of cores), with a minimum of 2 and a maximum of 8.
#
# If your data directories are backed by SSD, you should increase this
# to the number of cores.
#concurrent_compactors: 1

# Throttles compaction to the given total throughput across the entire
# system. The faster you insert data, the faster you need to compact in
# order to keep the sstable count down, but in general, setting this to
# 16 to 32 times the rate you are inserting data is more than sufficient.
# Setting this to 0 disables throttling. Note that this accounts for all types
# of compaction, including validation compaction.
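# Worked example of the guidance above (illustrative only): a node sustaining
# roughly 1 MB/s of inserts would want compaction throttled to about
# 16-32 MB/s, so the default of 16 below already covers that write rate.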
compaction_throughput_mb_per_sec: 16

# Log a warning when compacting partitions larger than this value
compaction_large_partition_warning_threshold_mb: 100

# When compacting, the replacement sstable(s) can be opened before they
# are completely written, and used in place of the prior sstables for
# any range that has been written. This helps to smoothly transfer reads
# between the sstables, reducing page cache churn and keeping hot rows hot
sstable_preemptive_open_interval_in_mb: 50

# Throttles all outbound streaming file transfers on this node to the
# given total throughput in Mbps. This is necessary because Cassandra does
# mostly sequential IO when streaming data during bootstrap or repair, which
# can lead to saturating the network connection and degrading rpc performance.
# When unset, the default is 200 Mbps or 25 MB/s.
# stream_throughput_outbound_megabits_per_sec: 200

# Throttles all streaming file transfer between the datacenters,
# this setting allows users to throttle inter dc stream throughput in addition
# to throttling all network stream traffic as configured with
# stream_throughput_outbound_megabits_per_sec
# inter_dc_stream_throughput_outbound_megabits_per_sec:

# How long the coordinator should wait for read operations to complete
read_request_timeout_in_ms: 5000
# How long the coordinator should wait for seq or index scans to complete
range_request_timeout_in_ms: 10000
# How long the coordinator should wait for writes to complete
write_request_timeout_in_ms: 2000
# How long the coordinator should wait for counter writes to complete
counter_write_request_timeout_in_ms: 5000
# How long a coordinator should continue to retry a CAS operation
# that contends with other proposals for the same row
cas_contention_timeout_in_ms: 1000
# How long the coordinator should wait for truncates to complete
# (This can be much longer, because unless auto_snapshot is disabled
# we need to flush first so we can snapshot before removing the data.)
truncate_request_timeout_in_ms: 60000
# The default timeout for other, miscellaneous operations
request_timeout_in_ms: 10000

# Enable operation timeout information exchange between nodes to accurately
# measure request timeouts. If disabled, replicas will assume that requests
# were forwarded to them instantly by the coordinator, which means that
# under overload conditions we will waste that much extra time processing
# already-timed-out requests.
#
# Warning: before enabling this property make sure ntp is installed
# and the times are synchronized between the nodes.
cross_node_timeout: false

# Enable socket timeout for streaming operation.
# When a timeout occurs during streaming, streaming is retried from the start
# of the current file. This _can_ involve re-streaming a significant amount of
# data, so you should avoid setting the value too low.
# Default value is 3600000, which means streams timeout after an hour.
# streaming_socket_timeout_in_ms: 3600000

# phi value that must be reached for a host to be marked down.
# most users should never need to adjust this.
# phi_convict_threshold: 8

# endpoint_snitch -- Set this to a class that implements
# IEndpointSnitch. The snitch has two functions:
# - it teaches Cassandra enough about your network topology to route
#   requests efficiently
# - it allows Cassandra to spread replicas around your cluster to avoid
#   correlated failures. It does this by grouping machines into
#   "datacenters" and "racks."
Cassandra will do its best not to have # more than one replica on the same "rack" (which may not actually # be a physical location) # # IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, # YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS # ARE PLACED. # # IF THE RACK A REPLICA IS PLACED IN CHANGES AFTER THE REPLICA HAS BEEN # ADDED TO A RING, THE NODE MUST BE DECOMMISSIONED AND REBOOTSTRAPPED. # # Out of the box, Cassandra provides # - SimpleSnitch: # Treats Strategy order as proximity. This can improve cache # locality when disabling read repair. Only appropriate for # single-datacenter deployments. # - GossipingPropertyFileSnitch # This should be your go-to snitch for production use. The rack # and datacenter for the local node are defined in # cassandra-rackdc.properties and propagated to other nodes via # gossip. If cassandra-topology.properties exists, it is used as a # fallback, allowing migration from the PropertyFileSnitch. # - PropertyFileSnitch: # Proximity is determined by rack and data center, which are # explicitly configured in cassandra-topology.properties. # - Ec2Snitch: # Appropriate for EC2 deployments in a single Region. Loads Region # and Availability Zone information from the EC2 API. The Region is # treated as the datacenter, and the Availability Zone as the rack. # Only private IPs are used, so this will not work across multiple # Regions. # - Ec2MultiRegionSnitch: # Uses public IPs as broadcast_address to allow cross-region # connectivity. (Thus, you should set seed addresses to the public # IP as well.) You will need to open the storage_port or # ssl_storage_port on the public IP firewall. (For intra-Region # traffic, Cassandra will switch to the private IP after # establishing a connection.) # - RackInferringSnitch: # Proximity is determined by rack and data center, which are # assumed to correspond to the 3rd and 2nd octet of each node's IP # address, respectively. Unless this happens to match your # deployment conventions, this is best used as an example of # writing a custom Snitch class and is provided in that spirit. # # You can use a custom Snitch by setting this to the full class name # of the snitch, which will be assumed to be on your classpath. endpoint_snitch: SimpleSnitch # controls how often to perform the more expensive part of host score # calculation dynamic_snitch_update_interval_in_ms: 100 # controls how often to reset all host scores, allowing a bad host to # possibly recover dynamic_snitch_reset_interval_in_ms: 600000 # if set greater than zero and read_repair_chance is < 1.0, this will allow # 'pinning' of replicas to hosts in order to increase cache capacity. # The badness threshold will control how much worse the pinned host has to be # before the dynamic snitch will prefer other replicas over it. This is # expressed as a double which represents a percentage. Thus, a value of # 0.2 means Cassandra would continue to prefer the static snitch values # until the pinned host was 20% worse than the fastest. dynamic_snitch_badness_threshold: 0.1 # request_scheduler -- Set this to a class that implements # RequestScheduler, which will schedule incoming client requests # according to the specific policy. This is useful for multi-tenancy # with a single Cassandra cluster. # NOTE: This is specifically for requests from the client and does # not affect inter node communication. 
#  org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
#  org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
#      client requests to a node with a separate queue for each
#      request_scheduler_id. The scheduler is further customized by
#      request_scheduler_options as described below.
request_scheduler: org.apache.cassandra.scheduler.NoScheduler

# Scheduler Options vary based on the type of scheduler
# NoScheduler - Has no options
# RoundRobin
#  - throttle_limit -- The throttle_limit is the number of in-flight
#                      requests per client. Requests beyond
#                      that limit are queued up until
#                      running requests can complete.
#                      The value of 80 here is twice the number of
#                      concurrent_reads + concurrent_writes.
#  - default_weight -- default_weight is optional and allows for
#                      overriding the default which is 1.
#  - weights -- Weights are optional and will default to 1 or the
#               overridden default_weight. The weight translates into how
#               many requests are handled during each turn of the
#               RoundRobin, based on the scheduler id.
#
# request_scheduler_options:
#    throttle_limit: 80
#    default_weight: 5
#    weights:
#      Keyspace1: 1
#      Keyspace2: 5

# request_scheduler_id -- An identifier based on which to perform
# the request scheduling. Currently the only valid option is keyspace.
# request_scheduler_id: keyspace

# Enable or disable inter-node encryption
# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
# suite for authentication, key exchange and encryption of the actual data transfers.
# Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
# NOTE: No custom encryption options are enabled at the moment
# The available internode options are: all, none, dc, rack
#
# If set to dc cassandra will encrypt the traffic between the DCs
# If set to rack cassandra will encrypt the traffic between the racks
#
# The passwords used in these options must match the passwords used when generating
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
#
server_encryption_options:
    internode_encryption: none
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra
    # More advanced defaults below:
    # protocol: TLS
    # algorithm: SunX509
    # store_type: JKS
    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
    # require_client_auth: false

# enable or disable client/server encryption.
client_encryption_options:
    enabled: false
    # If enabled and optional is set to true encrypted and unencrypted connections are handled.
    #optional: false # Only supported by versions >= 2.1.12
    keystore: conf/.keystore
    keystore_password: cassandra
    # require_client_auth: false
    # Set truststore and truststore_password if require_client_auth is true
    # truststore: conf/.truststore
    # truststore_password: cassandra
    # More advanced defaults below:
    # protocol: TLS
    # algorithm: SunX509
    # store_type: JKS
    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]

# internode_compression controls whether traffic between nodes is
# compressed.
# can be: all - all traffic is compressed # dc - traffic between different datacenters is compressed # none - nothing is compressed. internode_compression: all # Enable or disable tcp_nodelay for inter-dc communication. # Disabling it will result in larger (but fewer) network packets being sent, # reducing overhead from the TCP protocol itself, at the cost of increasing # latency if you block for cross-datacenter responses. inter_dc_tcp_nodelay: false # GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level # Adjust the threshold based on your application throughput requirement # By default, Cassandra logs GC Pauses greater than 200 ms at INFO level # gc_warn_threshold_in_ms: 1000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/cassandra/validation-rules.json0000644000175000017500000003471600000000000025433 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "name": "cluster_name", "restart_required": true, "type": "string" }, { "name": "listen_address", "restart_required": true, "type": "string" }, { "name": "commit_failure_policy", "restart_required": true, "type": "string" }, { "name": "disk_failure_policy", "restart_required": true, "type": "string" }, { "name": "endpoint_snitch", "restart_required": true, "type": "string" }, { "name": "seed_provider", "restart_required": true, "type": "list" }, { "name": "compaction_throughput_mb_per_sec", "restart_required": true, "min": 0, "type": "integer" }, { "name": "compaction_large_partition_warning_threshold_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_total_space_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "concurrent_reads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "concurrent_writes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "phi_convict_threshold", "restart_required": true, "type": "integer" }, { "name": "commitlog_sync", "restart_required": true, "type": "string" }, { "name": "commitlog_segment_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "commitlog_total_space_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "compaction_preheat_key_cache", "restart_required": true, "type": "boolean" }, { "name": "concurrent_compactors", "restart_required": true, "min": 0, "type": "integer" }, { "name": "in_memory_compaction_limit_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "preheat_kernel_page_cache", "restart_required": true, "type": "boolean" }, { "name": "sstable_preemptive_open_interval_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_allocation_type", "restart_required": true, "type": "string" }, { "name": "memtable_cleanup_threshold", "restart_required": true, "min": 0, "type": "float" }, { "name": "file_cache_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_flush_queue_size", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_flush_writers", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_heap_space_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memtable_offheap_space_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "column_index_size_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": 
"index_summary_capacity_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "index_summary_resize_interval_in_minutes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "reduce_cache_capacity_to", "restart_required": true, "min": 0, "max": 1.0, "type": "float" }, { "name": "reduce_cache_sizes_at", "restart_required": true, "min": 0, "max": 1.0, "type": "float" }, { "name": "stream_throughput_outbound_megabits_per_sec", "restart_required": true, "min": 0, "type": "integer" }, { "name": "inter_dc_stream_throughput_outbound_megabits_per_sec", "restart_required": true, "min": 0, "type": "integer" }, { "name": "trickle_fsync", "restart_required": true, "type": "boolean" }, { "name": "trickle_fsync_interval_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "auto_bootstrap", "restart_required": true, "type": "boolean" }, { "name": "batch_size_warn_threshold_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "broadcast_address", "restart_required": true, "type": "string" }, { "name": "initial_token", "restart_required": true, "type": "string" }, { "name": "initial_token", "restart_required": true, "type": "string" }, { "name": "num_tokens", "restart_required": true, "min": 0, "type": "integer" }, { "name": "partitioner", "restart_required": true, "type": "string" }, { "name": "key_cache_keys_to_save", "restart_required": true, "min": 0, "type": "integer" }, { "name": "key_cache_save_period", "restart_required": true, "min": 0, "type": "integer" }, { "name": "key_cache_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "row_cache_keys_to_save", "restart_required": true, "min": 0, "type": "integer" }, { "name": "row_cache_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "row_cache_save_period", "restart_required": true, "min": 0, "type": "integer" }, { "name": "memory_allocator", "restart_required": true, "type": "string" }, { "name": "counter_cache_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "counter_cache_save_period", "restart_required": true, "min": 0, "type": "integer" }, { "name": "counter_cache_keys_to_save", "restart_required": true, "min": 0, "type": "integer" }, { "name": "counter_cache_keys_to_save", "restart_required": true, "min": 0, "type": "integer" }, { "name": "tombstone_warn_threshold", "restart_required": true, "min": 0, "type": "integer" }, { "name": "tombstone_failure_threshold", "restart_required": true, "min": 0, "type": "integer" }, { "name": "range_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "read_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "counter_write_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "cas_contention_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "truncate_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "write_request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "request_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "cross_node_timeout", "restart_required": true, "type": "boolean" }, { "name": "internode_send_buff_size_in_bytes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "internode_recv_buff_size_in_bytes", "restart_required": true, "min": 0, "type": "integer" }, { 
"name": "internode_compression", "restart_required": true, "type": "string" }, { "name": "inter_dc_tcp_nodelay", "restart_required": true, "type": "boolean" }, { "name": "streaming_socket_timeout_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "native_transport_max_threads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "native_transport_max_frame_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "native_transport_max_concurrent_connections", "restart_required": true, "min": -1, "type": "integer" }, { "name": "native_transport_max_concurrent_connections_per_ip", "restart_required": true, "min": -1, "type": "integer" }, { "name": "broadcast_rpc_address", "restart_required": true, "type": "string" }, { "name": "rpc_keepalive", "restart_required": true, "type": "boolean" }, { "name": "rpc_max_threads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "rpc_min_threads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "rpc_recv_buff_size_in_bytes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "rpc_send_buff_size_in_bytes", "restart_required": true, "min": 0, "type": "integer" }, { "name": "rpc_server_type", "restart_required": true, "type": "string" }, { "name": "dynamic_snitch_badness_threshold", "restart_required": true, "min": 0, "type": "float" }, { "name": "dynamic_snitch_reset_interval_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "dynamic_snitch_update_interval_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "hinted_handoff_enabled", "restart_required": true, "type": "boolean" }, { "name": "hinted_handoff_throttle_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "max_hint_window_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "max_hints_delivery_threads", "restart_required": true, "min": 0, "type": "integer" }, { "name": "batchlog_replay_throttle_in_kb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "request_scheduler", "restart_required": true, "type": "string" }, { "name": "request_scheduler_id", "restart_required": true, "type": "string" }, { "name": "request_scheduler_options", "restart_required": true, "type": "list" }, { "name": "thrift_framed_transport_size_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "thrift_max_message_length_in_mb", "restart_required": true, "min": 0, "type": "integer" }, { "name": "permissions_validity_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "permissions_update_interval_in_ms", "restart_required": true, "min": 0, "type": "integer" }, { "name": "gc_warn_threshold_in_ms", "restart_required": true, "min": 0, "type": "integer" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/templates/couchbase/0000755000175000017500000000000000000000000021240 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/couchbase/config.template0000644000175000017500000000000000000000000024230 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/templates/couchdb/0000755000175000017500000000000000000000000020713 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/couchdb/config.template0000644000175000017500000000000000000000000023703 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/templates/db2/0000755000175000017500000000000000000000000017753 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/db2/config.template0000644000175000017500000000000000000000000022743 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/db2/validation-rules.json0000644000175000017500000003326500000000000024141 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "description": "Agent stack size", "type": "integer", "name": "AGENT_STACK_SZ", "restart_required": true }, { "description": "Alternate encryption algorithm for incoming connections at server", "type": "integer", "name": "ALTERNATE_AUTH_ENC", "restart_required": true }, { "description": "Max number of concurrently active databases", "type": "integer", "name": "NUMDB", "restart_required": true }, { "description": "Federated Database Support", "type": "integer", "name": "FEDERATED", "restart_required": false }, { "description": "Transaction processor monitor name", "type": "string", "name": "TP_MON_NAME", "restart_required": true }, { "description": "Default charge-back account", "type": "string", "name": "DFT_ACCOUNT_STR", "restart_required": false }, { "description": "Size of rotating db2diag & notify logs (MB)", "type": "integer", "name": "DIAGSIZE", "restart_required": true }, { "description": "Default database monitor switch for Buffer pool", "type": "integer", "name": "DFT_MON_BUFPOOL", "restart_required": false }, { "description": "Default database monitor switch for Lock", "type": "integer", "name": "DFT_MON_LOCK", "restart_required": false }, { "description": "Default database monitor switch for Sort", "type": "integer", "name": "DFT_MON_SORT", "restart_required": false }, { "description": "Default database monitor switch for Statement", "type": "integer", "name": "DFT_MON_STMT", "restart_required": false }, { "description": "Default database monitor switch for Table", "type": "integer", "name": "DFT_MON_TABLE", "restart_required": false }, { "description": "Default database monitor switch for Timestamp", "type": "integer", "name": "DFT_MON_TIMESTAMP", "restart_required": false }, { "description": "Default database monitor switch for Unit of work", "type": "integer", "name": "DFT_MON_UOW", "restart_required": false }, { "description": "Monitor health of instance and databases", "type": "integer", "name": "HEALTH_MON", "restart_required": false }, { "description": "SYSADM group name", "type": "string", "name": "SYSADM_GROUP", "restart_required": true }, { "description": "SYSCTRL group name", "type": "string", "name": "SYSCTRL_GROUP", "restart_required": true }, { "description": "SYSMAINT group name", "type": "string", "name": "SYSMAINT_GROUP", "restart_required": true }, { "description": "SYSMON group name", "type": "string", "name": "SYSMON_GROUP", "restart_required": true }, { "description": "Client Userid-Password Plugin", "type": "string", "name":
"CLNT_PW_PLUGIN", "restart_required": true }, { "description": "Client Kerberos Plugin", "type": "string", "name": "CLNT_KRB_PLUGIN", "restart_required": true }, { "description": "Group Plugin", "type": "string", "name": "GROUP_PLUGIN", "restart_required": true }, { "description": "GSS Plugin for Local Authorization", "type": "string", "name": "LOCAL_GSSPLUGIN", "restart_required": true }, { "description": "Server Plugin Mode", "type": "integer", "name": "SRV_PLUGIN_MODE", "restart_required": true }, { "description": "Server List of GSS Plugins", "type": "string", "name": "SRVCON_GSSPLUGIN_LIST", "restart_required": true }, { "description": "Server Userid-Password Plugin", "type": "string", "name": "SRVCON_PW_PLUGIN", "restart_required": true }, { "description": "Server Connection Authentication", "type": "string", "name": "SRVCON_AUTH", "restart_required": true }, { "description": "Database manager authentication", "type": "integer", "name": "AUTHENTICATION", "restart_required": true }, { "description": "Alternate authentication", "type": "integer", "name": "ALTERNATE_AUTH_ENC", "restart_required": true }, { "description": "Cataloging allowed without authority", "type": "integer", "name": "CATALOG_NOAUTH", "restart_required": false }, { "description": "Trust all clients", "type": "integer", "name": "TRUST_ALLCLNTS", "restart_required": true }, { "description": "Trusted client authentication", "type": "integer", "name": "TRUST_CLNTAUTH", "restart_required": true }, { "description": "Bypass federated authentication", "type": "integer", "name": "FED_NOAUTH", "restart_required": false }, { "description": "Database monitor heap size(4KB)", "type": "integer", "name": "MON_HEAP_SZ", "restart_required": false }, { "description": "Java Virtual Machine heap size(4KB)", "type": "integer", "name": "JAVA_HEAP_SZ", "restart_required": true }, { "description": "Audit buffer size(4KB)", "type": "integer", "name": "AUDIT_BUF_SZ", "restart_required": true }, { "description": "Agent stack size", "type": "integer", "name": "AGENT_STACK_SZ", "restart_required": true }, { "description": "Sort heap threshold (4KB)", "type": "integer", "name": "SHEAPTHRES", "restart_required": true }, { "description": "Directory cache support", "type": "integer", "name": "DIR_CACHE", "restart_required": true }, { "description": "Application support layer heap size (4KB)", "type": "integer", "name": "ASLHEAPSZ", "restart_required": true }, { "description": "Max requester I/O block size (bytes)", "type": "integer", "name": "RQRIOBLK", "restart_required": true }, { "description": "Workload impact by throttled utilities", "type": "integer", "name": "UTIL_IMPACT_LIM", "restart_required": true }, { "description": "Priority of agents", "type": "integer", "name": "AGENTPRI", "restart_required": true }, { "description": "Agent pool size", "type": "integer", "name": "NUM_POOLAGENTS", "restart_required": false }, { "description": "Initial number of agents in pool", "type": "integer", "name": "NUM_INITAGENTS", "restart_required": true }, { "description": "Max number of coordinating agents", "type": "integer", "name": "MAX_COORDAGENTS", "restart_required": false }, { "description": "Max number of client connections", "type": "integer", "name": "MAX_CONNECTIONS", "restart_required": false }, { "description": "Keep fenced process", "type": "integer", "name": "KEEPFENCED", "restart_required": true }, { "description": "Number of pooled fenced processes", "type": "integer", "name": "FENCED_POOL", "restart_required": false }, { "description": 
"Initial number of fenced processes", "type": "integer", "name": "NUM_INITFENCED", "restart_required": true }, { "description": "Index re-creation time and redo index build", "type": "integer", "name": "INDEXREC", "restart_required": false }, { "description": "Transaction manager database name", "type": "string", "name": "TM_DATABASE", "restart_required": true }, { "description": "Transaction resync interval (sec)", "type": "integer", "name": "RESYNC_INTERVAL", "restart_required": true }, { "description": "SPM name", "type": "string", "name": "SPM_NAME", "restart_required": true }, { "description": "SPM log size", "type": "integer", "name": "SPM_LOG_FILE_SZ", "restart_required": true }, { "description": "SPM resync agent limit", "type": "integer", "name": "SPM_MAX_RESYNC", "restart_required": true }, { "description": "Discovery mode", "type": "integer", "name": "DISCOVER", "restart_required": true }, { "description": "Discover server instance", "type": "integer", "name": "DISCOVER_INST", "restart_required": false }, { "description": "SSL server keydb file", "type": "string", "name": "SSL_SVR_KEYDB", "restart_required": true }, { "description": "SSL server stash file", "type": "string", "name": "SSL_SVR_STASH", "restart_required": true }, { "description": "SSL server certificate label", "type": "string", "name": "SSL_SVR_LABEL", "restart_required": true }, { "description": "SSL cipher specs", "type": "string", "name": "SSL_CIPHERSPECS", "restart_required": true }, { "description": "SSL versions", "type": "string", "name": "SSL_VERSIONS", "restart_required": true }, { "description": "SSL client keydb file", "type": "string", "name": "SSL_CLNT_KEYDB", "restart_required": true }, { "description": "SSL client stash file", "type": "string", "name": "SSL_CLNT_STASH", "restart_required": true }, { "description": "Maximum query degree of parallelism", "type": "integer", "name": "MAX_QUERYDEGREE", "restart_required": false }, { "description": "Enable intra-partition parallelism", "type": "integer", "name": "INTRA_PARALLEL", "restart_required": true }, { "description": "No. of int. communication buffers(4KB)", "type": "integer", "name": "FCM_NUM_BUFFERS", "restart_required": false }, { "description": "No. of int. communication channels", "type": "integer", "name": "FCM_NUM_CHANNELS", "restart_required": false }, { "description": "db2start/db2stop timeout (min)", "type": "integer", "name": "START_STOP_TIME", "restart_required": false }, { "description": "WLM dispatcher enabled", "type": "integer", "name": "WLM_DISPATCHER", "restart_required": false }, { "description": "WLM dispatcher concurrency", "type": "integer", "name": "WLM_DISP_CONCUR", "restart_required": false }, { "description": "WLM dispatcher CPU shares enabled", "type": "integer", "name": "WLM_DISP_CPU_SHARES", "restart_required": false }, { "description": "WLM dispatcher min. 
utilization (%)", "type": "integer", "name": "WLM_DISP_MIN_UTIL", "restart_required": false }, { "description": "Communication buffer exit library list", "type": "string", "name": "COMM_EXIT_LIST", "restart_required": false } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/templates/mariadb/0000755000175000017500000000000000000000000020703 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mariadb/cluster.config.template0000644000175000017500000000111600000000000025364 0ustar00coreycorey00000000000000[mysqld] bind-address=0.0.0.0 default-storage-engine=innodb [galera] binlog_format=ROW innodb_autoinc_lock_mode=2 innodb_flush_log_at_trx_commit=0 innodb_doublewrite=1 query_cache_size=0 wsrep_on=ON wsrep_slave_threads=8 wsrep_provider=/usr/lib/libgalera_smm.so wsrep_provider_options="gcache.size={{ (128 * flavor['ram']/512)|int }}M; gcache.page_size=1G" wsrep_sst_method=rsync wsrep_sst_auth="{{ replication_user_pass }}" wsrep_cluster_address="gcomm://{{ cluster_ips }}" wsrep_cluster_name={{ cluster_name }} wsrep_node_name={{ instance_name }} wsrep_node_address={{ instance_ip }} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mariadb/config.template0000644000175000017500000000302000000000000023700 0ustar00coreycorey00000000000000[client] port = 3306 socket = /var/run/mysqld/mysqld.sock [mysqld_safe] nice = 0 [mysqld] user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data tmpdir = /var/tmp pid_file = /var/run/mysqld/mysqld.pid socket = /var/run/mysqld/mysqld.sock skip-external-locking = 1 key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K thread_stack = 192K thread_cache_size = {{ (4 * flavor['ram']/512)|int }} myisam-recover-options = BACKUP,FORCE query_cache_type = 1 query_cache_limit = 1M query_cache_size = {{ (8 * flavor['ram']/512)|int }}M innodb_data_file_path = ibdata1:10M:autoextend innodb_buffer_pool_size = {{ (150 * flavor['ram']/512)|int }}M innodb_file_per_table = 1 innodb_log_files_in_group = 2 innodb_log_file_size=50M innodb_log_buffer_size=25M connect_timeout = 15 wait_timeout = 120 join_buffer_size = 1M read_buffer_size = 512K read_rnd_buffer_size = 512K sort_buffer_size = 1M tmp_table_size = {{ (16 * flavor['ram']/512)|int }}M max_heap_table_size = {{ (16 * flavor['ram']/512)|int }}M table_open_cache = {{ (256 * flavor['ram']/512)|int }} table_definition_cache = {{ (256 * flavor['ram']/512)|int }} open_files_limit = {{ (512 * flavor['ram']/512)|int }} max_user_connections = {{ (100 * flavor['ram']/512)|int }} max_connections = {{ (100 * flavor['ram']/512)|int }} default_storage_engine = innodb local-infile = 0 server_id = {{server_id}} [mysqldump] quick = 1 quote-names = 1 max_allowed_packet = 16M [isamchk] key_buffer = 16M !includedir /etc/mysql/conf.d/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mariadb/replica.config.template0000644000175000017500000000017600000000000025327 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mariadb-bin.log relay_log = /var/lib/mysql/data/mariadb-relay-bin.log read_only = true ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mariadb/replica_source.config.template0000644000175000017500000000006700000000000026706 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mariadb-bin.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/templates/mariadb/validation-rules.json0000644000175000017500000001530200000000000025061 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "name": "innodb_file_per_table", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "autocommit", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "local_infile", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "lower_case_table_names", "restart_required": true, "max": 2, "min": 0, "type": "integer" }, { "name": "key_buffer_size", "restart_required": false, "max": 4294967295, "min": 8, "type": "integer" }, { "name": "connect_timeout", "restart_required": false, "max": 31536000, "min": 2, "type": "integer" }, { "name": "join_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 128, "type": "integer" }, { "name": "sort_buffer_size", "restart_required": false, "max": 18446744073709551615, "min": 32768, "type": "integer" }, { "name": "innodb_buffer_pool_size", "restart_required": true, "max": 18446744073709551615, "min": 5242880, "type": "integer" }, { "name": "innodb_flush_log_at_trx_commit", "restart_required": false, "max": 2, "min": 0, "type": "integer" }, { "name": "innodb_flush_method", "restart_required": true, "type": "string" }, { "name": "innodb_log_buffer_size", "restart_required": true, "max": 4294967295, "min": 262144, "type": "integer" }, { "name": "innodb_log_file_size", "restart_required": true, "max": 274877906944, "min": 4194304, "type": "integer" }, { "name": "innodb_open_files", "restart_required": true, "max": 4294967295, "min": 10, "type": "integer" }, { "name": "innodb_thread_concurrency", "restart_required": false, "max": 1000, "min": 0, "type": "integer" }, { "name": "sync_binlog", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" }, { "name": "auto_increment_increment", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "auto_increment_offset", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "bulk_insert_buffer_size", "restart_required": false, "max": 18446744073709551615, "min": 0, "type": "integer" }, { "name": "expire_logs_days", "restart_required": false, "max": 99, "min": 0, "type": "integer" }, { "name": "interactive_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "max_allowed_packet", "restart_required": false, "max": 1073741824, "min": 1024, "type": "integer" }, { "name": "max_connect_errors", "restart_required": false, "max": 18446744073709551615, "min": 1, "type": "integer" }, { "name": "max_connections", "restart_required": false, "max": 100000, "min": 1, "type": "integer" }, { "name": "myisam_sort_buffer_size", "restart_required": false, "max": 18446744073709551615, "min": 4096, "type": "integer" }, { "name": "max_user_connections", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" }, { "name": "server_id", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" }, { "name": "wait_timeout", "restart_required": false, "max": 31536000, "min": 1, 
"type": "integer" }, { "name": "character_set_client", "restart_required": false, "type": "string" }, { "name": "character_set_connection", "restart_required": false, "type": "string" }, { "name": "character_set_database", "restart_required": false, "type": "string" }, { "name": "character_set_filesystem", "restart_required": false, "type": "string" }, { "name": "character_set_results", "restart_required": false, "type": "string" }, { "name": "character_set_server", "restart_required": false, "type": "string" }, { "name": "collation_connection", "restart_required": false, "type": "string" }, { "name": "collation_database", "restart_required": false, "type": "string" }, { "name": "collation_server", "restart_required": false, "type": "string" }, { "name": "performance_schema", "restart_required": true, "type": "boolean" }, { "name": "long_query_time", "restart_required": false, "min": 0, "type": "float" }, { "name": "max_prepared_stmt_count", "restart_required": false, "max": 1048576, "min": 0, "type": "integer" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/templates/mongodb/0000755000175000017500000000000000000000000020731 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mongodb/config.template0000644000175000017500000000011700000000000023732 0ustar00coreycorey00000000000000# mongodb.conf storage.mmapv1.smallFiles: false storage.journal.enabled: true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mongodb/validation-rules.json0000644000175000017500000002155600000000000025117 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "name": "systemLog.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.accessControl.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.command.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.control.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.geo.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.index.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.network.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.query.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.replication.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.sharding.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.storage.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.storage.journal.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.component.write.verbosity", "restart_required": true, "min": 0, "max": 5, "type": "integer" }, { "name": "systemLog.quiet", "restart_required": true, "type": "boolean" }, { "name": "systemLog.traceAllExceptions", "restart_required": true, 
"type": "boolean" }, { "name": "systemLog.logAppend", "restart_required": true, "type": "boolean" }, { "name": "systemLog.logRotate", "restart_required": true, "type": "string" }, { "name": "systemLog.timeStampFormat", "restart_required": true, "type": "string" }, { "name": "net.maxIncomingConnections", "restart_required": true, "min": 0, "type": "integer" }, { "name": "net.wireObjectCheck", "restart_required": true, "type": "boolean" }, { "name": "net.ipv6", "restart_required": true, "type": "boolean" }, { "name": "net.http.enabled", "restart_required": true, "type": "boolean" }, { "name": "net.http.JSONPEnabled", "restart_required": true, "type": "boolean" }, { "name": "net.http.RESTInterfaceEnabled", "restart_required": true, "type": "boolean" }, { "name": "security.authorization", "restart_required": true, "type": "string" }, { "name": "security.sasl.hostName", "restart_required": true, "type": "string" }, { "name": "security.sasl.serviceName", "restart_required": true, "type": "string" }, { "name": "security.sasl.saslauthdSocketPath", "restart_required": true, "type": "string" }, { "name": "security.javascriptEnabled", "restart_required": true, "type": "boolean" }, { "name": "operationProfiling.slowOpThresholdMs", "restart_required": true, "min": 0, "type": "integer" }, { "name": "operationProfiling.mode", "restart_required": true, "type": "string" }, { "name": "storage.indexBuildRetry", "restart_required": true, "type": "boolean" }, { "name": "storage.journal.enabled", "restart_required": true, "type": "boolean" }, { "name": "storage.directoryPerDB", "restart_required": true, "type": "boolean" }, { "name": "storage.syncPeriodSecs", "restart_required": true, "min": 0, "type": "integer" }, { "name": "storage.engine", "restart_required": true, "type": "string" }, { "name": "storage.mmapv1.nsSize", "restart_required": true, "min": 0, "max": 2047, "type": "integer" }, { "name": "storage.mmapv1.quota.enforced", "restart_required": true, "type": "boolean" }, { "name": "storage.mmapv1.quota.maxFilesPerDB", "restart_required": true, "min": 0, "type": "integer" }, { "name": "storage.mmapv1.smallFiles", "restart_required": true, "type": "boolean" }, { "name": "storage.mmapv1.journal.debugFlags", "restart_required": true, "type": "integer" }, { "name": "storage.mmapv1.journal.commitIntervalMs", "restart_required": true, "min": 2, "max": 300, "type": "integer" }, { "name": "storage.wiredTiger.engineConfig.cacheSizeGB", "restart_required": true, "min": 0, "type": "integer" }, { "name": "storage.wiredTiger.engineConfig.statisticsLogDelaySecs", "restart_required": true, "min": 0, "type": "integer" }, { "name": "storage.wiredTiger.engineConfig.journalCompressor", "restart_required": true, "type": "string" }, { "name": "storage.wiredTiger.collectionConfig.blockCompressor", "restart_required": true, "type": "string" }, { "name": "storage.wiredTiger.indexConfig.prefixCompression", "restart_required": true, "type": "boolean" }, { "name": "replication.oplogSizeMB", "restart_required": true, "min": 0, "type": "integer" }, { "name": "replication.secondaryIndexPrefetch", "restart_required": true, "type": "string" }, { "name": "sharding.clusterRole", "restart_required": true, "type": "string" }, { "name": "auditLog.format", "restart_required": true, "type": "string" }, { "name": "auditLog.filter", "restart_required": true, "type": "string" }, { "name": "snmp.subagent", "restart_required": true, "type": "boolean" }, { "name": "snmp.master", "restart_required": true, "type": "boolean" }, { "name": 
"replication.localPingThresholdMs", "restart_required": true, "min": 0, "type": "integer" }, { "name": "sharding.autoSplit", "restart_required": true, "type": "boolean" }, { "name": "sharding.chunkSize", "restart_required": true, "min": 0, "type": "integer" }, { "name": "setParameter", "restart_required": true, "type": "string" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/templates/mysql/0000755000175000017500000000000000000000000020451 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7721107 trove-12.1.0.dev92/trove/templates/mysql/5.5/0000755000175000017500000000000000000000000020760 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mysql/5.5/replica.config.template0000644000175000017500000000017200000000000025400 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = /var/lib/mysql/data/mysql-relay-bin.log read_only = true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mysql/5.5/replica_source.config.template0000644000175000017500000000006500000000000026761 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mysql/config.template0000644000175000017500000000316300000000000023456 0ustar00coreycorey00000000000000[client] port = 3306 socket = /var/run/mysqld/mysqld.sock [mysqld_safe] pid-file = /var/run/mysqld/mysqld.pid socket = /var/run/mysqld/mysqld.sock nice = 0 [mysqld] user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data tmpdir = /var/tmp pid-file = /var/run/mysqld/mysqld.pid socket = /var/run/mysqld/mysqld.sock skip-external-locking = 1 key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K thread_stack = 192K thread_cache_size = {{ (4 * flavor['ram']/512)|int }} myisam-recover-options = BACKUP,FORCE query_cache_type = 1 query_cache_limit = 1M query_cache_size = {{ (8 * flavor['ram']/512)|int }}M innodb_data_file_path = ibdata1:10M:autoextend innodb_buffer_pool_size = {{ (150 * flavor['ram']/512)|int }}M innodb_file_per_table = 1 innodb_log_files_in_group = 2 innodb_log_file_size=50M innodb_log_buffer_size=25M connect_timeout = 15 wait_timeout = 120 join_buffer_size = 1M read_buffer_size = 512K read_rnd_buffer_size = 512K sort_buffer_size = 1M tmp_table_size = {{ (16 * flavor['ram']/512)|int }}M max_heap_table_size = {{ (16 * flavor['ram']/512)|int }}M table_open_cache = {{ (256 * flavor['ram']/512)|int }} table_definition_cache = {{ (256 * flavor['ram']/512)|int }} open_files_limit = {{ (512 * flavor['ram']/512)|int }} max_user_connections = {{ (100 * flavor['ram']/512)|int }} max_connections = {{ (100 * flavor['ram']/512)|int }} default_storage_engine = innodb local-infile = 0 server_id = {{server_id}} performance_schema = ON [mysqldump] quick = 1 quote-names = 1 max_allowed_packet = 16M [isamchk] key_buffer = 16M !includedir /etc/mysql/conf.d/ ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.776111 
trove-12.1.0.dev92/trove/templates/mysql/mysql-test/0000755000175000017500000000000000000000000022573 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mysql/mysql-test/config.template0000644000175000017500000000001300000000000025567 0ustar00coreycorey00000000000000hyper = 0M ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mysql/replica.config.template0000644000175000017500000000044100000000000025070 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log binlog_format = MIXED relay_log = /var/lib/mysql/data/mysql-relay-bin.log relay_log_info_repository = TABLE relay_log_recovery = 1 relay_log_purge = 1 log_slave_updates = ON enforce_gtid_consistency = ON gtid_mode = ON read_only = true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/mysql/replica_source.config.template0000644000175000017500000000021700000000000026451 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log binlog_format = MIXED enforce_gtid_consistency = ON gtid_mode = ON log_slave_updates = ON ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/templates/mysql/validation-rules.json0000644000175000017500000004065300000000000024636 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "name": "innodb_file_per_table", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "autocommit", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "local_infile", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "lower_case_table_names", "restart_required": true, "max": 2, "min": 0, "type": "integer" }, { "name": "key_buffer_size", "restart_required": false, "max": 4294967295, "min": 8, "type": "integer" }, { "name": "connect_timeout", "restart_required": false, "max": 31536000, "min": 2, "type": "integer" }, { "name": "join_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 128, "type": "integer" }, { "name": "sort_buffer_size", "restart_required": false, "max": 18446744073709551615, "min": 32768, "type": "integer" }, { "name": "innodb_buffer_pool_size", "restart_required": true, "max": 18446744073709551615, "min": 5242880, "type": "integer" }, { "name": "innodb_flush_log_at_trx_commit", "restart_required": false, "max": 2, "min": 0, "type": "integer" }, { "name": "innodb_flush_method", "restart_required": true, "type": "string" }, { "name": "innodb_log_buffer_size", "restart_required": true, "max": 4294967295, "min": 262144, "type": "integer" }, { "name": "innodb_log_file_size", "restart_required": true, "max": 274877906944, "min": 4194304, "type": "integer" }, { "name": "innodb_open_files", "restart_required": true, "max": 4294967295, "min": 10, "type": "integer" }, { "name": "innodb_thread_concurrency", "restart_required": false, "max": 1000, "min": 0, "type": "integer" }, { "name": "sync_binlog", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" }, { "name": "auto_increment_increment", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "auto_increment_offset", "restart_required": false, "max": 65535, "min": 1, "type": 
"integer" }, { "name": "bulk_insert_buffer_size", "restart_required": false, "max": 18446744073709551615, "min": 0, "type": "integer" }, { "name": "expire_logs_days", "restart_required": false, "max": 99, "min": 0, "type": "integer" }, { "name": "interactive_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "max_allowed_packet", "restart_required": false, "max": 1073741824, "min": 1024, "type": "integer" }, { "name": "max_connect_errors", "restart_required": false, "max": 18446744073709551615, "min": 1, "type": "integer" }, { "name": "max_connections", "restart_required": false, "max": 100000, "min": 1, "type": "integer" }, { "name": "myisam_sort_buffer_size", "restart_required": false, "max": 18446744073709551615, "min": 4096, "type": "integer" }, { "name": "max_user_connections", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" }, { "name": "server_id", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" }, { "name": "wait_timeout", "restart_required": false, "max": 31536000, "min": 1, "type": "integer" }, { "name": "character_set_client", "restart_required": false, "type": "string" }, { "name": "character_set_connection", "restart_required": false, "type": "string" }, { "name": "character_set_database", "restart_required": false, "type": "string" }, { "name": "character_set_filesystem", "restart_required": false, "type": "string" }, { "name": "character_set_results", "restart_required": false, "type": "string" }, { "name": "character_set_server", "restart_required": false, "type": "string" }, { "name": "collation_connection", "restart_required": false, "type": "string" }, { "name": "collation_database", "restart_required": false, "type": "string" }, { "name": "collation_server", "restart_required": false, "type": "string" }, { "name": "performance_schema", "restart_required": true, "type": "boolean" }, { "name": "long_query_time", "restart_required": false, "min": 0, "type": "float" }, { "name": "max_prepared_stmt_count", "restart_required": false, "max": 1048576, "min": 0, "type": "integer" }, { "name": "innodb_adaptive_flushing_lwm", "restart_required": false, "max": 70, "min": 0, "type": "integer" }, { "name": "innodb_adaptive_max_sleep_delay", "restart_required": false, "max": 1000000, "min": 0, "type": "integer" }, { "name": "innodb_buffer_pool_dump_at_shutdown", "restart_required": false, "type": "boolean" }, { "name": "innodb_change_buffer_max_size", "restart_required": false, "max": 50, "min": 25, "type": "integer" }, { "name": "innodb_checksum_algorithm", "restart_required": false, "type": "string" }, { "name": "innodb_cmp_per_index_enabled", "restart_required": false, "type": "boolean" }, { "name": "innodb_compression_failure_threshold_pct", "restart_required": false, "max": 100, "min": 0, "type": "integer" }, { "name": "innodb_compression_level", "restart_required": false, "max": 9, "min": 0, "type": "integer" }, { "name": "innodb_compression_pad_pct_max", "restart_required": false, "max": 75, "min": 0, "type": "integer" }, { "name": "innodb_disable_sort_file_cache", "restart_required": false, "type": "boolean" }, { "name": "innodb_flush_log_at_timeout", "restart_required": false, "max": 2700, "min": 1, "type": "integer" }, { "name": "innodb_flush_neighbors", "restart_required": false, "max": 2, "min": 0, "type": "integer" }, { "name": "innodb_flushing_avg_loops", "restart_required": false, "max": 1000, "min": 1, "type": "integer" }, { "name": "innodb_force_load_corrupted", 
"restart_required": true, "type": "boolean" }, { "name": "innodb_ft_aux_table", "restart_required": false, "type": "string" }, { "name": "innodb_ft_cache_size", "restart_required": true, "max": 80000000, "min": 1600000, "type": "integer" }, { "name": "innodb_ft_max_token_size", "restart_required": false, "max": 84, "min": 10, "type": "integer" }, { "name": "innodb_ft_num_word_optimize", "restart_required": false, "type": "integer" }, { "name": "innodb_ft_result_cache_limit", "restart_required": false, "min": 1000000, "type": "integer" }, { "name": "innodb_ft_server_stopword_table", "restart_required": false, "type": "string" }, { "name": "innodb_ft_sort_pll_degree", "restart_required": true, "max": 32, "min": 1, "type": "integer" }, { "name": "innodb_ft_user_stopword_table", "restart_required": false, "type": "string" }, { "name": "innodb_io_capacity_max", "restart_required": false, "min": 100, "type": "integer" }, { "name": "innodb_log_compressed_pages", "restart_required": false, "type": "boolean" }, { "name": "innodb_lru_scan_depth", "restart_required": false, "min": 100, "type": "integer" }, { "name": "innodb_max_dirty_pages_pct_lwm", "restart_required": false, "max": 99, "min": 0, "type": "integer" }, { "name": "innodb_max_purge_lag_delay", "restart_required": false, "min": 0, "type": "integer" }, { "name": "innodb_monitor_disable", "restart_required": false, "type": "string" }, { "name": "innodb_monitor_enable", "restart_required": false, "type": "string" }, { "name": "innodb_monitor_reset", "restart_required": false, "type": "string" }, { "name": "innodb_monitor_reset_all", "restart_required": false, "type": "string" }, { "name": "innodb_online_alter_log_max_size", "restart_required": false, "min": 65536, "type": "integer" }, { "name": "innodb_optimize_fulltext_only", "restart_required": false, "type": "boolean" }, { "name": "innodb_page_size", "restart_required": false, "type": "string" }, { "name": "innodb_random_read_ahead", "restart_required": false, "type": "boolean" }, { "name": "innodb_read_only", "restart_required": true, "type": "boolean" }, { "name": "innodb_rollback_segments", "restart_required": false, "max": 128, "min": 1, "type": "integer" }, { "name": "innodb_sort_buffer_size", "restart_required": true, "max": 67108864, "min": 65536, "type": "integer" }, { "name": "innodb_stats_method", "restart_required": false, "type": "string" }, { "name": "innodb_stats_persistent", "restart_required": false, "type": "boolean" }, { "name": "innodb_stats_persistent_sample_pages", "restart_required": false, "type": "integer" }, { "name": "innodb_stats_sample_pages", "restart_required": false, "min": 1, "type": "integer" }, { "name": "innodb_stats_transient_sample_pages", "restart_required": false, "type": "integer" }, { "name": "innodb_sync_array_size", "restart_required": true, "max": 1024, "min": 1, "type": "integer" }, { "name": "innodb_sync_spin_loops", "restart_required": false, "max": 4294967295, "min": 0, "type": "integer" }, { "name": "innodb_buffer_pool_dump_pct", "restart_required": false, "max": 100, "min": 1, "type": "integer" }, { "name": "innodb_adaptive_hash_index_parts", "restart_required": true, "max": 512, "min": 1, "type": "integer" }, { "name": "innodb_background_drop_list_empty", "restart_required": false, "type": "boolean" }, { "name": "innodb_buffer_pool_chunk_size", "restart_required": true, "min": 1048576, "type": "integer" }, { "name": "innodb_deadlock_detect", "restart_required": false, "type": "boolean" }, { "name": "innodb_default_row_format", 
"restart_required": false, "type": "string" }, { "name": "innodb_fill_factor", "restart_required": false, "max": 100, "min": 10, "type": "integer" }, { "name": "innodb_flush_sync", "restart_required": false, "type": "boolean" }, { "name": "innodb_ft_total_cache_size", "restart_required": true, "max": 1600000000, "min": 32000000, "type": "integer" }, { "name": "innodb_large_prefix", "restart_required": false, "type": "boolean" }, { "name": "innodb_log_checksums", "restart_required": false, "type": "boolean" }, { "name": "innodb_log_write_ahead_size", "restart_required": false, "min": 512, "type": "integer" }, { "name": "innodb_max_undo_log_size", "restart_required": false, "min": 10485760, "type": "integer" }, { "name": "innodb_page_cleaners", "restart_required": true, "max": 64, "min": 1, "type": "integer" }, { "name": "innodb_purge_rseg_truncate_frequency", "restart_required": false, "max": 128, "min": 1, "type": "integer" }, { "name": "innodb_stats_include_delete_marked", "restart_required": false, "type": "boolean" }, { "name": "innodb_support_xa", "restart_required": false, "type": "boolean" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.776111 trove-12.1.0.dev92/trove/templates/percona/0000755000175000017500000000000000000000000020733 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.776111 trove-12.1.0.dev92/trove/templates/percona/5.5/0000755000175000017500000000000000000000000021242 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/percona/5.5/replica.config.template0000644000175000017500000000017200000000000025662 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = /var/lib/mysql/data/mysql-relay-bin.log read_only = true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/percona/5.5/replica_source.config.template0000644000175000017500000000006500000000000027243 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/percona/config.template0000644000175000017500000000313300000000000023735 0ustar00coreycorey00000000000000[client] port = 3306 socket = /var/run/mysqld/mysqld.sock [mysqld_safe] pid-file = /var/run/mysqld/mysqld.pid socket = /var/run/mysqld/mysqld.sock nice = 0 [mysqld] user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data tmpdir = /var/tmp pid-file = /var/run/mysqld/mysqld.pid socket = /var/run/mysqld/mysqld.sock skip-external-locking = 1 key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K thread_stack = 192K thread_cache_size = {{ (4 * flavor['ram']/512)|int }} myisam-recover-options = BACKUP,FORCE query_cache_type = 1 query_cache_limit = 1M query_cache_size = {{ (8 * flavor['ram']/512)|int }}M innodb_data_file_path = ibdata1:10M:autoextend innodb_buffer_pool_size = {{ (150 * flavor['ram']/512)|int }}M innodb_file_per_table = 1 innodb_log_files_in_group = 2 innodb_log_file_size=50M innodb_log_buffer_size=25M connect_timeout = 15 wait_timeout = 120 join_buffer_size = 1M read_buffer_size = 512K read_rnd_buffer_size = 
512K sort_buffer_size = 1M tmp_table_size = {{ (16 * flavor['ram']/512)|int }}M max_heap_table_size = {{ (16 * flavor['ram']/512)|int }}M table_open_cache = {{ (256 * flavor['ram']/512)|int }} table_definition_cache = {{ (256 * flavor['ram']/512)|int }} open_files_limit = {{ (512 * flavor['ram']/512)|int }} max_user_connections = {{ (100 * flavor['ram']/512)|int }} max_connections = {{ (100 * flavor['ram']/512)|int }} default_storage_engine = innodb local-infile = 0 server_id = {{server_id}} [mysqldump] quick = 1 quote-names = 1 max_allowed_packet = 16M [isamchk] key_buffer = 16M !includedir /etc/mysql/conf.d/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/percona/replica.config.template0000644000175000017500000000044100000000000025352 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log binlog_format = MIXED relay_log = /var/lib/mysql/data/mysql-relay-bin.log relay_log_info_repository = TABLE relay_log_recovery = 1 relay_log_purge = 1 enforce_gtid_consistency = ON gtid_mode = ON log_slave_updates = ON read_only = true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/percona/replica_source.config.template0000644000175000017500000000025700000000000026737 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log binlog_format = MIXED enforce_gtid_consistency = ON gtid_mode = ON log_slave_updates = ON enforce_storage_engine = InnoDB ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/percona/validation-rules.json0000644000175000017500000001405000000000000025110 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "name": "innodb_file_per_table", "restart_required": true, "max": 1, "min": 0, "type": "integer" }, { "name": "autocommit", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "local_infile", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "key_buffer_size", "restart_required": false, "max": 4294967296, "min": 0, "type": "integer" }, { "name": "connect_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "join_buffer_size", "restart_required": false, "max": 4294967296, "min": 0, "type": "integer" }, { "name": "sort_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 32768, "type": "integer" }, { "name": "innodb_buffer_pool_size", "restart_required": true, "max": 68719476736, "min": 0, "type": "integer" }, { "name": "innodb_flush_log_at_trx_commit", "restart_required": false, "max": 2, "min": 0, "type": "integer" }, { "name": "innodb_log_buffer_size", "restart_required": true, "max": 4294967296, "min": 1048576, "type": "integer" }, { "name": "innodb_open_files", "restart_required": true, "max": 4294967296, "min": 10, "type": "integer" }, { "name": "innodb_thread_concurrency", "restart_required": false, "max": 1000, "min": 0, "type": "integer" }, { "name": "sync_binlog", "restart_required": false, "max": 18446744073709547520, "min": 0, "type": "integer" }, { "name": "auto_increment_increment", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "auto_increment_offset", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "bulk_insert_buffer_size", 
"restart_required": false, "max": 18446744073709547520, "min": 0, "type": "integer" }, { "name": "expire_logs_days", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "interactive_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "max_allowed_packet", "restart_required": false, "max": 1073741824, "min": 1024, "type": "integer" }, { "name": "max_connect_errors", "restart_required": false, "max": 18446744073709547520, "min": 1, "type": "integer" }, { "name": "max_connections", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "myisam_sort_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 4, "type": "integer" }, { "name": "max_user_connections", "restart_required": false, "max": 100000, "min": 1, "type": "integer" }, { "name": "server_id", "restart_required": true, "max": 100000, "min": 1, "type": "integer" }, { "name": "wait_timeout", "restart_required": false, "max": 31536000, "min": 1, "type": "integer" }, { "name": "character_set_client", "restart_required": false, "type": "string" }, { "name": "character_set_connection", "restart_required": false, "type": "string" }, { "name": "character_set_database", "restart_required": false, "type": "string" }, { "name": "character_set_filesystem", "restart_required": false, "type": "string" }, { "name": "character_set_results", "restart_required": false, "type": "string" }, { "name": "character_set_server", "restart_required": false, "type": "string" }, { "name": "collation_connection", "restart_required": false, "type": "string" }, { "name": "collation_database", "restart_required": false, "type": "string" }, { "name": "collation_server", "restart_required": false, "type": "string" }, { "name": "long_query_time", "restart_required": false, "min": 0, "type": "float" }, { "name": "max_prepared_stmt_count", "restart_required": false, "max": 1048576, "min": 0, "type": "integer" } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.776111 trove-12.1.0.dev92/trove/templates/postgresql/0000755000175000017500000000000000000000000021507 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/postgresql/config.template0000644000175000017500000005402100000000000024513 0ustar00coreycorey00000000000000# Pre-compute values used by the template expressions. # Note: The variables have to be in lists due to how scoping works in JINJA templates. # # The recommended amount for 'shared_buffers' on a dedicated database server is 25% of RAM. # Servers with less than 3GB of RAM require a more conservative value to save memory for other processes. {% set shared_buffers_mb = [(0.25 if flavor['ram'] >= 3072 else 0.10) * flavor['ram']] %} # # ----------------------------- # PostgreSQL configuration file # ----------------------------- # # This file consists of lines of the form: # # name = value # # (The "=" is optional.) Whitespace may be used. Comments are introduced with # "#" anywhere on a line. The complete list of parameter names and allowed # values can be found in the PostgreSQL documentation. # # The commented-out settings shown in this file represent the default values. # Re-commenting a setting is NOT sufficient to revert it to the default value; # you need to reload the server. 
# # This file is read on server startup and when the server receives a SIGHUP # signal. If you edit the file on a running system, you have to SIGHUP the # server for the changes to take effect, or use "pg_ctl reload". Some # parameters, which are marked below, require a server shutdown and restart to # take effect. # # Any parameter can also be given as a command-line option to the server, e.g., # "postgres -c log_connections=on". Some parameters can be changed at run time # with the "SET" SQL command. # # Memory units: kB = kilobytes Time units: ms = milliseconds # MB = megabytes s = seconds # GB = gigabytes min = minutes # TB = terabytes h = hours # d = days # # The properties marked as controlled by Trove are managed by the Trove # guest-agent. Any changes to them will be overwritten. #------------------------------------------------------------------------------ # FILE LOCATIONS #------------------------------------------------------------------------------ # The default values of these variables are driven from the -D command-line # option or PGDATA environment variable, represented here as ConfigDir. #data_directory = 'ConfigDir' # use data in another directory # (change requires restart) # (controlled by Trove) #hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file # (change requires restart) # (controlled by Trove) #ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file # (change requires restart) # (controlled by Trove) # If external_pid_file is not explicitly set, no extra PID file is written. #external_pid_file = '' # write an extra PID file # (change requires restart) # (controlled by Trove) #------------------------------------------------------------------------------ # CONNECTIONS AND AUTHENTICATION #------------------------------------------------------------------------------ # - Connection Settings - #listen_addresses = 'localhost' # what IP address(es) to listen on; # comma-separated list of addresses; # defaults to 'localhost'; use '*' for all # (change requires restart) # (controlled by Trove) #port = 5432 # (change requires restart) # (controlled by Trove) #max_connections = 100 # (change requires restart) # Note: Increasing max_connections costs ~400 bytes of shared memory per # connection slot, plus lock space (see max_locks_per_transaction). 
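# For instance (rough arithmetic based on the ~400-byte figure above), raising
# max_connections from 100 to 1000 adds about 900 * 400 bytes = ~360kB of
# shared memory, before accounting for the additional lock space.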
#superuser_reserved_connections = 3 # (change requires restart) #unix_socket_directories = '/tmp' # comma-separated list of directories # (change requires restart) # (controlled by Trove) #unix_socket_group = '' # (change requires restart) # (controlled by Trove) #unix_socket_permissions = 0777 # begin with 0 to use octal notation # (change requires restart) # (controlled by Trove) #bonjour = off # advertise server via Bonjour # (change requires restart) #bonjour_name = '' # defaults to the computer name # (change requires restart) # - Security and Authentication - #authentication_timeout = 1min # 1s-600s #ssl = off # (change requires restart) #ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers # (change requires restart) #ssl_prefer_server_ciphers = on # (change requires restart) #ssl_ecdh_curve = 'prime256v1' # (change requires restart) #ssl_renegotiation_limit = 0 # amount of data between renegotiations #ssl_cert_file = 'server.crt' # (change requires restart) #ssl_key_file = 'server.key' # (change requires restart) #ssl_ca_file = '' # (change requires restart) #ssl_crl_file = '' # (change requires restart) #password_encryption = on #db_user_namespace = off # GSSAPI using Kerberos #krb_server_keyfile = '' #krb_caseins_users = off # - TCP Keepalives - # see "man 7 tcp" for details #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; # 0 selects the system default #tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; # 0 selects the system default #tcp_keepalives_count = 0 # TCP_KEEPCNT; # 0 selects the system default #------------------------------------------------------------------------------ # RESOURCE USAGE (except WAL) #------------------------------------------------------------------------------ # - Memory - shared_buffers = {{ shared_buffers_mb[0]|int }}MB # min 128kB # (change requires restart) #huge_pages = try # on, off, or try # (change requires restart) #temp_buffers = 8MB # min 800kB #max_prepared_transactions = 0 # zero disables the feature # (change requires restart) # Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory # per transaction slot, plus lock space (see max_locks_per_transaction). # It is not advisable to set max_prepared_transactions nonzero unless you # actively intend to use prepared transactions. #work_mem = 4MB # min 64kB #maintenance_work_mem = 64MB # min 1MB #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem max_stack_depth = 7MB # min 100kB # The ideal value is the actual limit enforced # by the OS (8MB on 64-bit flavors) less a safety # margin of 1MB or so. 
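# As an illustration (assuming a typical 64-bit Linux guest), "ulimit -s"
# reports a stack limit of 8192kB, i.e. 8MB; subtracting the ~1MB safety
# margin gives the 7MB value used above.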
#dynamic_shared_memory_type = posix # the default is the first option # supported by the operating system: # posix # sysv # windows # mmap # use none to disable dynamic shared memory # - Disk - #temp_file_limit = -1 # limits per-session temp file space # in kB, or -1 for no limit # - Kernel Resource Usage - #max_files_per_process = 1000 # min 25 # (change requires restart) #shared_preload_libraries = '' # (change requires restart) # - Cost-Based Vacuum Delay - #vacuum_cost_delay = 0 # 0-100 milliseconds #vacuum_cost_page_hit = 1 # 0-10000 credits #vacuum_cost_page_miss = 10 # 0-10000 credits #vacuum_cost_page_dirty = 20 # 0-10000 credits #vacuum_cost_limit = 200 # 1-10000 credits # - Background Writer - #bgwriter_delay = 200ms # 10-10000ms between rounds #bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round #bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round # - Asynchronous Behavior - #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching #max_worker_processes = 8 #------------------------------------------------------------------------------ # WRITE AHEAD LOG #------------------------------------------------------------------------------ # - Settings - wal_level = minimal # minimal, archive, hot_standby, or logical # (change requires restart) # (controlled by Trove) #fsync = on # turns forced synchronization on or off #synchronous_commit = on # synchronization level; # off, local, remote_write, or on #wal_sync_method = fsync # the default is the first option # supported by the operating system: # open_datasync # fdatasync (default on Linux) # fsync # fsync_writethrough # open_sync #full_page_writes = on # recover from partial page writes #wal_log_hints = off # also do full page writes of non-critical updates # (change requires restart) #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers # (change requires restart) #wal_writer_delay = 200ms # 1-10000 milliseconds #commit_delay = 0 # range 0-100000, in microseconds #commit_siblings = 5 # range 1-1000 # - Checkpoints - #checkpoint_timeout = 5min # range 30s-1h #checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 #checkpoint_warning = 30s # 0 disables # - Archiving - archive_mode = off # allows archiving to be done # (change requires restart) # (controlled by Trove) #archive_command = '' # command to use to archive a logfile segment # placeholders: %p = path of file to archive # %f = file name only # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' # (controlled by Trove) #archive_timeout = 0 # force a logfile segment switch after this # number of seconds; 0 disables # (controlled by Trove) #------------------------------------------------------------------------------ # REPLICATION #------------------------------------------------------------------------------ # - Sending Server(s) - # Set these on the master and on any standby that will send replication data. #max_wal_senders = 0 # max number of walsender processes # (change requires restart) #wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables #wal_sender_timeout = 60s # in milliseconds; 0 disables #max_replication_slots = 0 # max number of replication slots # (change requires restart) # - Master Server - # These settings are ignored on a standby server.
#synchronous_standby_names = ''		# standby servers that provide sync rep
					# comma-separated list of application_name
					# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0		# number of xacts by which cleanup is delayed

# - Standby Servers -
# These settings are ignored on a master server.

#hot_standby = off			# "on" allows queries during recovery
					# (change requires restart)
#max_standby_archive_delay = 30s	# max delay before canceling queries
					# when reading WAL from archive;
					# -1 allows indefinite delay
#max_standby_streaming_delay = 30s	# max delay before canceling queries
					# when reading streaming WAL;
					# -1 allows indefinite delay
#wal_receiver_status_interval = 10s	# send replies at least this often
					# 0 disables
#hot_standby_feedback = off		# send info from standby to prevent
					# query conflicts
#wal_receiver_timeout = 60s		# time that receiver waits for
					# communication from master
					# in milliseconds; 0 disables

#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------

# - Planner Method Configuration -

#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on

# - Planner Cost Constants -

#seq_page_cost = 1.0			# measured on an arbitrary scale
#random_page_cost = 4.0			# same scale as above
#cpu_tuple_cost = 0.01			# same scale as above
#cpu_index_tuple_cost = 0.005		# same scale as above
#cpu_operator_cost = 0.0025		# same scale as above
effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB
					# Set to the amount of available RAM
					# less the minimum required for other processes or 512MB.

# - Genetic Query Optimizer -

#geqo = on
#geqo_threshold = 12
#geqo_effort = 5			# range 1-10
#geqo_pool_size = 0			# selects default based on effort
#geqo_generations = 0			# selects default based on effort
#geqo_selection_bias = 2.0		# range 1.5-2.0
#geqo_seed = 0.0			# range 0.0-1.0

# - Other Planner Options -

#default_statistics_target = 100	# range 1-10000
#constraint_exclusion = partition	# on, off, or partition
#cursor_tuple_fraction = 0.1		# range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8		# 1 disables collapsing of explicit
					# JOIN clauses

#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
#------------------------------------------------------------------------------

# - Where to Log -

#log_destination = 'stderr'		# Valid values are combinations of
					# stderr, csvlog, syslog, and eventlog,
					# depending on platform.  csvlog
					# requires logging_collector to be on.
					# (controlled by Trove)

# This is used when logging to stderr:
#logging_collector = off		# Enable capturing of stderr and csvlog
					# into log files. Required to be on for
					# csvlogs.
					# (change requires restart)
					# (controlled by Trove)

# These are only used if logging_collector is on:
#log_directory = 'pg_log'		# directory where log files are written,
					# can be absolute or relative to PGDATA
					# (controlled by Trove)
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
					# can include strftime() escapes
#log_file_mode = 0600			# creation mode for log files,
					# begin with 0 to use octal notation
					# (controlled by Trove)
#log_truncate_on_rotation = off		# If on, an existing log file with the
					# same name as the new log file will be
					# truncated rather than appended to.
					# But such truncation only occurs on
					# time-driven rotation, not on restarts
					# or size-driven rotation.  Default is
					# off, meaning append to existing files
					# in all cases.
#log_rotation_age = 1d			# Automatic rotation of logfiles will
					# happen after that time.  0 disables.
#log_rotation_size = 10MB		# Automatic rotation of logfiles will
					# happen after that much log output.
					# 0 disables.

# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'

# This is only relevant when logging to eventlog (win32):
#event_source = 'PostgreSQL'

# - When to Log -

#client_min_messages = notice		# values in order of decreasing detail:
					#   debug5
					#   debug4
					#   debug3
					#   debug2
					#   debug1
					#   log
					#   notice
					#   warning
					#   error

#log_min_messages = warning		# values in order of decreasing detail:
					#   debug5
					#   debug4
					#   debug3
					#   debug2
					#   debug1
					#   info
					#   notice
					#   warning
					#   error
					#   log
					#   fatal
					#   panic

#log_min_error_statement = error	# values in order of decreasing detail:
					#   debug5
					#   debug4
					#   debug3
					#   debug2
					#   debug1
					#   info
					#   notice
					#   warning
					#   error
					#   log
					#   fatal
					#   panic (effectively off)

#log_min_duration_statement = -1	# -1 is disabled, 0 logs all statements
					# and their durations, > 0 logs only
					# statements running at least this number
					# of milliseconds

# - What to Log -

#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default		# terse, default, or verbose messages
#log_hostname = off
#log_line_prefix = ''			# special values:
					#   %a = application name
					#   %u = user name
					#   %d = database name
					#   %r = remote host and port
					#   %h = remote host
					#   %p = process ID
					#   %t = timestamp without milliseconds
					#   %m = timestamp with milliseconds
					#   %i = command tag
					#   %e = SQL state
					#   %c = session ID
					#   %l = session line number
					#   %s = session start timestamp
					#   %v = virtual transaction ID
					#   %x = transaction ID (0 if none)
					#   %q = stop here in non-session
					#        processes
					#   %% = '%'
					# e.g. '<%u%%%d> '
#log_lock_waits = off			# log lock waits >= deadlock_timeout
#log_statement = 'none'			# none, ddl, mod, all
#log_temp_files = -1			# log temporary files equal or larger
					# than the specified size in kilobytes;
					# -1 disables, 0 logs all temp files
#log_timezone = 'GMT'

#------------------------------------------------------------------------------
# RUNTIME STATISTICS
#------------------------------------------------------------------------------

# - Query/Index Statistics Collector -

#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none			# none, pl, all
#track_activity_query_size = 1024	# (change requires restart)
update_process_title = off
					# (controlled by Trove)
#stats_temp_directory = 'pg_stat_tmp'

# - Statistics Monitoring -

#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off

#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------

#autovacuum = on			# Enable autovacuum subprocess?  'on'
					# requires track_counts to also be on.
#log_autovacuum_min_duration = -1	# -1 disables, 0 logs all actions and
					# their durations, > 0 logs only
					# actions running at least this number
					# of milliseconds.
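# Illustration (hypothetical values): to make autovacuum log its work and
# visit large, write-heavy tables more aggressively, a tuning pass might set,
# for example:
#
#   log_autovacuum_min_duration = 1000    # log any action taking >= 1s
#   autovacuum_vacuum_scale_factor = 0.05 # vacuum after 5% of rows change
#
# These numbers are examples only, not recommendations shipped with Trove.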
#autovacuum_max_workers = 3		# max number of autovacuum subprocesses
					# (change requires restart)
#autovacuum_naptime = 1min		# time between autovacuum runs
#autovacuum_vacuum_threshold = 50	# min number of row updates before
					# vacuum
#autovacuum_analyze_threshold = 50	# min number of row updates before
					# analyze
#autovacuum_vacuum_scale_factor = 0.2	# fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1	# fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000	# maximum XID age before forced vacuum
					# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000	# maximum multixact age
					# before forced vacuum
					# (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms	# default vacuum cost delay for
					# autovacuum, in milliseconds;
					# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1	# default vacuum cost limit for
					# autovacuum, -1 means use
					# vacuum_cost_limit

#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------

# - Statement Behavior -

#search_path = '"$user",public'		# schema names
#default_tablespace = ''		# a tablespace name, '' uses the default
#temp_tablespaces = ''			# a list of tablespace names, '' uses
					# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0			# in milliseconds, 0 is disabled
#lock_timeout = 0			# in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#bytea_output = 'hex'			# hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'

# - Locale and Formatting -

#datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
#timezone = 'GMT'
#timezone_abbreviations = 'Default'	# Select the set of available time zone
					# abbreviations.  Currently, there are
					#   Default
					#   Australia (historical usage)
					#   India
					# You can create your own file in
					# share/timezonesets/.
#extra_float_digits = 0			# min -15, max 3
#client_encoding = sql_ascii		# actually, defaults to database
					# encoding

# These settings are initialized by initdb, but they can be changed.
#lc_messages = 'C'			# locale for system error message
					# strings
#lc_monetary = 'C'			# locale for monetary formatting
#lc_numeric = 'C'			# locale for number formatting
#lc_time = 'C'				# locale for time formatting

# default configuration for text search
#default_text_search_config = 'pg_catalog.simple'

# - Other Defaults -

#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
#session_preload_libraries = ''

#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------

#deadlock_timeout = 1s
#max_locks_per_transaction = 64		# min 10
					# (change requires restart)
# Note:  Each lock table slot uses ~270 bytes of shared memory, and there are
# max_locks_per_transaction * (max_connections + max_prepared_transactions)
# lock table slots.
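# Worked example of the note above (assumed values): with the default
# max_locks_per_transaction = 64, and assuming max_connections = 100 and
# max_prepared_transactions = 0, the lock table has
#
#   64 * (100 + 0) = 6400 slots,
#
# i.e. roughly 6400 * 270 bytes ~= 1.7MB of shared memory.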
#max_pred_locks_per_transaction = 64 # min 10 # (change requires restart) #------------------------------------------------------------------------------ # VERSION/PLATFORM COMPATIBILITY #------------------------------------------------------------------------------ # - Previous PostgreSQL Versions - #array_nulls = on #backslash_quote = safe_encoding # on, off, or safe_encoding #default_with_oids = off #escape_string_warning = on #lo_compat_privileges = off #quote_all_identifiers = off #sql_inheritance = on #standard_conforming_strings = on #synchronize_seqscans = on # - Other Platforms and Clients - #transform_null_equals = off #------------------------------------------------------------------------------ # ERROR HANDLING #------------------------------------------------------------------------------ #exit_on_error = off # terminate session on any error? #restart_after_crash = on # reinitialize after backend crash? #------------------------------------------------------------------------------ # CONFIG FILE INCLUDES #------------------------------------------------------------------------------ # These options allow settings to be loaded from files other than the # default postgresql.conf. #include_dir = 'conf.d' # include files ending in '.conf' from # directory 'conf.d' #include_if_exists = 'exists.conf' # include file only if it exists #include = 'special.conf' # include file #------------------------------------------------------------------------------ # CUSTOMIZED OPTIONS #------------------------------------------------------------------------------ # Add settings for extensions here ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/postgresql/replica.config.template0000644000175000017500000000002300000000000026122 0ustar00coreycorey00000000000000# Currently unused ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/postgresql/replica_source.config.template0000644000175000017500000000002300000000000027502 0ustar00coreycorey00000000000000# Currently unused ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/postgresql/validation-rules.json0000644000175000017500000005777400000000000025710 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "name": "max_connections", "restart_required": true, "min": 0, "type": "integer" }, { "name": "superuser_reserved_connections", "restart_required": true, "min": 1, "type": "integer" }, { "name": "bonjour", "restart_required": true, "type": "boolean" }, { "name": "bonjour_name", "restart_required": true, "type": "string" }, { "name": "authentication_timeout", "restart_required": false, "type": "string" }, { "name": "password_encryption", "restart_required": false, "type": "boolean" }, { "name": "db_user_namespace", "restart_required": false, "type": "boolean" }, { "name": "tcp_keepalives_idle", "restart_required": false, "min": 0, "type": "integer" }, { "name": "tcp_keepalives_interval", "restart_required": false, "min": 0, "type": "integer" }, { "name": "tcp_keepalives_count", "restart_required": false, "min": 0, "type": "integer" }, { "name": "shared_buffers", "restart_required": true, "type": "string" }, { "name": "huge_pages", "restart_required": true, "type": "string" }, { "name": "temp_buffers", "restart_required": false, "type": "string" }, { "name": 
"max_prepared_transactions", "restart_required": true, "min": 0, "type": "integer" }, { "name": "work_mem", "restart_required": false, "type": "string" }, { "name": "maintenance_work_mem", "restart_required": false, "type": "string" }, { "name": "autovacuum_work_mem", "restart_required": false, "min": -1, "type": "integer" }, { "name": "max_stack_depth", "restart_required": false, "type": "string" }, { "name": "dynamic_shared_memory_type", "restart_required": false, "type": "string" }, { "name": "temp_file_limit", "restart_required": false, "min": -1, "type": "integer" }, { "name": "max_files_per_process", "restart_required": true, "min": 0, "type": "integer" }, { "name": "vacuum_cost_delay", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_cost_page_hit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_cost_page_miss", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_cost_page_dirty", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_cost_limit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "bgwriter_delay", "restart_required": false, "type": "string" }, { "name": "bgwriter_lru_maxpages", "restart_required": false, "min": 0, "type": "integer" }, { "name": "bgwriter_lru_multiplier", "restart_required": false, "min": 0, "type": "integer" }, { "name": "effective_io_concurrency", "restart_required": false, "min": 0, "type": "integer" }, { "name": "max_worker_processes", "restart_required": false, "min": 0, "type": "integer" }, { "name": "fsync", "restart_required": false, "type": "boolean" }, { "name": "synchronous_commit", "restart_required": false, "type": "boolean" }, { "name": "wal_sync_method", "restart_required": false, "type": "string" }, { "name": "full_page_writes", "restart_required": false, "type": "boolean" }, { "name": "wal_log_hints", "restart_required": true, "type": "boolean" }, { "name": "wal_buffers", "restart_required": true, "min": -1, "type": "integer" }, { "name": "wal_writer_delay", "restart_required": false, "type": "string" }, { "name": "commit_delay", "restart_required": false, "min": 0, "type": "integer" }, { "name": "commit_siblings", "restart_required": false, "min": 0, "type": "integer" }, { "name": "checkpoint_timeout", "restart_required": false, "type": "string" }, { "name": "checkpoint_completion_target", "restart_required": false, "type": "string" }, { "name": "checkpoint_warning", "restart_required": false, "type": "string" }, { "name": "wal_keep_segments", "restart_required": false, "min": 0, "type": "integer" }, { "name": "wal_sender_timeout", "restart_required": false, "type": "string" }, { "name": "synchronous_standby_names", "restart_required": false, "type": "string" }, { "name": "vacuum_defer_cleanup_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "hot_standby", "restart_required": true, "type": "boolean" }, { "name": "max_standby_archive_delay", "restart_required": false, "type": "string" }, { "name": "max_standby_streaming_delay", "restart_required": false, "type": "string" }, { "name": "wal_receiver_status_interval", "restart_required": false, "type": "string" }, { "name": "hot_standby_feedback", "restart_required": false, "type": "boolean" }, { "name": "wal_receiver_timeout", "restart_required": false, "type": "string" }, { "name": "enable_bitmapscan", "restart_required": false, "type": "boolean" }, { "name": "enable_hashagg", "restart_required": false, "type": "boolean" }, { "name": 
"enable_hashjoin", "restart_required": false, "type": "boolean" }, { "name": "enable_indexscan", "restart_required": false, "type": "boolean" }, { "name": "enable_indexonlyscan", "restart_required": false, "type": "boolean" }, { "name": "enable_material", "restart_required": false, "type": "boolean" }, { "name": "enable_mergejoin", "restart_required": false, "type": "boolean" }, { "name": "enable_nestloop", "restart_required": false, "type": "boolean" }, { "name": "enable_seqscan", "restart_required": false, "type": "boolean" }, { "name": "enable_sort", "restart_required": false, "type": "boolean" }, { "name": "enable_tidscan", "restart_required": false, "type": "boolean" }, { "name": "seq_page_cost", "restart_required": false, "min": 0, "type": "integer" }, { "name": "random_page_cost", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cpu_tuple_cost", "restart_required": false, "type": "string" }, { "name": "cpu_index_tuple_cost", "restart_required": false, "type": "string" }, { "name": "cpu_operator_cost", "restart_required": false, "type": "string" }, { "name": "effective_cache_size", "restart_required": false, "type": "string" }, { "name": "geqo", "restart_required": false, "type": "boolean" }, { "name": "geqo_threshold", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_effort", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_pool_size", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_generations", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_selection_bias", "restart_required": false, "min": 0, "type": "integer" }, { "name": "geqo_seed", "restart_required": false, "min": 0, "type": "integer" }, { "name": "default_statistics_target", "restart_required": false, "min": 0, "type": "integer" }, { "name": "constraint_exclusion", "restart_required": false, "type": "string" }, { "name": "cursor_tuple_fraction", "restart_required": false, "type": "string" }, { "name": "from_collapse_limit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "join_collapse_limit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "log_truncate_on_rotation", "restart_required": false, "type": "boolean" }, { "name": "log_rotation_age", "restart_required": false, "type": "string" }, { "name": "log_rotation_size", "restart_required": false, "type": "string" }, { "name": "client_min_messages", "restart_required": false, "type": "string" }, { "name": "log_min_messages", "restart_required": false, "type": "string" }, { "name": "log_min_error_statement", "restart_required": false, "type": "string" }, { "name": "debug_print_parse", "restart_required": false, "type": "boolean" }, { "name": "debug_print_rewritten", "restart_required": false, "type": "boolean" }, { "name": "debug_print_plan", "restart_required": false, "type": "boolean" }, { "name": "debug_pretty_print", "restart_required": false, "type": "boolean" }, { "name": "log_checkpoints", "restart_required": false, "type": "boolean" }, { "name": "log_connections", "restart_required": false, "type": "boolean" }, { "name": "log_disconnections", "restart_required": false, "type": "boolean" }, { "name": "log_duration", "restart_required": false, "type": "boolean" }, { "name": "log_error_verbosity", "restart_required": false, "type": "string" }, { "name": "log_hostname", "restart_required": false, "type": "boolean" }, { "name": "log_line_prefix", "restart_required": false, "type": "string" }, { "name": 
"log_lock_waits", "restart_required": false, "type": "boolean" }, { "name": "log_statement", "restart_required": false, "type": "string" }, { "name": "log_temp_files", "restart_required": false, "min": -1, "type": "integer" }, { "name": "log_timezone", "restart_required": false, "type": "string" }, { "name": "track_activities", "restart_required": false, "type": "boolean" }, { "name": "track_counts", "restart_required": false, "type": "boolean" }, { "name": "track_io_timing", "restart_required": false, "type": "boolean" }, { "name": "track_functions", "restart_required": false, "type": "string" }, { "name": "track_activity_query_size", "restart_required": true, "min": 0, "type": "integer" }, { "name": "log_parser_stats", "restart_required": false, "type": "boolean" }, { "name": "log_planner_stats", "restart_required": false, "type": "boolean" }, { "name": "log_executor_stats", "restart_required": false, "type": "boolean" }, { "name": "log_statement_stats", "restart_required": false, "type": "boolean" }, { "name": "autovacuum", "restart_required": false, "type": "boolean" }, { "name": "log_autovacuum_min_duration", "restart_required": false, "min": -1, "type": "integer" }, { "name": "autovacuum_max_workers", "restart_required": true, "min": 0, "type": "integer" }, { "name": "autovacuum_naptime", "restart_required": false, "type": "string" }, { "name": "autovacuum_vacuum_threshold", "restart_required": false, "min": 0, "type": "integer" }, { "name": "autovacuum_analyze_threshold", "restart_required": false, "min": 0, "type": "integer" }, { "name": "autovacuum_vacuum_scale_factor", "restart_required": false, "type": "string" }, { "name": "autovacuum_analyze_scale_factor", "restart_required": false, "type": "string" }, { "name": "autovacuum_freeze_max_age", "restart_required": true, "min": 0, "type": "integer" }, { "name": "autovacuum_multixact_freeze_max_age", "restart_required": true, "min": 0, "type": "integer" }, { "name": "autovacuum_vacuum_cost_delay", "restart_required": false, "type": "string" }, { "name": "autovacuum_vacuum_cost_limit", "restart_required": false, "min": -1, "type": "integer" }, { "name": "search_path", "restart_required": false, "type": "string" }, { "name": "default_tablespace", "restart_required": false, "type": "string" }, { "name": "temp_tablespaces", "restart_required": false, "type": "string" }, { "name": "check_function_bodies", "restart_required": false, "type": "boolean" }, { "name": "default_transaction_isolation", "restart_required": false, "type": "string" }, { "name": "default_transaction_read_only", "restart_required": false, "type": "boolean" }, { "name": "default_transaction_deferrable", "restart_required": false, "type": "boolean" }, { "name": "session_replication_role", "restart_required": false, "type": "string" }, { "name": "statement_timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "lock_timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_freeze_min_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_freeze_table_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_multixact_freeze_min_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "vacuum_multixact_freeze_table_age", "restart_required": false, "min": 0, "type": "integer" }, { "name": "bytea_output", "restart_required": false, "type": "string" }, { "name": "xmlbinary", "restart_required": false, "type": "string" }, { "name": "xmloption", 
"restart_required": false, "type": "string" }, { "name": "datestyle", "restart_required": false, "type": "string" }, { "name": "intervalstyle", "restart_required": false, "type": "string" }, { "name": "timezone", "restart_required": false, "type": "string" }, { "name": "timezone_abbreviations", "restart_required": false, "type": "string" }, { "name": "extra_float_digits", "restart_required": false, "min": 0, "type": "integer" }, { "name": "client_encoding", "restart_required": false, "type": "string" }, { "name": "lc_messages", "restart_required": false, "type": "string" }, { "name": "lc_monetary", "restart_required": false, "type": "string" }, { "name": "lc_numeric", "restart_required": false, "type": "string" }, { "name": "lc_time", "restart_required": false, "type": "string" }, { "name": "default_text_search_config", "restart_required": false, "type": "string" }, { "name": "deadlock_timeout", "restart_required": false, "type": "string" }, { "name": "max_locks_per_transaction", "restart_required": true, "min": 0, "type": "integer" }, { "name": "max_pred_locks_per_transaction", "restart_required": true, "min": 0, "type": "integer" }, { "name": "array_nulls", "restart_required": false, "type": "boolean" }, { "name": "backslash_quote", "restart_required": false, "type": "string" }, { "name": "default_with_oids", "restart_required": false, "type": "boolean" }, { "name": "escape_string_warning", "restart_required": false, "type": "boolean" }, { "name": "lo_compat_privileges", "restart_required": false, "type": "boolean" }, { "name": "quote_all_identifiers", "restart_required": false, "type": "boolean" }, { "name": "sql_inheritance", "restart_required": false, "type": "boolean" }, { "name": "standard_conforming_strings", "restart_required": false, "type": "boolean" }, { "name": "synchronize_seqscans", "restart_required": false, "type": "boolean" }, { "name": "transform_null_equals", "restart_required": false, "type": "boolean" }, { "name": "exit_on_error", "restart_required": false, "type": "boolean" }, { "name": "restart_after_crash", "restart_required": false, "type": "boolean" }, { "name": "log_min_duration_statement", "restart_required": false, "type": "string" }, { "name": "backend_flush_after", "restart_required": false, "type": "string" }, { "name": "bgwriter_flush_after", "restart_required": false, "type": "string" }, { "name": "checkpoint_flush_after", "restart_required": false, "type": "string" }, { "name": "force_parallel_mode", "restart_required": false, "type": "boolean" }, { "name": "parallel_setup_cost", "restart_required": false, "type": "float" }, { "name": "parallel_tuple_cost", "restart_required": false, "type": "float" }, { "name": "gin_pending_list_limit", "restart_required": false, "type": "string" }, { "name": "idle_in_transaction_session_timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "replacement_sort_tuples", "restart_required": false, "min": 0, "type": "integer" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.776111 trove-12.1.0.dev92/trove/templates/pxc/0000755000175000017500000000000000000000000020076 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.776111 trove-12.1.0.dev92/trove/templates/pxc/5.5/0000755000175000017500000000000000000000000020405 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 
trove-12.1.0.dev92/trove/templates/pxc/5.5/replica.config.template0000644000175000017500000000017200000000000025025 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = /var/lib/mysql/data/mysql-relay-bin.log read_only = true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/pxc/5.5/replica_source.config.template0000644000175000017500000000006500000000000026406 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/pxc/cluster.config.template0000644000175000017500000000103100000000000024553 0ustar00coreycorey00000000000000[mysqld] binlog_format=ROW bind-address=0.0.0.0 default-storage-engine=innodb innodb_autoinc_lock_mode=2 innodb_flush_log_at_trx_commit=0 wsrep_slave_threads=8 wsrep_provider=/usr/lib/libgalera_smm.so wsrep_provider_options="gcache.size={{ (128 * flavor['ram']/512)|int }}M; gcache.page_size=1G" wsrep_sst_method=xtrabackup-v2 wsrep_sst_auth="{{ replication_user_pass }}" wsrep_cluster_address="gcomm://{{ cluster_ips }}" wsrep_cluster_name={{ cluster_name }} wsrep_node_name={{ instance_name }} wsrep_node_address={{ instance_ip }} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/pxc/config.template0000644000175000017500000000313300000000000023100 0ustar00coreycorey00000000000000[client] port = 3306 socket = /var/run/mysqld/mysqld.sock [mysqld_safe] pid-file = /var/run/mysqld/mysqld.pid socket = /var/run/mysqld/mysqld.sock nice = 0 [mysqld] user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data tmpdir = /var/tmp pid-file = /var/run/mysqld/mysqld.pid socket = /var/run/mysqld/mysqld.sock skip-external-locking = 1 key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K thread_stack = 192K thread_cache_size = {{ (4 * flavor['ram']/512)|int }} myisam-recover-options = BACKUP,FORCE query_cache_type = 1 query_cache_limit = 1M query_cache_size = {{ (8 * flavor['ram']/512)|int }}M innodb_data_file_path = ibdata1:10M:autoextend innodb_buffer_pool_size = {{ (150 * flavor['ram']/512)|int }}M innodb_file_per_table = 1 innodb_log_files_in_group = 2 innodb_log_file_size=50M innodb_log_buffer_size=25M connect_timeout = 15 wait_timeout = 120 join_buffer_size = 1M read_buffer_size = 512K read_rnd_buffer_size = 512K sort_buffer_size = 1M tmp_table_size = {{ (16 * flavor['ram']/512)|int }}M max_heap_table_size = {{ (16 * flavor['ram']/512)|int }}M table_open_cache = {{ (256 * flavor['ram']/512)|int }} table_definition_cache = {{ (256 * flavor['ram']/512)|int }} open_files_limit = {{ (512 * flavor['ram']/512)|int }} max_user_connections = {{ (100 * flavor['ram']/512)|int }} max_connections = {{ (100 * flavor['ram']/512)|int }} default_storage_engine = innodb local-infile = 0 server_id = {{server_id}} [mysqldump] quick = 1 quote-names = 1 max_allowed_packet = 16M [isamchk] key_buffer = 16M !includedir /etc/mysql/conf.d/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/pxc/replica.config.template0000644000175000017500000000041300000000000024514 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log relay_log = 
/var/lib/mysql/data/mysql-relay-bin.log relay_log_info_repository = TABLE relay_log_recovery = 1 relay_log_purge = 1 enforce_gtid_consistency = ON gtid_mode = ON log_slave_updates = ON read_only = true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/pxc/replica_source.config.template0000644000175000017500000000025700000000000026102 0ustar00coreycorey00000000000000[mysqld] log_bin = /var/lib/mysql/data/mysql-bin.log binlog_format = MIXED enforce_gtid_consistency = ON gtid_mode = ON log_slave_updates = ON enforce_storage_engine = InnoDB ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/pxc/validation-rules.json0000644000175000017500000001405100000000000024254 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "name": "innodb_file_per_table", "restart_required": true, "max": 1, "min": 0, "type": "integer" }, { "name": "autocommit", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "local_infile", "restart_required": false, "max": 1, "min": 0, "type": "integer" }, { "name": "key_buffer_size", "restart_required": false, "max": 4294967296, "min": 0, "type": "integer" }, { "name": "connect_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "join_buffer_size", "restart_required": false, "max": 4294967296, "min": 0, "type": "integer" }, { "name": "sort_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 32768, "type": "integer" }, { "name": "innodb_buffer_pool_size", "restart_required": true, "max": 68719476736, "min": 0, "type": "integer" }, { "name": "innodb_flush_log_at_trx_commit", "restart_required": false, "max": 2, "min": 0, "type": "integer" }, { "name": "innodb_log_buffer_size", "restart_required": true, "max": 4294967296, "min": 1048576, "type": "integer" }, { "name": "innodb_open_files", "restart_required": true, "max": 4294967296, "min": 10, "type": "integer" }, { "name": "innodb_thread_concurrency", "restart_required": false, "max": 1000, "min": 0, "type": "integer" }, { "name": "sync_binlog", "restart_required": false, "max": 18446744073709547520, "min": 0, "type": "integer" }, { "name": "auto_increment_increment", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "auto_increment_offset", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "bulk_insert_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 0, "type": "integer" }, { "name": "expire_logs_days", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "interactive_timeout", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "max_allowed_packet", "restart_required": false, "max": 1073741824, "min": 1024, "type": "integer" }, { "name": "max_connect_errors", "restart_required": false, "max": 18446744073709547520, "min": 1, "type": "integer" }, { "name": "max_connections", "restart_required": false, "max": 65535, "min": 1, "type": "integer" }, { "name": "myisam_sort_buffer_size", "restart_required": false, "max": 18446744073709547520, "min": 4, "type": "integer" }, { "name": "max_user_connections", "restart_required": false, "max": 100000, "min": 1, "type": "integer" }, { "name": "server_id", "restart_required": true, "max": 100000, "min": 1, "type": "integer" }, { "name": "wait_timeout", 
"restart_required": false, "max": 31536000, "min": 1, "type": "integer" }, { "name": "character_set_client", "restart_required": false, "type": "string" }, { "name": "character_set_connection", "restart_required": false, "type": "string" }, { "name": "character_set_database", "restart_required": false, "type": "string" }, { "name": "character_set_filesystem", "restart_required": false, "type": "string" }, { "name": "character_set_results", "restart_required": false, "type": "string" }, { "name": "character_set_server", "restart_required": false, "type": "string" }, { "name": "collation_connection", "restart_required": false, "type": "string" }, { "name": "collation_database", "restart_required": false, "type": "string" }, { "name": "collation_server", "restart_required": false, "type": "string" }, { "name": "long_query_time", "restart_required": false, "min": 0, "type": "float" }, { "name": "max_prepared_stmt_count", "restart_required": false, "max": 1048576, "min": 0, "type": "integer" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.776111 trove-12.1.0.dev92/trove/templates/redis/0000755000175000017500000000000000000000000020412 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/redis/config.template0000644000175000017500000012762600000000000023432 0ustar00coreycorey00000000000000# Redis configuration file example # Note on units: when memory size is needed, it is possible to specify # it in the usual form of 1k 5GB 4M and so forth: # # 1k => 1000 bytes # 1kb => 1024 bytes # 1m => 1000000 bytes # 1mb => 1024*1024 bytes # 1g => 1000000000 bytes # 1gb => 1024*1024*1024 bytes # # units are case insensitive so 1GB 1Gb 1gB are all the same. ################################## INCLUDES ################################### # Include one or more other config files here. This is useful if you # have a standard template that goes to all Redis servers but also need # to customize a few per-server settings. Include files can include # other files, so use this wisely. # # Notice option "include" won't be rewritten by command "CONFIG REWRITE" # from admin or Redis Sentinel. Since Redis always uses the last processed # line as value of a configuration directive, you'd better put includes # at the beginning of this file to avoid overwriting config change at runtime. # # If instead you are interested in using includes to override configuration # options, it is better to use include as the last line. # # include /path/to/local.conf # include /path/to/other.conf ################################ GENERAL ##################################### # By default Redis does not run as a daemon. Use 'yes' if you need it. # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. # # Trove currently requires the database to run as a service. daemonize yes # When running daemonized, Redis writes a pid file in /var/run/redis.pid by # default. You can specify a custom pid file location here. # # This has to be a writable path. # Trove will override this property based on the underlying OS. pidfile /var/run/redis/redis-server.pid # Accept connections on the specified port, default is 6379. # If port 0 is specified Redis will not listen on a TCP socket. port 6379 # TCP listen() backlog. # # In high requests-per-second environments you need an high backlog in order # to avoid slow clients connections issues. 
Note that the Linux kernel # will silently truncate it to the value of /proc/sys/net/core/somaxconn so # make sure to raise both the value of somaxconn and tcp_max_syn_backlog # in order to get the desired effect. tcp-backlog 511 # Protected mode is a layer of security protection, in order to avoid that # Redis instances left open on the internet are accessed and exploited. # # When protected mode is on and if: # # 1) The server is not binding explicitly to a set of addresses using the # "bind" directive. # 2) No password is configured. # # The server only accepts connections from clients connecting from the # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain # sockets. # # By default protected mode is enabled. You should disable it only if # you are sure you want clients from other hosts to connect to Redis # even if no authentication is configured, nor a specific set of interfaces # are explicitly listed using the "bind" directive. protected-mode no # By default Redis listens for connections from all the network interfaces # available on the server. It is possible to listen to just one or multiple # interfaces using the "bind" configuration directive, followed by one or # more IP addresses. # # Examples: # # bind 192.168.1.100 10.0.0.1 # bind 127.0.0.1 # Specify the path for the Unix socket that will be used to listen for # incoming connections. There is no default, so Redis will not listen # on a unix socket when not specified. # # Trove uses Unix sockets internally to connect to the database. # Trove will override this property based on the underlying OS. # unixsocket /tmp/redis.sock # unixsocketperm 700 # Close the connection after a client is idle for N seconds (0 to disable) timeout 0 # TCP keepalive. # # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence # of communication. This is useful for two reasons: # # 1) Detect dead peers. # 2) Take the connection alive from the point of view of network # equipment in the middle. # # On Linux, the specified value (in seconds) is the period used to send ACKs. # Note that to close the connection the double of the time is needed. # On other kernels the period depends on the kernel configuration. # # A reasonable value for this option is 60 seconds. tcp-keepalive 0 # If you run Redis from upstart or systemd, Redis can interact with your # supervision tree. Options: # supervised no - no supervision interaction # supervised upstart - signal upstart by putting Redis into SIGSTOP mode # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET # supervised auto - detect upstart or systemd method based on # UPSTART_JOB or NOTIFY_SOCKET environment variables # Note: these supervision methods only signal "process is ready." # They do not enable continuous liveness pings back to your supervisor. supervised systemd # Specify the server verbosity level. # This can be one of: # debug (a lot of information, useful for development/testing) # verbose (many rarely useful info, but not a mess like the debug level) # notice (moderately verbose, what you want in production probably) # warning (only very important / critical messages are logged) loglevel notice # Specify the log file name. Also the empty string can be used to force # Redis to log on the standard output. Note that if you use standard # output for logging but daemonize, logs will be sent to /dev/null # # Trove will override this property based on the underlying OS. 
logfile "" # To enable logging to the system logger, just set 'syslog-enabled' to yes, # and optionally update the other syslog parameters to suit your needs. # syslog-enabled no # Specify the syslog identity. # syslog-ident redis # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. # syslog-facility local0 # Set the number of databases. The default database is DB 0, you can select # a different one on a per-connection basis using SELECT where # dbid is a number between 0 and 'databases'-1 databases 16 ################################ SNAPSHOTTING ################################ # # Save the DB on disk: # # save # # Will save the DB if both the given number of seconds and the given # number of write operations against the DB occurred. # # In the example below the behaviour will be to save: # after 900 sec (15 min) if at least 1 key changed # after 300 sec (5 min) if at least 10 keys changed # after 60 sec if at least 10000 keys changed # # Note: you can disable saving completely by commenting out all "save" lines. # # It is also possible to remove all the previously configured save # points by adding a save directive with a single empty string argument # like in the following example: # # save "" save 900 1 save 300 10 save 60 10000 # By default Redis will stop accepting writes if RDB snapshots are enabled # (at least one save point) and the latest background save failed. # This will make the user aware (in a hard way) that data is not persisting # on disk properly, otherwise chances are that no one will notice and some # disaster will happen. # # If the background saving process will start working again Redis will # automatically allow writes again. # # However if you have setup your proper monitoring of the Redis server # and persistence, you may want to disable this feature so that Redis will # continue to work as usual even if there are problems with disk, # permissions, and so forth. stop-writes-on-bgsave-error yes # Compress string objects using LZF when dump .rdb databases? # For default that's set to 'yes' as it's almost always a win. # If you want to save some CPU in the saving child set it to 'no' but # the dataset will likely be bigger if you have compressible values or keys. rdbcompression yes # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. # This makes the format more resistant to corruption but there is a performance # hit to pay (around 10%) when saving and loading RDB files, so you can disable it # for maximum performances. # # RDB files created with checksum disabled have a checksum of zero that will # tell the loading code to skip the check. rdbchecksum yes # The filename where to dump the DB dbfilename dump.rdb # The working directory. # # The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. # # The Append Only File will also be created inside this directory. # # Note that you must specify a directory here, not a file name. # # This has to be an existing path to a writable directory. # Trove will override this property based on the underlying OS. dir /tmp ################################# REPLICATION ################################# # Master-Slave replication. Use slaveof to make a Redis instance a copy of # another Redis server. A few things to understand ASAP about Redis replication. 
# # 1) Redis replication is asynchronous, but you can configure a master to # stop accepting writes if it appears to be not connected with at least # a given number of slaves. # 2) Redis slaves are able to perform a partial resynchronization with the # master if the replication link is lost for a relatively small amount of # time. You may want to configure the replication backlog size (see the next # sections of this file) with a sensible value depending on your needs. # 3) Replication is automatic and does not need user intervention. After a # network partition slaves automatically try to reconnect to masters # and resynchronize with them. # # slaveof # If the master is password protected (using the "requirepass" configuration # directive below) it is possible to tell the slave to authenticate before # starting the replication synchronization process, otherwise the master will # refuse the slave request. # # masterauth # When a slave loses its connection with the master, or when the replication # is still in progress, the slave can act in two different ways: # # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. # # 2) if slave-serve-stale-data is set to 'no' the slave will reply with # an error "SYNC with master in progress" to all the kind of commands # but to INFO and SLAVEOF. # slave-serve-stale-data yes # You can configure a slave instance to accept writes or not. Writing against # a slave instance may be useful to store some ephemeral data (because data # written on a slave will be easily deleted after resync with the master) but # may also cause problems if clients are writing to it because of a # misconfiguration. # # Since Redis 2.6 by default slaves are read-only. # # Note: read only slaves are not designed to be exposed to untrusted clients # on the internet. It's just a protection layer against misuse of the instance. # Still a read only slave exports by default all the administrative commands # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve # security of read only slaves using 'rename-command' to shadow all the # administrative / dangerous commands. slave-read-only yes # Replication SYNC strategy: disk or socket. # # ------------------------------------------------------- # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY # ------------------------------------------------------- # # New slaves and reconnecting slaves that are not able to continue the replication # process just receiving differences, need to do what is called a "full # synchronization". An RDB file is transmitted from the master to the slaves. # The transmission can happen in two different ways: # # 1) Disk-backed: The Redis master creates a new process that writes the RDB # file on disk. Later the file is transferred by the parent # process to the slaves incrementally. # 2) Diskless: The Redis master creates a new process that directly writes the # RDB file to slave sockets, without touching the disk at all. # # With disk-backed replication, while the RDB file is generated, more slaves # can be queued and served with the RDB file as soon as the current child producing # the RDB file finishes its work. With diskless replication instead once # the transfer starts, new slaves arriving will be queued and a new transfer # will start when the current one terminates. 
# # When diskless replication is used, the master waits a configurable amount of # time (in seconds) before starting the transfer in the hope that multiple slaves # will arrive and the transfer can be parallelized. # # With slow disks and fast (large bandwidth) networks, diskless replication # works better. repl-diskless-sync no # When diskless replication is enabled, it is possible to configure the delay # the server waits in order to spawn the child that transfers the RDB via socket # to the slaves. # # This is important since once the transfer starts, it is not possible to serve # new slaves arriving, that will be queued for the next RDB transfer, so the server # waits a delay in order to let more slaves arrive. # # The delay is specified in seconds, and by default is 5 seconds. To disable # it entirely just set it to 0 seconds and the transfer will start ASAP. repl-diskless-sync-delay 5 # Slaves send PINGs to server in a predefined interval. It's possible to change # this interval with the repl_ping_slave_period option. The default value is 10 # seconds. # # repl-ping-slave-period 10 # The following option sets the replication timeout for: # # 1) Bulk transfer I/O during SYNC, from the point of view of slave. # 2) Master timeout from the point of view of slaves (data, pings). # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). # # It is important to make sure that this value is greater than the value # specified for repl-ping-slave-period otherwise a timeout will be detected # every time there is low traffic between the master and the slave. # # repl-timeout 60 # Disable TCP_NODELAY on the slave socket after SYNC? # # If you select "yes" Redis will use a smaller number of TCP packets and # less bandwidth to send data to slaves. But this can add a delay for # the data to appear on the slave side, up to 40 milliseconds with # Linux kernels using a default configuration. # # If you select "no" the delay for data to appear on the slave side will # be reduced but more bandwidth will be used for replication. # # By default we optimize for low latency, but in very high traffic conditions # or when the master and slaves are many hops away, turning this to "yes" may # be a good idea. repl-disable-tcp-nodelay no # Set the replication backlog size. The backlog is a buffer that accumulates # slave data when slaves are disconnected for some time, so that when a slave # wants to reconnect again, often a full resync is not needed, but a partial # resync is enough, just passing the portion of data the slave missed while # disconnected. # # The bigger the replication backlog, the longer the time the slave can be # disconnected and later be able to perform a partial resynchronization. # # The backlog is only allocated once there is at least a slave connected. # # repl-backlog-size 1mb # After a master has no longer connected slaves for some time, the backlog # will be freed. The following option configures the amount of seconds that # need to elapse, starting from the time the last slave disconnected, for # the backlog buffer to be freed. # # A value of 0 means to never release the backlog. # # repl-backlog-ttl 3600 # The slave priority is an integer number published by Redis in the INFO output. # It is used by Redis Sentinel in order to select a slave to promote into a # master if the master is no longer working correctly. 
# # A slave with a low priority number is considered better for promotion, so # for instance if there are three slaves with priority 10, 100, 25 Sentinel will # pick the one with priority 10, that is the lowest. # # However a special priority of 0 marks the slave as not able to perform the # role of master, so a slave with priority of 0 will never be selected by # Redis Sentinel for promotion. # # By default the priority is 100. slave-priority 100 # It is possible for a master to stop accepting writes if there are less than # N slaves connected, having a lag less or equal than M seconds. # # The N slaves need to be in "online" state. # # The lag in seconds, that must be <= the specified value, is calculated from # the last ping received from the slave, that is usually sent every second. # # This option does not GUARANTEE that N replicas will accept the write, but # will limit the window of exposure for lost writes in case not enough slaves # are available, to the specified number of seconds. # # For example to require at least 3 slaves with a lag <= 10 seconds use: # # min-slaves-to-write 3 # min-slaves-max-lag 10 # # Setting one or the other to 0 disables the feature. # # By default min-slaves-to-write is set to 0 (feature disabled) and # min-slaves-max-lag is set to 10. ################################## SECURITY ################################### # Require clients to issue AUTH before processing any other # commands. This might be useful in environments in which you do not trust # others with access to the host running redis-server. # # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). # # Warning: since Redis is pretty fast an outside user can try up to # 150k passwords per second against a good box. This means that you should # use a very strong password otherwise it will be very easy to break. # # requirepass foobared # Command renaming. # # It is possible to change the name of dangerous commands in a shared # environment. For instance the CONFIG command may be renamed into something # hard to guess so that it will still be available for internal-use tools # but not available for general clients. # # Example: # # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 # # It is also possible to completely kill a command by renaming it into # an empty string: # # rename-command CONFIG "" # # Please note that changing the name of commands that are logged into the # AOF file or transmitted to slaves may cause problems. # # Trove uses 'rename-command' internally to hide certain commands. ################################### LIMITS #################################### # Set the max number of connected clients at the same time. By default # this limit is set to 10000 clients, however if the Redis server is not # able to configure the process file limit to allow for the specified limit # the max number of allowed clients is set to the current file limit # minus 32 (as Redis reserves a few file descriptors for internal uses). # # Once the limit is reached Redis will close all the new connections sending # an error 'max number of clients reached'. # # maxclients 10000 # Don't use more memory than the specified amount of bytes. # When the memory limit is reached Redis will try to remove keys # according to the eviction policy selected (see maxmemory-policy). 
# # If Redis can't remove keys according to the policy, or if the policy is # set to 'noeviction', Redis will start to reply with errors to commands # that would use more memory, like SET, LPUSH, and so on, and will continue # to reply to read-only commands like GET. # # This option is usually useful when using Redis as an LRU cache, or to set # a hard memory limit for an instance (using the 'noeviction' policy). # # WARNING: If you have slaves attached to an instance with maxmemory on, # the size of the output buffers needed to feed the slaves are subtracted # from the used memory count, so that network problems / resyncs will # not trigger a loop where keys are evicted, and in turn the output # buffer of slaves is full with DELs of keys evicted triggering the deletion # of more keys, and so forth until the database is completely emptied. # # In short... if you have slaves attached it is suggested that you set a lower # limit for maxmemory so that there is some free RAM on the system for slave # output buffers (but this is not needed if the policy is 'noeviction'). # # maxmemory # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory # is reached. You can select among five behaviors: # # volatile-lru -> remove the key with an expire set using an LRU algorithm # allkeys-lru -> remove any key according to the LRU algorithm # volatile-random -> remove a random key with an expire set # allkeys-random -> remove a random key, any key # volatile-ttl -> remove the key with the nearest expire time (minor TTL) # noeviction -> don't expire at all, just return an error on write operations # # Note: with any of the above policies, Redis will return an error on write # operations, when there are no suitable keys for eviction. # # At the date of writing these commands are: set setnx setex append # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby # getset mset msetnx exec sort # # The default is: # # maxmemory-policy noeviction # LRU and minimal TTL algorithms are not precise algorithms but approximated # algorithms (in order to save memory), so you can tune it for speed or # accuracy. For default Redis will check five keys and pick the one that was # used less recently, you can change the sample size using the following # configuration directive. # # The default of 5 produces good enough results. 10 Approximates very closely # true LRU but costs a bit more CPU. 3 is very fast but not very accurate. # # maxmemory-samples 5 ############################## APPEND ONLY MODE ############################### # By default Redis asynchronously dumps the dataset on disk. This mode is # good enough in many applications, but an issue with the Redis process or # a power outage may result into a few minutes of writes lost (depending on # the configured save points). # # The Append Only File is an alternative persistence mode that provides # much better durability. For instance using the default data fsync policy # (see later in the config file) Redis can lose just one second of writes in a # dramatic event like a server power outage, or a single write if something # wrong with the Redis process itself happens, but the operating system is # still running correctly. # # AOF and RDB persistence can be enabled at the same time without problems. 
# If the AOF is enabled, on startup Redis will load the AOF, that is the
# file with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.

appendonly no

# The name of the append only file (default: "appendonly.aof")

appendfilename "appendonly.aof"

# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will
# really flush data to disk, while others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this
# to "no", which will let the operating system flush the output buffer when
# it wants, for better performance (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always", which is very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".

# appendfsync always
appendfsync everysec
# appendfsync no

# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix
# for this currently, as even performing fsync in a different thread will
# block our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following
# option that will prevent fsync() from being called in the main process
# while a BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync no". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no", which is the safest pick from the point of view of durability.

no-appendfsync-on-rewrite no

# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file by implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size
# exceeds the base size by more than the specified percentage, the rewrite
# is triggered. You also need to specify a minimal size for the AOF file to
# be rewritten; this is useful to avoid rewriting the AOF file even if the
# percentage increase is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.

auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb

# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user is required
# to fix the AOF file using the "redis-check-aof" utility before restarting
# the server.
#
# Note that if the AOF file is found to be corrupted in the middle,
# the server will still exit with an error. This option only applies when
# Redis tries to read more data from the AOF file but not enough bytes
# are found.

aof-load-truncated yes

################################ LUA SCRIPTING ###############################

# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called any write commands. The
# second is the only way to shut down the server when a write command was
# already issued by the script but the user doesn't want to wait for the
# natural termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.

lua-time-limit 5000

################################ REDIS CLUSTER ###############################
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
# in order to mark it as "mature" we need to wait for a non trivial percentage
# of users to deploy it in production.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Normal Redis instances can't be part of a Redis Cluster; only nodes that
# are started as cluster nodes can. In order to start a Redis instance as a
# cluster node, enable the cluster support by uncommenting the following:
#
# cluster-enabled yes

# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# Make sure that instances running on the same system do not have
# overlapping cluster configuration file names.
#
# cluster-config-file nodes-6379.conf

# Cluster node timeout is the number of milliseconds a node must be
# unreachable for it to be considered in failure state.
# Most other internal time limits are multiples of the node timeout.
#
# cluster-node-timeout 15000

# A slave of a failing master will avoid starting a failover if its data
# looks too old.
#
# There is no simple way for a slave to actually have an exact measure of
# its "data age", so the following two checks are performed:
#
# 1) If there are multiple slaves able to failover, they exchange messages
#    in order to try to give an advantage to the slave with the best
#    replication offset (more data from the master processed).
#    Slaves will try to get their rank by offset, and apply to the start
#    of the failover a delay proportional to their rank.
#
# 2) Every single slave computes the time of the last interaction with
#    its master. This can be the last ping or command received (if the
#    master is still in the "connected" state), or the time that elapsed
#    since the disconnection with the master (if the replication link is
#    currently down). If the last interaction is too old, the slave will
#    not try to failover at all.
#
# Point "2" can be tuned by the user. Specifically a slave will not perform
# the failover if, since the last interaction with the master, the time
# elapsed is greater than:
#
# (node-timeout * slave-validity-factor) + repl-ping-slave-period
#
# So for example if node-timeout is 30 seconds, the slave-validity-factor
# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
# slave will not try to failover if it was not able to talk with the master
# for longer than 310 seconds.
#
# A large slave-validity-factor may allow slaves with too old data to
# failover a master, while a too small value may prevent the cluster from
# being able to elect a slave at all.
#
# For maximum availability, it is possible to set the slave-validity-factor
# to a value of 0, which means that slaves will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
#
# Zero is the only value able to guarantee that when all the partitions
# heal, the cluster will always be able to continue.
#
# cluster-slave-validity-factor 10

# Cluster slaves are able to migrate to orphaned masters, that is, masters
# that are left without working slaves. This improves the cluster's ability
# to resist failures, as otherwise an orphaned master can't be failed over
# in case of failure if it has no working slaves.
#
# Slaves migrate to orphaned masters only if there are still at least a
# given number of other working slaves for their old master. This number
# is the "migration barrier". A migration barrier of 1 means that a slave
# will migrate only if there is at least 1 other working slave for its
# master, and so forth. It usually reflects the number of slaves you want
# for every master in your cluster.
#
# Default is 1 (slaves migrate only if their masters remain with at least
# one slave). To disable migration just set it to a very large value.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
# cluster-migration-barrier 1

# By default Redis Cluster nodes stop accepting queries if they detect
# there is at least a hash slot uncovered (no available node is serving
# it). This way if the cluster is partially down (for example a range of
# hash slots is no longer covered) the whole cluster eventually becomes
# unavailable. It automatically becomes available again as soon as all the
# slots are covered.
#
# However sometimes you want the subset of the cluster which is working
# to continue to accept queries for the part of the key space that is
# still covered.
# In order to do so, just set the cluster-require-full-coverage
# option to no.
#
# cluster-require-full-coverage yes

# In order to set up your cluster make sure to read the documentation
# available at the http://redis.io web site.

################################## SLOW LOG ###################################

# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the
# only stage of command execution where the thread is blocked and cannot
# serve other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what execution time, in microseconds, a command must exceed in order to
# get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.

# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000

# There is no limit to this length. Just be aware that it will consume
# memory. You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128

################################ LATENCY MONITOR ##############################

# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user, who
# can print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal to or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact that, while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0

############################# EVENT NOTIFICATION ##############################

# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
#  K     Keyspace events, published with __keyspace@<db>__ prefix.
#  E     Keyevent events, published with __keyevent@<db>__ prefix.
#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
#  $     String commands
#  l     List commands
#  s     Set commands
#  h     Hash commands
#  z     Sorted set commands
#  x     Expired events (events generated every time a key expires)
#  e     Evicted events (events generated when a key is evicted for maxmemory)
#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
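#
# (Illustrative aside, assuming a client on the same host: once "Ex" events
# are enabled, expirations in database 0 can be observed with the standard
# PSUBSCRIBE command, e.g.:
#
#   redis-cli PSUBSCRIBE __keyevent@0__:expired
#
# The channel name follows the patterns documented above.)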
#
# The "notify-keyspace-events" directive takes as argument a string that is
# composed of zero or more characters. The empty string means that
# notifications are disabled.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""

############################### ADVANCED CONFIG ###############################

# Hashes are encoded using a memory efficient data structure when they have
# a small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following
# directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64

# Similarly to hashes, small lists are also encoded in a special way in
# order to save a lot of space. The special representation is only used
# when you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64

# Lists are also encoded in a special way to save a lot of space.
# The number of entries allowed per internal list node can be specified
# as a fixed maximum size or a maximum number of elements.
# For a fixed maximum size, use -5 through -1, meaning:
# -5: max size: 64 Kb  <-- not recommended for normal workloads
# -4: max size: 32 Kb  <-- not recommended
# -3: max size: 16 Kb  <-- probably not recommended
# -2: max size: 8 Kb   <-- good
# -1: max size: 4 Kb   <-- good
# Positive numbers mean store up to _exactly_ that number of elements
# per list node.
# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
# but if your use case is unique, adjust the settings as necessary.
list-compress-depth 0 is set below; first the node size:
list-max-ziplist-size -2
#
# Lists may also be compressed.
# Compress depth is the number of quicklist ziplist nodes from *each* side
# of the list to *exclude* from compression. The head and tail of the list
# are always uncompressed for fast push/pop operations. Settings are:
# 0: disable all list compression
# 1: depth 1 means "don't start compressing until after 1 node into the
#    list, going from either the head or tail"
#    So: [head]->node->node->...->node->[tail]
#    [head], [tail] will always be uncompressed; inner nodes will compress.
# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
#    2 here means: don't compress head or head->next or tail->prev or tail,
#    but compress all nodes between them.
# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
#    etc.
list-compress-depth 0

# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512

# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length
# and elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64

# HyperLogLog sparse representation bytes limit. The limit includes the
# 16-byte header.
# When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehash the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs lazy rehashing: the more operations you run on a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is
# used by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to
# time to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes

# The client output buffer limits can be used to force disconnection of
# clients that are not reading data from the server fast enough for some
# reason (a common reason is that a Pub/Sub client can't consume messages
# as fast as the publisher can produce them).
#
# The limit can be set differently for the three different classes of
# clients:
#
# normal -> normal clients including MONITOR clients
# slave  -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the
# following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or
# if the soft limit is reached and remains reached for the specified number
# of seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously stays
# over the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

unixsocket /var/run/redis/redis.sock
unixsocketperm 777

# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/redis/replica.config.template0000644000175000017500000000002400000000000025026 0ustar00coreycorey00000000000000slave-read-only yes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/redis/replica_source.config.template0000644000175000017500000000006300000000000026411 0ustar00coreycorey00000000000000repl-diskless-sync yes repl-diskless-sync-delay 10 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/redis/validation-rules.json0000644000175000017500000001720500000000000024574 0ustar00coreycorey00000000000000{ "configuration-parameters": [ { "name": "tcp-backlog", "restart_required": true, "min": 0, "type": "integer" }, { "name": "timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "tcp-keepalive", "restart_required": false, "min": 0, "type": "integer" }, { "name": "loglevel", "restart_required": false, "type": "string" }, { "name": "databases", "restart_required": true, "min": 0, "type": "integer" }, { "name": "save", "restart_required": false, "type": "string" }, { "name": "stop-writes-on-bgsave-error", "restart_required": false, "type": "boolean" }, { "name": "rdbcompression", "restart_required": false, "type": "boolean" }, { "name": "rdbchecksum", "restart_required": true, "type": "boolean" }, { "name": "slave-serve-stale-data", "restart_required": false, "type": "boolean" }, { "name": "slave-read-only", "restart_required": false, "type": "boolean" }, { "name": "repl-diskless-sync", "restart_required": false, "type": "boolean" }, { "name": "repl-diskless-sync-delay", "restart_required": false, "min": 0, "type": "integer" }, { "name": "repl-ping-slave-period", "restart_required": false, "min": 0, "type": "integer" }, { "name": "repl-timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "repl-disable-tcp-nodelay", "restart_required": false, "type": "boolean" }, { "name": "repl-backlog-size", "restart_required": false, "min": 16384, "type": "integer" }, { "name": "repl-backlog-ttl", "restart_required": false, "min": 0, "type": "integer" }, { "name": "slave-priority", "restart_required": false, "min": 0, "type": "integer" }, { "name": "min-slaves-to-write", "restart_required": false, "min": 0, "type": "integer" }, { "name": "min-slaves-max-lag", "restart_required": false, "min": 0, "type": "integer" }, { "name": "requirepass", "restart_required": false,
"type": "string" }, { "name": "maxclients", "restart_required": false, "min": 0, "type": "integer" }, { "name": "maxmemory", "restart_required": false, "min": 0, "type": "integer" }, { "name": "maxmemory-policy", "restart_required": false, "type": "string" }, { "name": "maxmemory-samples", "restart_required": false, "min": 0, "type": "integer" }, { "name": "appendonly", "restart_required": false, "type": "boolean" }, { "name": "appendfsync", "restart_required": false, "type": "string" }, { "name": "no-appendfsync-on-rewrite", "restart_required": false, "type": "boolean" }, { "name": "auto-aof-rewrite-percentage", "restart_required": false, "min": 0, "type": "integer" }, { "name": "auto-aof-rewrite-min-size", "restart_required": false, "min": 0, "type": "integer" }, { "name": "aof-load-truncated", "restart_required": false, "type": "boolean" }, { "name": "lua-time-limit", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cluster-node-timeout", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cluster-slave-validity-factor", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cluster-migration-barrier", "restart_required": false, "min": 0, "type": "integer" }, { "name": "cluster-require-full-coverage", "restart_required": false, "type": "boolean" }, { "name": "slowlog-log-slower-than", "restart_required": false, "min": 0, "type": "integer" }, { "name": "slowlog-max-len", "restart_required": false, "min": 0, "type": "integer" }, { "name": "latency-monitor-threshold", "restart_required": false, "min": 0, "type": "integer" }, { "name": "notify-keyspace-events", "restart_required": false, "type": "string" }, { "name": "hash-max-ziplist-entries", "restart_required": false, "min": 0, "type": "integer" }, { "name": "hash-max-ziplist-value", "restart_required": false, "min": 0, "type": "integer" }, { "name": "list-max-ziplist-entries", "restart_required": false, "min": 0, "type": "integer" }, { "name": "list-max-ziplist-value", "restart_required": false, "min": 0, "type": "integer" }, { "name": "set-max-intset-entries", "restart_required": false, "min": 0, "type": "integer" }, { "name": "zset-max-ziplist-entries", "restart_required": false, "min": 0, "type": "integer" }, { "name": "zset-max-ziplist-value", "restart_required": false, "min": 0, "type": "integer" }, { "name": "hll-sparse-max-bytes", "restart_required": false, "min": 0, "max": 16000, "type": "integer" }, { "name": "activerehashing", "restart_required": false, "type": "boolean" }, { "name": "client-output-buffer-limit", "restart_required": false, "type": "string" }, { "name": "hz", "restart_required": false, "min": 1, "max": 500, "type": "integer" }, { "name": "aof-rewrite-incremental-fsync", "restart_required": false, "type": "boolean" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.776111 trove-12.1.0.dev92/trove/templates/vertica/0000755000175000017500000000000000000000000020741 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/vertica/config.template0000644000175000017500000000000000000000000023731 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/templates/vertica/validation-rules.json0000644000175000017500000016042100000000000025122 0ustar00coreycorey00000000000000{ 
"configuration-parameters": [ { "description": "No of active partitions", "type": "integer", "name": "ActivePartitionCount", "restart_required": false }, { "description": "Interval between collection of nodes' addresses (seconds)", "type": "integer", "name": "AddressCollectorInterval", "restart_required": false }, { "description": "Interval between advancing the AHM (seconds)", "type": "integer", "name": "AdvanceAHMInterval", "restart_required": false }, { "description": "Consider backup epochs when setting new AHM", "type": "integer", "name": "AHMBackupManagement", "restart_required": false }, { "description": "Allow names containing non-ASCII UTF-8 characters", "type": "integer", "name": "AllowNonAsciiNames", "restart_required": false }, { "description": "Interval between Tuple Mover row count statistics updates (seconds)", "type": "integer", "name": "AnalyzeRowCountInterval", "restart_required": false }, { "description": "Maximum number of columns to analyze with each analyze stats plan", "type": "integer", "name": "AnalyzeStatsPlanMaxColumns", "restart_required": false }, { "description": "Number of sampling bands to use when not using entire data set.", "type": "integer", "name": "AnalyzeStatsSampleBands", "restart_required": false }, { "description": "ARC will commit only if the change is more than the percentage specified", "type": "integer", "name": "ARCCommitPercentage", "restart_required": false }, { "description": "The confidence level at which to run audits of license size utilization. Represent 99.5% as 99.5.", "type": "integer", "name": "AuditConfidenceLevel", "restart_required": false }, { "description": "The error tolerance for audits of license size utilization. Represent 4.5% as 4.5.", "type": "integer", "name": "AuditErrorTolerance", "restart_required": false }, { "description": "Use as recommended by Technical Support", "type": "integer", "name": "BasicVerticaOptions", "restart_required": false }, { "description": "size of memory managed by memory manager (in MB)", "type": "integer", "name": "BlockCacheSize", "restart_required": true }, { "description": "Buffer query output to allow possible retry of the query. 
Values allowed: 0(never buffer), 1(always buffer), 2(default: vertica decides before query begins executing to buffer based on certain criteria)", "type": "integer", "name": "BufferQueryOutputForPossibleRetry", "restart_required": false }, { "description": "If set to 1, position index will be cached", "type": "integer", "name": "CachePositionIndex", "restart_required": true }, { "description": "If true, cascade to a target resource pool will always lead to replanning the query on the target pool", "type": "integer", "name": "CascadeResourcePoolAlwaysReplan", "restart_required": false }, { "description": "Split catalog checkpoint into chunks of approximately this size", "type": "integer", "name": "CatalogCheckpointChunkSizeKB", "restart_required": false }, { "description": "Minimum transaction log size before a new catalog checkpoint is created", "type": "integer", "name": "CatalogCheckpointMinLogSizeKB", "restart_required": false }, { "description": "Transaction log size must be at least this fraction of the checkpoint size before a new catalog checkpoint is created", "type": "integer", "name": "CatalogCheckpointPercent", "restart_required": false }, { "description": "Rename storage files during final cleanup upon removal from the catalog", "type": "integer", "name": "CatalogDeindexRename", "restart_required": false }, { "description": "Check data integrity using CRCs (should be enabled unless performance is adversely impacted)", "type": "integer", "name": "CheckCRCs", "restart_required": false }, { "description": "Check data sortedness before writing to ROS", "type": "integer", "name": "CheckDataTargetSortOrder", "restart_required": false }, { "description": "Seconds to wait for 'late' nodes to finish recovery actions and proceed with the cluster", "type": "integer", "name": "ClusterRecoveryWait", "restart_required": false }, { "description": "The collation function column width is 4 plus its data column width times CollationExpansion octets", "type": "integer", "name": "CollationExpansion", "restart_required": false }, { "description": "Catalog compression (0: off, 1: chkpt+systxnlogs, 2: chkpt+txnlogs)", "type": "integer", "name": "CompressCatalogOnDisk", "restart_required": true }, { "description": "When enabled, control traffic will be compressed", "type": "integer", "name": "CompressDistCalls", "restart_required": false }, { "description": "When enabled, data traffic will be compressed; this reduces data bandwidth at a cost of CPU time", "type": "integer", "name": "CompressNetworkData", "restart_required": false }, { "description": "Compute APPROXCOUNTDISTINCTs when analyzing statistics. Default is false", "type": "integer", "name": "ComputeApproxNDVsDuringAnalyzeStats", "restart_required": false }, { "description": "Number of ROS containers that are allowed before new ROSs are prevented (ROS pushback)", "type": "integer", "name": "ContainersPerProjectionLimit", "restart_required": false }, { "description": "When doing a COPY FROM VERTICA without an explicit columns list, include IDENTITY columns in the implicit list", "type": "integer", "name": "CopyFromVerticaWithIdentity", "restart_required": false }, { "description": "Time interval in seconds after which a node sends a heartbeat (Set to 0 to disable.)", "type": "integer", "name": "DatabaseHeartbeatInterval", "restart_required": false }, { "description": "Number of rows to be sampled for correlation analysis during DBD design.
Default: 4500", "type": "integer", "name": "DBDCorrelationSampleRowCount", "restart_required": false }, { "description": "Percentage of rows to be sampled for correlation analysis during DBD design. Default: 0 Use DBDCorrelationSampleRowCount", "type": "integer", "name": "DBDCorrelationSampleRowPct", "restart_required": false }, { "description": "Number of rows to be sampled for count distinct analysis during DBD design. Default: 0 Use DBDCountDistinctSampleRowPct", "type": "integer", "name": "DBDCountDistinctSampleRowCount", "restart_required": false }, { "description": "Percentage of rows to be sampled for count distinct analysis during DBD design. Default: 100", "type": "integer", "name": "DBDCountDistinctSampleRowPct", "restart_required": false }, { "description": "Concurrency setting for the deployment/rebalance process in the DBD. Default: 0", "type": "integer", "name": "DBDDeploymentParallelism", "restart_required": false }, { "description": "Dynamic sampling scheme to be used for encoding analysis during DBD design. Default: true", "type": "integer", "name": "DBDDynamicSampling", "restart_required": false }, { "description": "Number of rows to be sampled for encoding analysis during DBD design. Default: 1000000", "type": "integer", "name": "DBDEncodingSampleRowCount", "restart_required": false }, { "description": "Percentage of rows to be sampled for encoding analysis during DBD design. Default: 0 Use DBDEncodingSampleRowCount", "type": "integer", "name": "DBDEncodingSampleRowPct", "restart_required": false }, { "description": "Number of minimum rows expected in Fact Table, default 1M rows", "type": "integer", "name": "DBDLargestTableRowCountBoundary", "restart_required": false }, { "description": "Log internal DBD design process in DC tables. Default: false DC logging of design process is turned OFF by default", "type": "integer", "name": "DBDLogInternalDesignProcess", "restart_required": false }, { "description": "Concurrency setting for the parallelism in the Storage Optimization phase of Database Designer. Default: 0", "type": "integer", "name": "DBDMaxConcurrencyForEncodingExperiment", "restart_required": false }, { "description": "If the largest fact table has more than DBDLargestTableRowCountBoundary rows, then use this percentage of the largest fact table to define the number of rows below which a table should be replicated. Default: 1%", "type": "integer", "name": "DBDRepLargeRowCountPct", "restart_required": false }, { "description": "If the largest fact table has fewer than DBDLargestTableRowCountBoundary rows, then use this percentage of the largest fact table to define the number of rows below which a table should be replicated. Default: 10%", "type": "integer", "name": "DBDRepSmallRowCountPct", "restart_required": false }, { "description": "Number of bands sampled using Dynamic Sampling Algorithm. Default: 100", "type": "integer", "name": "DBDSampleStorageBandCount", "restart_required": false }, { "description": "Number of rows to be sampled for segmentation skew analysis during DBD design. Default: 8000", "type": "integer", "name": "DBDSkewDetectionSampleRowCount", "restart_required": false }, { "description": "Percentage of rows to be sampled for segmentation skew analysis during DBD design. Default: 0 Use DBDSkewDetectionSampleRowCount", "type": "integer", "name": "DBDSkewDetectionSampleRowPct", "restart_required": false }, { "description": "Determines source for resource allocation during a designer invocation.
Default: false Uses user's resource pool", "type": "integer", "name": "DBDUseOnlyDesignerResourcePool", "restart_required": false }, { "description": "Default setting for intervalstyle; 1 is UNITS; 0 is PLAIN and conforms to standard SQL", "type": "integer", "name": "DefaultIntervalStyle", "restart_required": false }, { "description": "Defines the default session startup Locale for the database", "type": "string", "name": "DefaultSessionLocale", "restart_required": false }, { "description": "Disable schema-level privileges on tables.", "type": "integer", "name": "DisableInheritedPrivileges", "restart_required": false }, { "description": "Set to disable local resegmentation", "type": "integer", "name": "DisableLocalResegmentation", "restart_required": false }, { "description": "If \u2018false\u2019, the optimizer randomly chooses nodes to do the work of any down nodes", "type": "integer", "name": "DisableNodeDownOptimization", "restart_required": false }, { "description": "Do not allow creating prejoin projections", "type": "integer", "name": "DisablePrejoinProjections", "restart_required": false }, { "description": "Disallow the MultipleActiveResultSets (MARS) feature to be enabled", "type": "integer", "name": "DisallowMars", "restart_required": false }, { "description": "Interval between disk space polls (for disk resource management) (seconds)", "type": "integer", "name": "DiskSpacePollingInterval", "restart_required": true }, { "description": "Enable DMLs to cancel conflicting TM tasks to acquire lock", "type": "integer", "name": "DMLCancelTM", "restart_required": false }, { "description": "Use as recommended by Technical Support", "type": "integer", "name": "EEVerticaOptions", "restart_required": false }, { "description": "Enables Access Policy feature", "type": "integer", "name": "EnableAccessPolicy", "restart_required": false }, { "description": "Enable all granted roles on login", "type": "integer", "name": "EnableAllRolesOnLogin", "restart_required": false }, { "description": "If true and apportionable source/parser are defined for the load, input may get split into multiple parts (portions) and loaded by multiple threads/servers in parallel", "type": "integer", "name": "EnableApportionLoad", "restart_required": false }, { "description": "Turn on/off the automatic update row count, min and max when DML queries are run", "type": "integer", "name": "EnableAutoDMLStats", "restart_required": false }, { "description": "A value of 1 enables block memory manager or 0 disables memory manager", "type": "integer", "name": "EnableBlockMemoryManager", "restart_required": true }, { "description": "If true and a chunker is defined for the corresponding parser multiple parse threads can cooperate to parse the output of a single source", "type": "integer", "name": "EnableCooperativeParse", "restart_required": false }, { "description": "Enable the usage data collector", "type": "integer", "name": "EnableDataCollector", "restart_required": false }, { "description": "Enabled cipher suites for TLS", "type": "string", "name": "EnabledCipherSuites", "restart_required": true }, { "description": "Enable EE Thread Pool to reduce threads used", "type": "integer", "name": "EnableEEThreadPool", "restart_required": false }, { "description": "Enable SIPS for early materialized merge join, on multi-block inners", "type": "integer", "name": "EnableEMMJMultiblockSIPS", "restart_required": false }, { "description": "Allow expression results to be materialized as projection columns", "type": "integer", "name": 
"EnableExprsInProjections", "restart_required": false }, { "description": "allow user-specified force outer rule", "type": "integer", "name": "EnableForceOuter", "restart_required": false }, { "description": "Allow aggregate projections using GROUP BY", "type": "integer", "name": "EnableGroupByProjections", "restart_required": false }, { "description": "Enable JIT compilation optimizations", "type": "integer", "name": "EnableJIT", "restart_required": false }, { "description": "Determines whether new primary key constraints will be enabled by default", "type": "integer", "name": "EnableNewPrimaryKeysByDefault", "restart_required": false }, { "description": "Determines whether new unique key constraints will be enabled by default", "type": "integer", "name": "EnableNewUniqueKeysByDefault", "restart_required": false }, { "description": "Enable Parallel Hash build to improve join performance", "type": "integer", "name": "EnableParallelHashBuild", "restart_required": false }, { "description": "Enable Parallel Sort to improve sort performance", "type": "integer", "name": "EnableParallelSort", "restart_required": false }, { "description": "0 if the special ANY_ROW event for pattern matching is not enabled. Otherwise it is enabled.", "type": "integer", "name": "EnablePatternMatchingAnyRow", "restart_required": false }, { "description": "Enable Plan Stability Feature", "type": "integer", "name": "EnablePlanStability", "restart_required": false }, { "description": "Execute active directed queries in Plan Stability Store", "type": "integer", "name": "EnablePlanStabilityLookup", "restart_required": false }, { "description": "Query threads can be restricted to executing on specific CPUs via session resource pool attributes", "type": "integer", "name": "EnableResourcePoolCPUAffinity", "restart_required": false }, { "description": "Enable runtime task priority scheduler to allow high priority queries to use more CPU time and IO bandwidth", "type": "integer", "name": "EnableRuntimePriorityScheduler", "restart_required": false }, { "description": "Enable SSL for the server", "type": "integer", "name": "EnableSSL", "restart_required": true }, { "description": "Enable bundling data files along with their index files. Also MaxBundleableROSSizeKB is effective when this is enabled.", "type": "integer", "name": "EnableStorageBundling", "restart_required": false }, { "description": "Force casts from varchar to Time,TimeTz,Timestamp,TimestampTz,Interval to error instead of returning null", "type": "integer", "name": "EnableStrictTimeCasts", "restart_required": false }, { "description": "Allow aggregate projections using Top K / LIMIT", "type": "integer", "name": "EnableTopKProjections", "restart_required": false }, { "description": "Allow aggregate projections using UDTransforms", "type": "integer", "name": "EnableUDTProjections", "restart_required": false }, { "description": "Enable optimizations based on guarantee of uniqueness", "type": "integer", "name": "EnableUniquenessOptimization", "restart_required": false }, { "description": "If set to 1 total threads will be equal to number of virtual processors else thread will be set to total real core", "type": "integer", "name": "EnableVirtualCoreCount", "restart_required": false }, { "description": "Granularity of time to epoch mapping (seconds)", "type": "integer", "name": "EpochMapInterval", "restart_required": false }, { "description": "Warn of strings which use backslash quoting; using an E'...' 
escape string will avoid this warning", "type": "integer", "name": "EscapeStringWarning", "restart_required": false }, { "description": "The number of bands or clusters to form when taking samples while executing evaluate_delete_performance().", "type": "integer", "name": "EvaluateDeletePerformanceSampleStorageBandCount", "restart_required": false }, { "description": "The number of samples to take while executing evaluate_delete_performance().", "type": "integer", "name": "EvaluateDeletePerformanceSampleStorageCount", "restart_required": false }, { "description": "Exclude ephemeral nodes in SELECT queries. Default is false", "type": "integer", "name": "ExcludeEphemeralNodesInQueries", "restart_required": false }, { "description": "Maximum number of rejected/exceptions records that will be written while querying an external table. Default: 100, Unlimited: -1", "type": "integer", "name": "ExternalTablesExceptionsLimit", "restart_required": false }, { "description": "Time interval to wait before replacing a DOWN node with a STANDBY node", "type": "integer", "name": "FailoverToStandbyAfter", "restart_required": false }, { "description": "Maximum memory (in MB) for each UDx side process", "type": "string", "name": "FencedUDxMemoryLimitMB", "restart_required": false }, { "description": "Number of files (per projection) that are allowed before new ROSs are prevented (ROS pushback)", "type": "integer", "name": "FilesPerProjectionLimit", "restart_required": false }, { "description": "Multiplier to pad the observed length of fields in flex tables when casting them to regular vs long types", "type": "integer", "name": "FlexTableDataTypeGuessMultiplier", "restart_required": false }, { "description": "Default size of __raw__ column in flex tables", "type": "integer", "name": "FlexTableRawSize", "restart_required": false }, { "description": "Force all UDx's to run in fenced mode", "type": "integer", "name": "ForceUDxFencedMode", "restart_required": false }, { "description": "Ensure catalog durable on disk after each commit", "type": "integer", "name": "FsyncCatalogForLuck", "restart_required": false }, { "description": "Call fsync after each data file is written", "type": "integer", "name": "FsyncDataForLuck", "restart_required": false }, { "description": "Maximum amount of memory (in megabytes) that can be used by a single GROUP BY", "type": "string", "name": "GBHashMemCapMB", "restart_required": false }, { "description": "Enables profiling for all statements at the EE operator level", "type": "integer", "name": "GlobalEEProfiling", "restart_required": false }, { "description": "A user who'll inherit objects of dropped users.
Should be unset (blank) by default (opt-in)", "type": "string", "name": "GlobalHeirUsername", "restart_required": false }, { "description": "Enables profiling for all statements", "type": "integer", "name": "GlobalQueryProfiling", "restart_required": false }, { "description": "Enables profiling for all sessions", "type": "integer", "name": "GlobalSessionProfiling", "restart_required": false }, { "description": "When enabled, the hash prepass mode of the EE GroupGenerator operator will be used", "type": "integer", "name": "GroupGeneratorHashingEnabled", "restart_required": false }, { "description": "Amount of time in seconds waiting for connection to WebHCat before abort", "type": "integer", "name": "HCatConnectionTimeout", "restart_required": false }, { "description": "Name of the HCatalog User Defined Parser", "type": "string", "name": "HCatParserName", "restart_required": false }, { "description": "Slow transfer in bytes/sec limit lower than which transfer will abort after 'HCatSlowTransferTime' amount of time", "type": "integer", "name": "HCatSlowTransferLimit", "restart_required": false }, { "description": "Amount of time allowed for transfer below slow transfer limit before abort", "type": "integer", "name": "HCatSlowTransferTime", "restart_required": false }, { "description": "Name of the HCatalog User Defined Source", "type": "string", "name": "HCatSourceName", "restart_required": false }, { "description": "Name of the HCatalog webservice, used in constructing a url to query this service", "type": "string", "name": "HCatWebserviceName", "restart_required": false }, { "description": "Version of the HCatalog webservice, used in constructing a url to query this service", "type": "string", "name": "HCatWebserviceVersion", "restart_required": false }, { "description": "Upper bound on the number of epochs kept in the epoch map", "type": "integer", "name": "HistoryRetentionEpochs", "restart_required": false }, { "description": "Number of seconds of epochs kept in the epoch map (seconds)", "type": "integer", "name": "HistoryRetentionTime", "restart_required": false }, { "description": "Path to the java binary for executing UDx written in Java", "type": "string", "name": "JavaBinaryForUDx", "restart_required": false }, { "description": "Minimum heap size (in MB) for Java UDx side process", "type": "integer", "name": "JavaSideProcessMinHeapSizeMB", "restart_required": false }, { "description": "Keep ROS Min/Max values on all columns. Enables some optimizations, at a cost of catalog space.", "type": "integer", "name": "KeepMinMaxOnAllColumns", "restart_required": false }, { "description": "SNMP event when the LGE for the node lags more than LGELagThreshold seconds behind the last epoch close time", "type": "integer", "name": "LGELagThreshold", "restart_required": false }, { "description": "Controls the maximum ROS output in data load; Negative and zero will be considered as 1.", "type": "integer", "name": "LoadMaxFinalROSCount", "restart_required": false }, { "description": "Time to wait for a table lock before giving up (seconds)", "type": "integer", "name": "LockTimeout", "restart_required": false }, { "description": "Interval (in seconds) at which heartbeat messages are sent to vertica.log. 
(Set to 0 to disable.)", "type": "integer", "name": "LogHeartbeatInterval", "restart_required": false }, { "description": "SNMP event LowDiskSpace is raised when disk utilization exceeds this percentage", "type": "integer", "name": "LowDiskSpaceWarningPct", "restart_required": false }, { "description": "Max number of columns used in auto projection segmentation expression (0 is to use all columns)", "type": "integer", "name": "MaxAutoSegColumns", "restart_required": false }, { "description": "ROS files which are smaller than this size (KB) are selected for bundling. '0' means bundling of separate ROS's is disabled, though pidx and fdb files of an individual ROS will be bundled if EnableStorageBundling is set. Maximum allowed size is 1024 (1048576 bytes)", "type": "integer", "name": "MaxBundleableROSSizeKB", "restart_required": false }, { "description": "Maximum number of client sessions; in addition five dbadmin sessions are allowed", "type": "integer", "name": "MaxClientSessions", "restart_required": false }, { "description": "Defines the no. of constraint violation checks per internal query in analyze_constraints(). Default is -1", "type": "integer", "name": "MaxConstraintChecksPerQuery", "restart_required": false }, { "description": "Maximum file size for Data Collector logs in KB", "type": "integer", "name": "MaxDataCollectorFileSize", "restart_required": false }, { "description": "Maximum desired size of an EE block (used to move tuples between operators); the actual block size may be larger (must have capacity for at least 2 rows)", "type": "integer", "name": "MaxDesiredEEBlockSize", "restart_required": false }, { "description": "Maximum number of DVROSes attached to a single ROS container; once reached, dv merge out happens", "type": "integer", "name": "MaxDVROSPerContainer", "restart_required": false }, { "description": "Maximum length of individual lines (entries) in the vertica.log log file. Longer lines are truncated.
'0' means no limit.", "type": "integer", "name": "MaxLogLineLength", "restart_required": false }, { "description": "The max ROS size in MB a merge out job can produce", "type": "integer", "name": "MaxMrgOutROSSizeMB", "restart_required": false }, { "description": "Maximum amount of memory used by the Optimizer; Increasing this value may help with 'Optimizer memory use exceeds allowed limit' errors (MB)", "type": "integer", "name": "MaxOptMemMB", "restart_required": false }, { "description": "Maximum amount of memory used by the Optimizer in the context of DBD; Increasing this value may help with 'Run Database Designer with more memory or increase Database Designer memory usage limit' errors (MB)", "type": "integer", "name": "MaxOptMemMBInDBD", "restart_required": false }, { "description": "Maximum amount of memory allowed for parsing a single request; Increasing this value may help with 'Request size too big' errors (MB)", "type": "integer", "name": "MaxParsedQuerySizeMB", "restart_required": false }, { "description": "Max no of partitions per projection", "type": "integer", "name": "MaxPartitionCount", "restart_required": false }, { "description": "The number of times the system might try to re-run a query if the first run does not succeed.", "type": "integer", "name": "MaxQueryRetries", "restart_required": false }, { "description": "# of failed attempts before recovery gives up", "type": "integer", "name": "MaxRecoverErrors", "restart_required": true }, { "description": "Maximum number of historic passes made by Recovery before moving to the current pass where locks are taken", "type": "integer", "name": "MaxRecoverHistoricPasses", "restart_required": true }, { "description": "# of failed attempts before refresh gives up", "type": "integer", "name": "MaxRefreshErrors", "restart_required": false }, { "description": "Maximum number of historic passes made by Refresh before moving to the current pass where locks are taken", "type": "integer", "name": "MaxRefreshHistoricPasses", "restart_required": false }, { "description": "Maximum number of ROSes in a stratum; once reached, merge out happens", "type": "integer", "name": "MaxROSPerStratum", "restart_required": false }, { "description": "Memory to allocate to per-node Merge Join", "type": "integer", "name": "MergeJoinInnerInitialMB", "restart_required": false }, { "description": "If set to true, the cache will be used to pick the projection for mergeout", "type": "integer", "name": "MergeOutCache", "restart_required": false }, { "description": "Interval between Tuple Mover checks for mergeouts to perform (seconds)", "type": "integer", "name": "MergeOutInterval", "restart_required": false }, { "description": "Minimum free before catalog writes are refused (MB) (Default(-1) is 1GB or 2% free)", "type": "integer", "name": "MinimumCatalogDiskMegabytes", "restart_required": false }, { "description": "Minimum free before column data writes are refused (MB) (Default(-1) is 4GB or 5% free)", "type": "integer", "name": "MinimumDataDiskMegabytes", "restart_required": false }, { "description": "Minimum free before temp data writes are refused (MB) (Default(-1) is 8GB or 10% free)", "type": "integer", "name": "MinimumDataDiskTempMegabytes", "restart_required": false }, { "description": "Minimum size of the inner input to a join that will trigger the Optimizer to attempt a sort merge join (MB)", "type": "integer", "name": "MinSortMergeJoinMB", "restart_required": false }, { "description": "Interval between Tuple Mover checks for moveouts to perform (seconds)",
"type": "integer", "name": "MoveOutInterval", "restart_required": false }, { "description": "The number of epochs data resides in WOS before the Tuple Mover triggers a moveout based on age", "type": "integer", "name": "MoveOutMaxAgeEpochs", "restart_required": false }, { "description": "The amount of time data resides in WOS before the Tuple Mover triggers a moveout based on age (seconds)", "type": "integer", "name": "MoveOutMaxAgeTime", "restart_required": false }, { "description": "The amount of WOS used before the Tuple Mover triggers a moveout based on utilization (percent)", "type": "integer", "name": "MoveOutSizePct", "restart_required": false }, { "description": "Memory to allocate to per-node Prepass GroupBy Operator", "type": "integer", "name": "NewEEGroupBySmallMemMB", "restart_required": false }, { "description": "Number of rows the EE will assign per thread when multiple threads are processing the same large ROS", "type": "integer", "name": "NewEEROSSubdivisionRows", "restart_required": false }, { "description": "Number of threads the EE will attempt to use for processing (per plan)", "type": "integer", "name": "NewEEThreads", "restart_required": false }, { "description": "Time to wait during shutdown, when auto recovery is not possible", "type": "integer", "name": "NoRecoverShutdownWait", "restart_required": true }, { "description": "Use as recommended by Technical Support", "type": "string", "name": "OptVerticaOptions", "restart_required": false }, { "description": "If true use a DT per local segment, even when sorting", "type": "integer", "name": "ParallelizeLocalSegmentLoad", "restart_required": false }, { "description": "Override the heap memory allocator for PCRE pattern matching library", "type": "integer", "name": "PatternMatchAllocator", "restart_required": true }, { "description": "Sets the recursion limit for PCRE used by pattern matching.", "type": "integer", "name": "PatternMatchingMatchLimitRecursion", "restart_required": false }, { "description": "The max number of rows per partition for pattern matching", "type": "integer", "name": "PatternMatchingMaxPartition", "restart_required": false }, { "description": "The max number of matches per partition for pattern matching", "type": "integer", "name": "PatternMatchingMaxPartitionMatches", "restart_required": false }, { "description": "The size of ovector for pcre_exec. 
Increase if using ANY_ROW and there are many subpattern groupings", "type": "integer", "name": "PatternMatchingPerMatchWorkspaceSize", "restart_required": false }, { "description": "Use JIT for PCRE regex matching in queries", "type": "integer", "name": "PatternMatchingUseJit", "restart_required": false }, { "description": "Override the stack memory allocator for PCRE pattern matching library", "type": "integer", "name": "PatternMatchStackAllocator", "restart_required": true }, { "description": "Pin Vertica to given number of CPUs (-1 = no pin)", "type": "integer", "name": "PinProcessors", "restart_required": true }, { "description": "Processor index to start with when pinning processors", "type": "integer", "name": "PinProcessorsOffset", "restart_required": true }, { "description": "If set to true, allows creating unsegmented projection using pre-excavator style", "type": "integer", "name": "PreExcavatorReplicatedProjection", "restart_required": false }, { "description": "Only load DataCollector records which satisfy time based predicates", "type": "integer", "name": "PruneDataCollectorByTime", "restart_required": false }, { "description": "Only load system table columns which participate in the query into SysWOS", "type": "integer", "name": "PruneSystemTableColumns", "restart_required": false }, { "description": "Maximum % of rows that may be deleted before Tuple Mover purges the ROS through mergeout", "type": "integer", "name": "PurgeMergeoutPercent", "restart_required": false }, { "description": "The max amount of memory MIN/MAX for RANGE moving window can use in MB", "type": "integer", "name": "RangeWindowMaxMem", "restart_required": false }, { "description": "Before starting recovery, perform cleanup of deleted files", "type": "integer", "name": "ReapBeforeRecover", "restart_required": true }, { "description": "When recovering a projection with an identical buddy from scratch, recovery may be able to directly copy storage containers if this feature is enabled", "type": "integer", "name": "RecoverByContainer", "restart_required": false }, { "description": "Seconds to wait for dirty transactions before cancelling the session", "type": "integer", "name": "RecoveryDirtyTxnWait", "restart_required": false }, { "description": "Trigger moveout automatically on commit of data to WOS", "type": "integer", "name": "ReflexiveMoveout", "restart_required": false }, { "description": "When refreshing a projection from an identical buddy, refresh may be able to directly copy storage containers if this feature is enabled", "type": "integer", "name": "RefreshByContainer", "restart_required": false }, { "description": "Configure the buffer size (in bytes) for the remote initiator to cache query results.
Value zero will turn off the feature of routing one-executor query plans to the remote initiator", "type": "integer", "name": "RemoteInitiatorBufSize", "restart_required": false }, { "description": "Interval between checking and removing unused database snapshots (seconds)", "type": "integer", "name": "RemoveSnapshotInterval", "restart_required": false }, { "description": "Switch Replay Delete to use the new algorithm once runtime scan statistics exceed the threshold", "type": "integer", "name": "ReplayDeleteAlgorithmSwitchThreshold", "restart_required": false }, { "description": "Fraction of total resources that can be assigned to locally initiated requests; the remainder is only used by remote requests", "type": "integer", "name": "ResLowLimPctOfHighLim", "restart_required": true }, { "description": "Restrict non-dbadmin users from viewing system tables", "type": "integer", "name": "RestrictSystemTables", "restart_required": true }, { "description": "Number of 64K ROS blocks that may be kept in the decompression cache (default is 4096 blocks; 256MB)", "type": "integer", "name": "ROSCacheBlocks", "restart_required": true }, { "description": "Number of large (>64K) ROS blocks that may be kept in the decompression cache (for Grouped ROSs)", "type": "integer", "name": "ROSCacheLargeBlocks", "restart_required": true }, { "description": "Maximum number of ROSes in a stratum; once reached, mergeout happens", "type": "integer", "name": "ROSPerStratum", "restart_required": false }, { "description": "Set the query duration threshold in microseconds to save profiling information to the dc_execution_engine_profiles table.", "type": "integer", "name": "SaveDCEEProfileThresholdUS", "restart_required": false }, { "description": "NONE (Default), MD5, and SHA512", "type": "string", "name": "SecurityAlgorithm", "restart_required": false }, { "description": "Auto projections will always be segmented. Default: true", "type": "integer", "name": "SegmentAutoProjection", "restart_required": false }, { "description": "Create range segmentation on node_name for DataCollector tables", "type": "integer", "name": "SegmentDataCollector", "restart_required": false }, { "description": "# Days. Delete closed session profiling data when the data is SessionProfilingAgeOut days old.", "type": "integer", "name": "SessionProfilingAgeOut", "restart_required": false }, { "description": "If the average of the rows scanned squared during a delete is higher than this limit, a warning message about the projection is printed to the console. Disable by setting to -1.", "type": "integer", "name": "SlowDeleteConsoleWarningLimit", "restart_required": false }, { "description": "If the average of the rows scanned squared during a delete is higher than this limit, a warning message about the projection is printed to the vertica.log and saved in vs_comments. This limit is also used by evaluate_delete_performance to determine if a projection is likely to have slow delete performance. Disable by setting to -1.
Console delete performance warnings will still be printed to log.", "type": "integer", "name": "SlowDeleteSystemWarningLimit", "restart_required": false }, { "description": "Size per column below which a ROS is automatically stored grouped (bytes)", "type": "integer", "name": "SmallROSSize", "restart_required": false }, { "description": "Minimum length of time (in seconds) a snapshot must have existed before systemTask tries to remove it", "type": "integer", "name": "SnapshotRetentionTime", "restart_required": false }, { "description": "Where Vertica sends SNMP traps - 'host_name port CommunityString'. This is a comma-separated list.", "type": "string", "name": "SnmpTrapDestinationsList", "restart_required": false }, { "description": "List of events that Vertica traps: Low Disk Space, Read Only File System, Loss of K Safety, Current Fault Tolerance at Critical Level, Too Many ROS Containers, WOS Over Flow, Node State Change, Recovery Failure, Recovery Error, Recovery Lock Error, Recovery Projection Retrieval Error, Refresh Error, Tuple Mover Error, Stale Checkpoint", "type": "string", "name": "SnmpTrapEvents", "restart_required": false }, { "description": "Enable sending of SNMP traps", "type": "integer", "name": "SnmpTrapsEnabled", "restart_required": false }, { "description": "SortCheck to generate SortCheck step in plan", "type": "integer", "name": "SortCheckOption", "restart_required": false }, { "description": "Report level for Sort Order violations. Default is error. 0 for logging, 1 for error, 2 for panic", "type": "integer", "name": "SortOrderReportLevel", "restart_required": false }, { "description": "Controls the number of Sort worker threads; 0 disables background threads", "type": "integer", "name": "SortWorkerThreads", "restart_required": false }, { "description": "The server's SSL CA certificate", "type": "string", "name": "SSLCA", "restart_required": true }, { "description": "The server's SSL certificate", "type": "string", "name": "SSLCertificate", "restart_required": true }, { "description": "The server's SSL private key", "type": "string", "name": "SSLPrivateKey", "restart_required": true }, { "description": "Disable backslash quoting in string constants; required to conform to standard SQL", "type": "integer", "name": "StandardConformingStrings", "restart_required": false }, { "description": "Sets the behavior for dealing with undeclared UDx function parameters", "type": "integer", "name": "StrictUDxParameterChecking", "restart_required": false }, { "description": "Enable event trapping for Syslog", "type": "integer", "name": "SyslogEnabled", "restart_required": false }, { "description": "Low Disk Space, Read Only File System, Loss of K Safety, Current Fault Tolerance at Critical Level, Too Many ROS Containers, WOS Over Flow, Node State Change, Recovery Failure, Recovery Error, Recovery Lock Error, Recovery Projection Retrieval Error, Refresh Error, Tuple Mover Error, Stale Checkpoint", "type": "string", "name": "SyslogEvents", "restart_required": false }, { "description": "auth, uucp, authpriv (Linux only) local0, cron, local1, daemon, local2, ftp (Linux only) local3, lpr, local4, mail, local5, news, local6, user (default system) local7", "type": "string", "name": "SyslogFacility", "restart_required": false }, { "description": "Interval between system resource utilization monitoring checks (seconds)", "type": "integer", "name": "SystemMonitorInterval", "restart_required": false }, { "description": "A message is logged if a monitored system resource changes by at least
SystemMonitorThreshold (percent)", "type": "integer", "name": "SystemMonitorThreshold", "restart_required": false }, { "description": "Turn on terraced (multi-level) data network routing if it would reduce stream count by this factor", "type": "integer", "name": "TerraceRoutingFactor", "restart_required": false }, { "description": "When updating the index to account for deleted records in the source table, pre-compute the tokens for the deleted records. This takes time, but speeds up the actual deletion.", "type": "integer", "name": "TextIndexComputeDeletedTokens", "restart_required": false }, { "description": "Default maximum size of tokens stored for text-indexing. Only affects new indices. Larger values will decrease the performance of the index.", "type": "integer", "name": "TextIndexMaxTokenLength", "restart_required": false }, { "description": "The max amount of memory TopK(Heap) can use in MB", "type": "integer", "name": "TopKHeapMaxMem", "restart_required": false }, { "description": "READ COMMITTED (Default) - Last epoch for reads and current epoch for writes. SERIALIZABLE - Current epoch for reads and writes", "type": "string", "name": "TransactionIsolationLevel", "restart_required": false }, { "description": "Determines whether the transaction is read/write or read-only. Read/write is the default", "type": "string", "name": "TransactionMode", "restart_required": false }, { "description": "Trust pk and unique constraints as a guarantee of uniqueness", "type": "integer", "name": "TrustConstraintsAsUnique", "restart_required": false }, { "description": "Number of seconds to wait for UDx to finish a block of data before giving up", "type": "integer", "name": "UDxFencedBlockTimeout", "restart_required": false }, { "description": "Number of seconds to wait for UDx to finish cancel-related cleanup work", "type": "integer", "name": "UDxFencedCancelTimeout", "restart_required": false }, { "description": "Number of seconds to wait for external procedures in UDx fenced mode", "type": "integer", "name": "UDxFencedExternalProcedureTimeout", "restart_required": false }, { "description": "Optimizer uses modular hash for resegmenting intermediate results.", "type": "integer", "name": "UseModularHashForReseg", "restart_required": false }, { "description": "Optimizer only considers redistribution choices which are cost model resilient.", "type": "integer", "name": "UseOnlyResilientRedistribution", "restart_required": false }, { "description": "Include virtual table data from recovering nodes in monitoring query results", "type": "integer", "name": "UseRecoveringNodesInVirtualTableQueries", "restart_required": false }, { "description": "Use safer decompression scheme to reduce the chance of crashes if disk data has been corrupted", "type": "integer", "name": "UseSafeDecompression", "restart_required": false }, { "description": "Use 5.0-style truncating integer division for the '/' operator", "type": "integer", "name": "UseV50IntegerDivision", "restart_required": false }, { "description": "Execute the external procedures in the zygote process", "type": "integer", "name": "UseZygoteForExternalProcedures", "restart_required": false }, { "description": "Create a log entry each time a client opens a connection and closes it immediately; load balancers often do this for a health check", "type": "integer", "name": "WarnOnIncompleteStartupPacket", "restart_required": false }, { "description": "Use materialization strategy to support the WITH clause", "type": "integer", "name": "WithClauseMaterialization",
"restart_required": false } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.776111 trove-12.1.0.dev92/trove/tests/0000755000175000017500000000000000000000000016450 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/__init__.py0000644000175000017500000000302200000000000020556 0ustar00coreycorey00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os # Groups DBAAS_API = "dbaas.api" DBAAS_API_INSTANCES = "dbaas.api.instances" DBAAS_API_INSTANCES_DELETE = "dbaas.api.instances.delete" DBAAS_API_USERS = "dbaas.api.users" DBAAS_API_USERS_ACCESS = "dbaas.api.users.access" DBAAS_API_USERS_ROOT = "dbaas.api.users.root" DBAAS_API_DATABASES = "dbaas.api.databases" DBAAS_API_VERSIONS = "dbaas.api.versions" DBAAS_API_DATASTORES = "dbaas.api.datastores" DBAAS_API_MGMT_DATASTORES = "dbaas.api.mgmt.datastores" DBAAS_API_INSTANCE_ACTIONS = "dbaas.api.instances.actions" DBAAS_API_BACKUPS = "dbaas.api.backups" DBAAS_API_CONFIGURATIONS = "dbaas.api.configurations" DBAAS_API_REPLICATION = "dbaas.api.replication" # Use '-t' to avoid the warning message 'mesg: ttyname failed: Inappropriate # ioctl for device' SSH_CMD = ("ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no " "-o LogLevel=quiet -t -i %s" % os.environ.get("TROVE_TEST_SSH_KEY_FILE", "")) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7801108 trove-12.1.0.dev92/trove/tests/api/0000755000175000017500000000000000000000000017221 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/api/__init__.py0000644000175000017500000000000000000000000021320 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/backups.py0000644000175000017500000004572100000000000021234 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from proboscis.asserts import assert_equal from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis.asserts import fail from proboscis.decorators import time_out from proboscis import SkipTest from proboscis import test from troveclient.compat import exceptions from trove.common import cfg from trove.common import exception from trove.common.utils import generate_uuid from trove.common.utils import poll_until from trove import tests from trove.tests.api.instances import instance_info from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE from trove.tests.api.instances import WaitForGuestInstallationToFinish from trove.tests.config import CONFIG from trove.tests.util import create_dbaas_client from trove.tests.util.users import Requirements BACKUP_NAME = 'backup_test' BACKUP_DESC = 'test description' TIMEOUT_BACKUP_CREATE = 60 * 30 TIMEOUT_BACKUP_DELETE = 120 backup_info = None incremental_info = None incremental_db = generate_uuid() incremental_restore_instance_id = None total_num_dbs = 0 backup_count_prior_to_create = 0 backup_count_for_instance_prior_to_create = 0 @test(depends_on_groups=[tests.DBAAS_API_INSTANCE_ACTIONS], groups=[tests.DBAAS_API_BACKUPS], enabled=CONFIG.swift_enabled) class CreateBackups(object): @test def test_backup_create_instance(self): """Test create backup for a given instance.""" # Necessary to test that the count increases. global backup_count_prior_to_create backup_count_prior_to_create = len(instance_info.dbaas.backups.list()) global backup_count_for_instance_prior_to_create backup_count_for_instance_prior_to_create = len( instance_info.dbaas.instances.backups(instance_info.id)) datastore_version = instance_info.dbaas.datastore_versions.get( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version) result = instance_info.dbaas.backups.create(BACKUP_NAME, instance_info.id, BACKUP_DESC) global backup_info backup_info = result assert_equal(BACKUP_NAME, result.name) assert_equal(BACKUP_DESC, result.description) assert_equal(instance_info.id, result.instance_id) assert_equal('NEW', result.status) instance = instance_info.dbaas.instances.get(instance_info.id) assert_true(instance.status in ['ACTIVE', 'BACKUP', 'HEALTHY']) assert_equal(instance_info.dbaas_datastore, result.datastore['type']) assert_equal(instance_info.dbaas_datastore_version, result.datastore['version']) assert_equal(datastore_version.id, result.datastore['version_id']) class BackupRestoreMixin(object): def verify_backup(self, backup_id): def result_is_active(): backup = instance_info.dbaas.backups.get(backup_id) if backup.status == "COMPLETED": return True else: assert_not_equal("FAILED", backup.status) return False poll_until(result_is_active) def instance_is_totally_gone(self, instance_id): def instance_is_gone(): try: instance_info.dbaas.instances.get( instance_id) return False except exceptions.NotFound: return True poll_until( instance_is_gone, time_out=TIMEOUT_INSTANCE_DELETE) def backup_is_totally_gone(self, backup_id): def backup_is_gone(): try: instance_info.dbaas.backups.get(backup_id) return False except exceptions.NotFound: return True poll_until(backup_is_gone, time_out=TIMEOUT_BACKUP_DELETE) def verify_instance_is_active(self, instance_id): # This version just checks the REST API status. 
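# poll_until (from trove.common.utils) keeps calling the predicate below
# until it returns True; if time_out seconds elapse first it raises
# PollTimeOut, and any assertion failure inside the predicate aborts the
# wait immediately with a test failure.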
def result_is_active(): instance = instance_info.dbaas.instances.get(instance_id) if instance.status in CONFIG.running_status: return True else: # If its not ACTIVE, anything but BUILD must be # an error. assert_equal("BUILD", instance.status) if instance_info.volume is not None: assert_equal(instance.volume.get('used', None), None) return False poll_until(result_is_active, sleep_time=5, time_out=TIMEOUT_INSTANCE_CREATE) @test(depends_on_classes=[CreateBackups], groups=[tests.DBAAS_API_BACKUPS], enabled=CONFIG.swift_enabled) class WaitForBackupCreateToFinish(BackupRestoreMixin): """Wait until the backup creation is finished.""" @test @time_out(TIMEOUT_BACKUP_CREATE) def test_backup_created(self): """Wait for the backup to be finished.""" self.verify_backup(backup_info.id) @test(depends_on=[WaitForBackupCreateToFinish], groups=[tests.DBAAS_API_BACKUPS], enabled=CONFIG.swift_enabled) class ListBackups(object): @test def test_backup_list(self): """Test list backups.""" result = instance_info.dbaas.backups.list() assert_equal(backup_count_prior_to_create + 1, len(result)) backup = result[0] assert_equal(BACKUP_NAME, backup.name) assert_equal(BACKUP_DESC, backup.description) assert_not_equal(0.0, backup.size) assert_equal(instance_info.id, backup.instance_id) assert_equal('COMPLETED', backup.status) @test def test_backup_list_filter_datastore(self): """Test list backups and filter by datastore.""" result = instance_info.dbaas.backups.list( datastore=instance_info.dbaas_datastore) assert_equal(backup_count_prior_to_create + 1, len(result)) backup = result[0] assert_equal(BACKUP_NAME, backup.name) assert_equal(BACKUP_DESC, backup.description) assert_not_equal(0.0, backup.size) assert_equal(instance_info.id, backup.instance_id) assert_equal('COMPLETED', backup.status) @test def test_backup_list_filter_different_datastore(self): """Test list backups and filter by datastore.""" result = instance_info.dbaas.backups.list( datastore=CONFIG.dbaas_datastore_name_no_versions) # There should not be any backups for this datastore assert_equal(0, len(result)) @test def test_backup_list_filter_datastore_not_found(self): """Test list backups and filter by datastore.""" assert_raises(exceptions.NotFound, instance_info.dbaas.backups.list, datastore='NOT_FOUND') @test def test_backup_list_for_instance(self): """Test backup list for instance.""" result = instance_info.dbaas.instances.backups(instance_info.id) assert_equal(backup_count_for_instance_prior_to_create + 1, len(result)) backup = result[0] assert_equal(BACKUP_NAME, backup.name) assert_equal(BACKUP_DESC, backup.description) assert_not_equal(0.0, backup.size) assert_equal(instance_info.id, backup.instance_id) assert_equal('COMPLETED', backup.status) @test def test_backup_get(self): """Test get backup.""" backup = instance_info.dbaas.backups.get(backup_info.id) assert_equal(backup_info.id, backup.id) assert_equal(backup_info.name, backup.name) assert_equal(backup_info.description, backup.description) assert_equal(instance_info.id, backup.instance_id) assert_not_equal(0.0, backup.size) assert_equal('COMPLETED', backup.status) assert_equal(instance_info.dbaas_datastore, backup.datastore['type']) assert_equal(instance_info.dbaas_datastore_version, backup.datastore['version']) datastore_version = instance_info.dbaas.datastore_versions.get( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version) assert_equal(datastore_version.id, backup.datastore['version_id']) # Test to make sure that user in other tenant is not able # to GET this backup reqs = 
Requirements(is_admin=False) other_user = CONFIG.users.find_user( reqs, black_list=[instance_info.user.auth_user]) other_client = create_dbaas_client(other_user) assert_raises(exceptions.NotFound, other_client.backups.get, backup_info.id) @test(runs_after=[ListBackups], depends_on=[WaitForBackupCreateToFinish], groups=[tests.DBAAS_API_BACKUPS], enabled=CONFIG.swift_enabled) class IncrementalBackups(BackupRestoreMixin): @test def test_create_db(self): global total_num_dbs total_num_dbs = len(instance_info.dbaas.databases.list( instance_info.id)) databases = [{'name': incremental_db}] instance_info.dbaas.databases.create(instance_info.id, databases) assert_equal(202, instance_info.dbaas.last_http_code) total_num_dbs += 1 @test(runs_after=['test_create_db']) def test_create_incremental_backup(self): result = instance_info.dbaas.backups.create("incremental-backup", backup_info.instance_id, parent_id=backup_info.id) global incremental_info incremental_info = result assert_equal(202, instance_info.dbaas.last_http_code) # Wait for the backup to finish self.verify_backup(incremental_info.id) assert_equal(backup_info.id, incremental_info.parent_id) @test(groups=[tests.DBAAS_API_BACKUPS], depends_on_classes=[IncrementalBackups], enabled=CONFIG.swift_enabled) class RestoreUsingBackup(object): @classmethod def _restore(cls, backup_ref): restorePoint = {"backupRef": backup_ref} result = instance_info.dbaas.instances.create( instance_info.name + "_restore", instance_info.dbaas_flavor_href, instance_info.volume, datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version, nics=instance_info.nics, restorePoint=restorePoint) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal("BUILD", result.status) return result.id @test(depends_on=[IncrementalBackups]) def test_restore_incremental(self): """Restore from incremental backup.""" global incremental_restore_instance_id incremental_restore_instance_id = self._restore(incremental_info.id) @test(depends_on_classes=[RestoreUsingBackup], groups=[tests.DBAAS_API_BACKUPS], enabled=CONFIG.swift_enabled) class WaitForRestoreToFinish(object): @classmethod def _poll(cls, instance_id_to_poll): """Shared "instance restored" test logic.""" # This version just checks the REST API status. def result_is_active(): instance = instance_info.dbaas.instances.get(instance_id_to_poll) if instance.status in CONFIG.running_status: return True else: # If its not ACTIVE, anything but BUILD must be # an error. 
assert_equal("BUILD", instance.status) if instance_info.volume is not None: assert_equal(instance.volume.get('used', None), None) return False poll_until(result_is_active, time_out=TIMEOUT_INSTANCE_CREATE, sleep_time=10) @test def test_instance_restored_incremental(self): try: self._poll(incremental_restore_instance_id) except exception.PollTimeOut: fail('Timed out') @test(enabled=(not CONFIG.fake_mode and CONFIG.swift_enabled), depends_on_classes=[WaitForRestoreToFinish], groups=[tests.DBAAS_API_BACKUPS]) class VerifyRestore(object): @classmethod def _poll(cls, instance_id, db): def db_is_found(): databases = instance_info.dbaas.databases.list(instance_id) if db in [d.name for d in databases]: return True else: return False poll_until(db_is_found, time_out=60 * 10, sleep_time=10) @test def test_database_restored_incremental(self): try: self._poll(incremental_restore_instance_id, incremental_db) assert_equal(total_num_dbs, len(instance_info.dbaas.databases.list( incremental_restore_instance_id))) except exception.PollTimeOut: fail('Timed out') @test(groups=[tests.DBAAS_API_BACKUPS], enabled=CONFIG.swift_enabled, depends_on_classes=[VerifyRestore]) class DeleteRestoreInstance(object): @classmethod def _delete(cls, instance_id): """Test delete restored instance.""" instance_info.dbaas.instances.delete(instance_id) assert_equal(202, instance_info.dbaas.last_http_code) def instance_is_gone(): try: instance_info.dbaas.instances.get(instance_id) return False except exceptions.NotFound: return True poll_until(instance_is_gone, time_out=TIMEOUT_INSTANCE_DELETE) assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get, instance_id) @test def test_delete_restored_instance_incremental(self): try: self._delete(incremental_restore_instance_id) except exception.PollTimeOut: fail('Timed out') @test(depends_on_classes=[DeleteRestoreInstance], groups=[tests.DBAAS_API_BACKUPS], enabled=CONFIG.swift_enabled) class DeleteBackups(object): @test def test_backup_delete_not_found(self): """Test delete unknown backup.""" assert_raises(exceptions.NotFound, instance_info.dbaas.backups.delete, 'nonexistent_backup') @test def test_backup_delete_other(self): """Test another user cannot delete backup.""" # Test to make sure that user in other tenant is not able # to DELETE this backup reqs = Requirements(is_admin=False) other_user = CONFIG.users.find_user( reqs, black_list=[instance_info.user.auth_user]) other_client = create_dbaas_client(other_user) assert_raises(exceptions.NotFound, other_client.backups.delete, backup_info.id) @test(runs_after=[test_backup_delete_other]) def test_backup_delete(self): """Test backup deletion.""" instance_info.dbaas.backups.delete(backup_info.id) assert_equal(202, instance_info.dbaas.last_http_code) def backup_is_gone(): try: instance_info.dbaas.backups.get(backup_info.id) return False except exceptions.NotFound: return True poll_until(backup_is_gone, time_out=TIMEOUT_BACKUP_DELETE) @test(runs_after=[test_backup_delete]) def test_incremental_deleted(self): """Test backup children are deleted.""" if incremental_info is None: raise SkipTest("Incremental Backup not created") assert_raises(exceptions.NotFound, instance_info.dbaas.backups.get, incremental_info.id) @test(depends_on=[WaitForGuestInstallationToFinish], runs_after=[DeleteBackups], enabled=CONFIG.swift_enabled) class FakeTestHugeBackupOnSmallInstance(BackupRestoreMixin): report = CONFIG.get_report() def tweak_fake_guest(self, size): from trove.tests.fakes import guestagent guestagent.BACKUP_SIZE = size @test def 
test_load_mysql_with_data(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") self.tweak_fake_guest(1.9) @test(depends_on=[test_load_mysql_with_data]) def test_create_huge_backup(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") self.new_backup = instance_info.dbaas.backups.create( BACKUP_NAME, instance_info.id, BACKUP_DESC) assert_equal(202, instance_info.dbaas.last_http_code) @test(depends_on=[test_create_huge_backup]) def test_verify_huge_backup_completed(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") self.verify_backup(self.new_backup.id) @test(depends_on=[test_verify_huge_backup_completed]) def test_try_to_restore_on_small_instance_with_volume(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") assert_raises(exceptions.Forbidden, instance_info.dbaas.instances.create, instance_info.name + "_restore", instance_info.dbaas_flavor_href, {'size': 1}, datastore=instance_info.dbaas_datastore, datastore_version=(instance_info. dbaas_datastore_version), nics=instance_info.nics, restorePoint={"backupRef": self.new_backup.id}) assert_equal(403, instance_info.dbaas.last_http_code) @test(depends_on=[test_verify_huge_backup_completed]) def test_try_to_restore_on_small_instance_with_flavor_only(self): if not CONFIG.fake_mode: raise SkipTest("Must run in fake mode.") self.orig_conf_value = cfg.CONF.get( instance_info.dbaas_datastore).volume_support cfg.CONF.get(instance_info.dbaas_datastore).volume_support = False assert_raises(exceptions.Forbidden, instance_info.dbaas.instances.create, instance_info.name + "_restore", 11, datastore=instance_info.dbaas_datastore, datastore_version=(instance_info. dbaas_datastore_version), nics=instance_info.nics, restorePoint={"backupRef": self.new_backup.id}) assert_equal(403, instance_info.dbaas.last_http_code) cfg.CONF.get( instance_info.dbaas_datastore ).volume_support = self.orig_conf_value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/configurations.py0000644000175000017500000011446100000000000022634 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
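# The tests in this module exercise configuration groups end to end: create
# a group of datastore parameter overrides, attach it to a running MySQL
# instance, confirm every override is visible via "show variables" on the
# guest, then detach and delete the group. A minimal sketch of the client
# calls involved, assuming an authenticated client and an existing instance
# id (the parameter values shown are illustrative):
#
#     values = json.dumps({'connect_timeout': 120})
#     group = dbaas.configurations.create('test_configuration', values,
#                                         'configuration description',
#                                         datastore='mysql',
#                                         datastore_version='5.7')
#     dbaas.instances.modify(instance_id, configuration=group.id)   # attach
#     dbaas.instances.modify(instance_id, configuration="")         # detach
#     dbaas.configurations.delete(group.id)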
from datetime import datetime import json import netaddr from time import sleep import uuid from proboscis import after_class from proboscis.asserts import assert_equal from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis.asserts import fail from proboscis import before_class from proboscis.decorators import time_out from proboscis import SkipTest from proboscis import test import six from troveclient.compat import exceptions from trove.common.utils import poll_until from trove import tests from trove.tests.api.instances import assert_unprocessable from trove.tests.api.instances import instance_info from trove.tests.api.instances import InstanceTestInfo from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE from trove.tests.config import CONFIG from trove.tests.util.check import AttrCheck from trove.tests.util.check import CollectionCheck from trove.tests.util.check import TypeCheck from trove.tests.util import create_dbaas_client from trove.tests.util.mysql import create_mysql_connection from trove.tests.util.users import Requirements CONFIG_NAME = "test_configuration" CONFIG_DESC = "configuration description" configuration_default = None configuration_info = None configuration_href = None configuration_instance = InstanceTestInfo() configuration_instance_id = None sql_variables = [ 'key_buffer_size', 'connect_timeout', 'join_buffer_size', ] def _is_valid_timestamp(time_string): try: datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S") except ValueError: return False return True # helper methods to validate configuration is applied to instance def _execute_query(host, user_name, password, query): print("Starting to query database, host: %s, user: %s, password: %s, " "query: %s" % (host, user_name, password, query)) with create_mysql_connection(host, user_name, password) as db: result = db.execute(query) return result def _get_address(instance_id): result = instance_info.dbaas_admin.mgmt.instances.show(instance_id) try: return next(str(ip) for ip in result.ip if netaddr.valid_ipv4(ip)) except StopIteration: fail("No IPV4 ip found") def _test_configuration_is_applied_to_instance(instance, configuration_id): if CONFIG.fake_mode: raise SkipTest("configuration from sql does not work in fake mode") instance_test = instance_info.dbaas.instances.get(instance.id) assert_equal(configuration_id, instance_test.configuration['id']) if configuration_id: testconfig_info = instance_info.dbaas.configurations.get( configuration_id) else: testconfig_info = instance_info.dbaas.instance.configuration( instance.id) testconfig_info['configuration'] conf_instances = instance_info.dbaas.configurations.instances( configuration_id) config_instance_ids = [inst.id for inst in conf_instances] assert_true(instance_test.id in config_instance_ids) cfg_names = testconfig_info.values.keys() host = _get_address(instance.id) for user in instance.users: username = user['name'] password = user['password'] concat_variables = "','".join(cfg_names) query = ("show variables where Variable_name " "in ('%s');" % concat_variables) actual_values = _execute_query(host, username, password, query) print("actual_values %s" % actual_values) print("testconfig_info.values %s" % testconfig_info.values) assert_true(len(actual_values) == len(cfg_names)) # check the configs exist attrcheck = AttrCheck() allowed_attrs = [actual_key for actual_key, actual_value in actual_values] 
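# contains_allowed_attrs asserts that every parameter name carried by the
# attached configuration group appears among the variable names the server
# actually reported, so a missing override fails the test here before the
# per-value comparison below runs.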
attrcheck.contains_allowed_attrs( testconfig_info.values, allowed_attrs, msg="Configurations parameters") def _get_parameter_type(name): instance_info.dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, name) resp, body = instance_info.dbaas.client.last_response print(resp) print(body) return json.loads(body.decode())['type'] # check the config values are correct for key, value in actual_values: key_type = _get_parameter_type(key) # mysql returns 'ON' and 'OFF' for True and False respectively if value == 'ON': converted_key_value = (str(key), 1) elif value == 'OFF': converted_key_value = (str(key), 0) else: if key_type == 'integer': value = int(value) converted_key_value = (str(key), value) print("converted_key_value: %s" % str(converted_key_value)) assert_true(converted_key_value in testconfig_info.values.items()) class ConfigurationsTestBase(object): @staticmethod def expected_instance_datastore_configs(instance_id): """Given an instance retrieve the expected test configurations for instance's datastore. """ instance = instance_info.dbaas.instances.get(instance_id) datastore_type = instance.datastore['type'] datastore_test_configs = CONFIG.get(datastore_type, {}) return datastore_test_configs.get("configurations", {}) @staticmethod def expected_default_datastore_configs(): """Returns the expected test configurations for the default datastore defined in the Test Config as dbaas_datastore. """ default_datastore = CONFIG.get('dbaas_datastore', None) datastore_test_configs = CONFIG.get(default_datastore, {}) return datastore_test_configs.get("configurations", {}) @test(depends_on_groups=[tests.DBAAS_API_BACKUPS], groups=[tests.DBAAS_API_CONFIGURATIONS]) class CreateConfigurations(ConfigurationsTestBase): @test def test_expected_configurations_parameters(self): """Test get expected configurations parameters.""" allowed_attrs = ["configuration-parameters"] instance_info.dbaas.configuration_parameters.parameters( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version) resp, body = instance_info.dbaas.client.last_response attrcheck = AttrCheck() config_parameters_dict = json.loads(body.decode()) attrcheck.contains_allowed_attrs( config_parameters_dict, allowed_attrs, msg="Configurations parameters") # sanity check that a few options are in the list config_params_list = config_parameters_dict['configuration-parameters'] config_param_keys = [] for param in config_params_list: config_param_keys.append(param['name']) expected_configs = self.expected_default_datastore_configs() expected_config_params = expected_configs.get('parameters_list') # check for duplicate configuration parameters msg = "check for duplicate configuration parameters" assert_equal(len(config_param_keys), len(set(config_param_keys)), msg) for expected_config_item in expected_config_params: assert_true(expected_config_item in config_param_keys) @test def test_expected_get_configuration_parameter(self): # tests get on a single parameter to verify it has expected attributes param_name = 'key_buffer_size' allowed_config_params = ['name', 'restart_required', 'max', 'min', 'type', 'deleted', 'deleted_at', 'datastore_version_id'] param = instance_info.dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, param_name) resp, body = instance_info.dbaas.client.last_response print("params: %s" % param) print("resp: %s" % resp) print("body: %s" % body) attrcheck = AttrCheck() config_parameter_dict = 
json.loads(body.decode()) print("config_parameter_dict: %s" % config_parameter_dict) attrcheck.contains_allowed_attrs( config_parameter_dict, allowed_config_params, msg="Get Configuration parameter") assert_equal(param_name, config_parameter_dict['name']) with TypeCheck('ConfigurationParameter', param) as parameter: parameter.has_field('name', six.string_types) parameter.has_field('restart_required', bool) parameter.has_field('max', six.integer_types) parameter.has_field('min', six.integer_types) parameter.has_field('type', six.string_types) parameter.has_field('datastore_version_id', six.text_type) @test def test_configurations_create_invalid_values(self): """Test create configurations with invalid values.""" values = '{"this_is_invalid": 123}' try: instance_info.dbaas.configurations.create( CONFIG_NAME, values, CONFIG_DESC) except exceptions.UnprocessableEntity: resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 422) @test def test_configurations_create_invalid_value_type(self): """Test create configuration with invalid value type.""" values = '{"key_buffer_size": "this is a string not int"}' assert_unprocessable(instance_info.dbaas.configurations.create, CONFIG_NAME, values, CONFIG_DESC) @test def test_configurations_create_value_out_of_bounds(self): """Test create configuration with value out of bounds.""" expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('out_of_bounds_over')) assert_unprocessable(instance_info.dbaas.configurations.create, CONFIG_NAME, values, CONFIG_DESC) values = json.dumps(expected_configs.get('out_of_bounds_under')) assert_unprocessable(instance_info.dbaas.configurations.create, CONFIG_NAME, values, CONFIG_DESC) @test def test_valid_configurations_create(self): # create a configuration with valid parameters expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('valid_values')) expected_values = json.loads(values) result = instance_info.dbaas.configurations.create( CONFIG_NAME, values, CONFIG_DESC, datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) with TypeCheck('Configuration', result) as configuration: configuration.has_field('name', six.string_types) configuration.has_field('description', six.string_types) configuration.has_field('values', dict) configuration.has_field('datastore_name', six.string_types) configuration.has_field('datastore_version_id', six.text_type) configuration.has_field('datastore_version_name', six.string_types) global configuration_info configuration_info = result assert_equal(configuration_info.name, CONFIG_NAME) assert_equal(configuration_info.description, CONFIG_DESC) assert_equal(configuration_info.values, expected_values) @test(runs_after=[test_valid_configurations_create]) def test_appending_to_existing_configuration(self): # test being able to update and insert new parameter name and values # to an existing configuration expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('appending_values')) # ensure updated timestamp is different than created if not CONFIG.fake_mode: sleep(1) instance_info.dbaas.configurations.edit(configuration_info.id, values) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) @test(depends_on_classes=[CreateConfigurations], groups=[tests.DBAAS_API_CONFIGURATIONS]) class 
AfterConfigurationsCreation(ConfigurationsTestBase): @test def test_assign_configuration_to_invalid_instance(self): """test assigning to an instance that does not exist""" invalid_id = "invalid-inst-id" try: instance_info.dbaas.instances.modify(invalid_id, configuration_info.id) except exceptions.NotFound: resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 404) @test def test_assign_configuration_to_valid_instance(self): """test assigning a configuration to an instance""" print("instance_info.id: %s" % instance_info.id) print("configuration_info: %s" % configuration_info) print("configuration_info.id: %s" % configuration_info.id) config_id = configuration_info.id instance_info.dbaas.instances.modify(instance_info.id, configuration=config_id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) @test(depends_on=[test_assign_configuration_to_valid_instance]) def test_assign_configuration_to_instance_with_config(self): """test assigning a configuration to an instance conflicts""" config_id = configuration_info.id assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.modify, instance_info.id, configuration=config_id) @test(depends_on=[test_assign_configuration_to_valid_instance]) @time_out(30) def test_get_configuration_details_from_instance_validation(self): """validate the configuration after attaching""" print("instance_info.id: %s" % instance_info.id) inst = instance_info.dbaas.instances.get(instance_info.id) configuration_id = inst.configuration['id'] print("configuration_info: %s" % configuration_id) assert_not_equal(None, configuration_id) _test_configuration_is_applied_to_instance(instance_info, configuration_id) @test(depends_on=[test_get_configuration_details_from_instance_validation]) def test_configurations_get(self): """test that the instance shows up on the assigned configuration""" result = instance_info.dbaas.configurations.get(configuration_info.id) assert_equal(configuration_info.id, result.id) assert_equal(configuration_info.name, result.name) assert_equal(configuration_info.description, result.description) # check the result field types with TypeCheck("configuration", result) as check: check.has_field("id", six.string_types) check.has_field("name", six.string_types) check.has_field("description", six.string_types) check.has_field("values", dict) check.has_field("created", six.string_types) check.has_field("updated", six.string_types) check.has_field("instance_count", int) print(result.values) # check for valid timestamps assert_true(_is_valid_timestamp(result.created)) assert_true(_is_valid_timestamp(result.updated)) # check that created and updated timestamps differ, since # test_appending_to_existing_configuration should have changed the # updated timestamp if not CONFIG.fake_mode: assert_not_equal(result.created, result.updated) assert_equal(result.instance_count, 1) with CollectionCheck("configuration_values", result.values) as check: # check each item has the correct type according to the rules for (item_key, item_val) in result.values.items(): print("item_key: %s" % item_key) print("item_val: %s" % item_val) dbaas = instance_info.dbaas param = dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, item_key) if param.type == 'integer': check.has_element(item_key, int) if param.type == 'string': check.has_element(item_key, six.string_types) if param.type == 'boolean': check.has_element(item_key, bool) # Test to make sure that another 
user is not able to GET this config reqs = Requirements(is_admin=False) test_auth_user = instance_info.user.auth_user other_user = CONFIG.users.find_user(reqs, black_list=[test_auth_user]) other_user_tenant_id = other_user.tenant_id client_tenant_id = instance_info.user.tenant_id if other_user_tenant_id == client_tenant_id: other_user = CONFIG.users.find_user( reqs, black_list=[instance_info.user.auth_user, other_user]) print(other_user) print(other_user.__dict__) other_client = create_dbaas_client(other_user) assert_raises(exceptions.NotFound, other_client.configurations.get, configuration_info.id) @test(depends_on_classes=[AfterConfigurationsCreation], groups=[tests.DBAAS_API_CONFIGURATIONS]) class ListConfigurations(ConfigurationsTestBase): @test def test_configurations_list(self): # test listing configurations show up result = instance_info.dbaas.configurations.list() for conf in result: with TypeCheck("Configuration", conf) as check: check.has_field('id', six.string_types) check.has_field('name', six.string_types) check.has_field('description', six.string_types) check.has_field('datastore_version_id', six.string_types) check.has_field('datastore_version_name', six.string_types) check.has_field('datastore_name', six.string_types) exists = [config for config in result if config.id == configuration_info.id] assert_equal(1, len(exists)) configuration = exists[0] assert_equal(configuration.id, configuration_info.id) assert_equal(configuration.name, configuration_info.name) assert_equal(configuration.description, configuration_info.description) @test def test_configurations_list_for_instance(self): # test getting an instance shows the configuration assigned shows up instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal(instance.configuration['id'], configuration_info.id) assert_equal(instance.configuration['name'], configuration_info.name) # expecting two things in links, href and bookmark assert_equal(2, len(instance.configuration['links'])) link = instance.configuration['links'][0] global configuration_href configuration_href = link['href'] @test def test_get_default_configuration_on_instance(self): # test the api call to get the default template of an instance exists result = instance_info.dbaas.instances.configuration(instance_info.id) global configuration_default configuration_default = result assert_not_equal(None, result.configuration) @test def test_changing_configuration_with_nondynamic_parameter(self): # test that changing a non-dynamic parameter is applied to instance # and show that the instance requires a restart expected_configs = self.expected_default_datastore_configs() values = json.dumps(expected_configs.get('nondynamic_parameter')) instance_info.dbaas.configurations.update(configuration_info.id, values) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) instance_info.dbaas.configurations.get(configuration_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) @test(depends_on=[test_changing_configuration_with_nondynamic_parameter]) @time_out(20) def test_waiting_for_instance_in_restart_required(self): def result_is_not_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status in CONFIG.running_status: return False else: return True poll_until(result_is_not_active) instance = instance_info.dbaas.instances.get(instance_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) assert_equal('RESTART_REQUIRED', 
instance.status) @test(depends_on=[test_waiting_for_instance_in_restart_required]) def test_restart_service_should_return_active(self): # test that after restarting the instance it becomes active instance_info.dbaas.instances.restart(instance_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status in CONFIG.running_status: return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) @test(depends_on=[test_restart_service_should_return_active]) @time_out(30) def test_get_configuration_details_from_instance_validation(self): # validate that the configuration was applied correctly to the instance inst = instance_info.dbaas.instances.get(instance_info.id) configuration_id = inst.configuration['id'] assert_not_equal(None, inst.configuration['id']) _test_configuration_is_applied_to_instance(instance_info, configuration_id) @test(depends_on=[test_configurations_list]) def test_compare_list_and_details_timestamps(self): # compare config timestamps between list and details calls result = instance_info.dbaas.configurations.list() list_config = [config for config in result if config.id == configuration_info.id] assert_equal(1, len(list_config)) details_config = instance_info.dbaas.configurations.get( configuration_info.id) assert_equal(list_config[0].created, details_config.created) assert_equal(list_config[0].updated, details_config.updated) @test(depends_on_classes=[ListConfigurations], groups=[tests.DBAAS_API_CONFIGURATIONS]) class StartInstanceWithConfiguration(ConfigurationsTestBase): @test def test_start_instance_with_configuration(self): """test that a new instance will apply the configuration on create""" global configuration_instance databases = [] databases.append({"name": "firstdbconfig", "character_set": "latin2", "collate": "latin2_general_ci"}) databases.append({"name": "db2"}) configuration_instance.databases = databases users = [] users.append({"name": "liteconf", "password": "liteconfpass", "databases": [{"name": "firstdbconfig"}]}) configuration_instance.users = users configuration_instance.name = "TEST_" + str(uuid.uuid4()) + "_config" flavor_href = instance_info.dbaas_flavor_href configuration_instance.dbaas_flavor_href = flavor_href configuration_instance.volume = instance_info.volume configuration_instance.dbaas_datastore = instance_info.dbaas_datastore configuration_instance.dbaas_datastore_version = \ instance_info.dbaas_datastore_version configuration_instance.nics = instance_info.nics result = instance_info.dbaas.instances.create( configuration_instance.name, configuration_instance.dbaas_flavor_href, configuration_instance.volume, configuration_instance.databases, configuration_instance.users, nics=configuration_instance.nics, availability_zone="nova", datastore=configuration_instance.dbaas_datastore, datastore_version=configuration_instance.dbaas_datastore_version, configuration=configuration_href) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal("BUILD", result.status) configuration_instance.id = result.id @test(depends_on_classes=[StartInstanceWithConfiguration], groups=[tests.DBAAS_API_CONFIGURATIONS]) class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase): @test @time_out(TIMEOUT_INSTANCE_CREATE) def test_instance_with_configuration_active(self): """wait for the instance created with configuration""" def result_is_active(): instance =
instance_info.dbaas.instances.get( configuration_instance.id) if instance.status in CONFIG.running_status: return True else: assert_equal("BUILD", instance.status) return False poll_until(result_is_active) @test(depends_on=[test_instance_with_configuration_active]) @time_out(30) def test_get_configuration_details_from_instance_validation(self): """Test configuration is applied correctly to the instance.""" inst = instance_info.dbaas.instances.get(configuration_instance.id) configuration_id = inst.configuration['id'] assert_not_equal(None, configuration_id) _test_configuration_is_applied_to_instance(configuration_instance, configuration_id) @test(depends_on=[WaitForConfigurationInstanceToFinish], groups=[tests.DBAAS_API_CONFIGURATIONS]) class DeleteConfigurations(ConfigurationsTestBase): @before_class def setUp(self): # need to store the parameter details that will be deleted config_param_name = sql_variables[1] instance_info.dbaas.configuration_parameters.get_parameter( instance_info.dbaas_datastore, instance_info.dbaas_datastore_version, config_param_name) resp, body = instance_info.dbaas.client.last_response print(resp) print(body) self.config_parameter_dict = json.loads(body.decode()) @after_class(always_run=True) def tearDown(self): # need to "undelete" the parameter that was deleted from the mgmt call ds = instance_info.dbaas_datastore ds_v = instance_info.dbaas_datastore_version version = instance_info.dbaas.datastore_versions.get( ds, ds_v) client = instance_info.dbaas_admin.mgmt_configs print(self.config_parameter_dict) client.create(version.id, self.config_parameter_dict['name'], self.config_parameter_dict['restart_required'], self.config_parameter_dict['type'], self.config_parameter_dict['max'], self.config_parameter_dict['min']) @test def test_delete_invalid_configuration_not_found(self): # test deleting a configuration that does not exist throws exception invalid_configuration_id = "invalid-config-id" assert_raises(exceptions.NotFound, instance_info.dbaas.configurations.delete, invalid_configuration_id) @test(depends_on=[test_delete_invalid_configuration_not_found]) def test_delete_configuration_parameter_with_mgmt_api(self): # testing a param that is assigned to an instance can be deleted # and doesn't affect an unassign later. So we delete a parameter # that is used by a test (connect_timeout) ds = instance_info.dbaas_datastore ds_v = instance_info.dbaas_datastore_version version = instance_info.dbaas.datastore_versions.get( ds, ds_v) client = instance_info.dbaas_admin.mgmt_configs config_param_name = self.config_parameter_dict['name'] client.delete(version.id, config_param_name) assert_raises( exceptions.NotFound, instance_info.dbaas.configuration_parameters.get_parameter, ds, ds_v, config_param_name) @test(depends_on=[test_delete_configuration_parameter_with_mgmt_api]) def test_unable_delete_instance_configurations(self): # test deleting a configuration that is assigned to # an instance is not allowed. 
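# The API must refuse this with 400 BadRequest while any instance still
# references the group; deletion only succeeds once the group has been
# unassigned (see test_delete_unassigned_configuration below).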
assert_raises(exceptions.BadRequest, instance_info.dbaas.configurations.delete, configuration_info.id) @test(depends_on=[test_unable_delete_instance_configurations]) @time_out(30) def test_unassign_configuration_from_instances(self): """test to unassign configuration from instance""" instance_info.dbaas.instances.modify(configuration_instance.id, configuration="") resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) instance_info.dbaas.instances.get(configuration_instance.id) # test that config group is not removed instance_info.dbaas.instances.modify(instance_info.id, configuration=None) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) instance_info.dbaas.instances.get(instance_info.id) def result_has_no_configuration(): instance = instance_info.dbaas.instances.get(inst_info.id) if hasattr(instance, 'configuration'): return False else: return True inst_info = instance_info poll_until(result_has_no_configuration) inst_info = configuration_instance poll_until(result_has_no_configuration) instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal('RESTART_REQUIRED', instance.status) @test(depends_on=[test_unassign_configuration_from_instances]) def test_assign_in_wrong_state(self): # test assigning a config to an instance in RESTART state assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.modify, configuration_instance.id, configuration=configuration_info.id) @test(depends_on=[test_assign_in_wrong_state]) def test_no_instances_on_configuration(self): """test_no_instances_on_configuration""" result = instance_info.dbaas.configurations.get(configuration_info.id) assert_equal(configuration_info.id, result.id) assert_equal(configuration_info.name, result.name) assert_equal(configuration_info.description, result.description) assert_equal(result.instance_count, 0) print(configuration_instance.id) print(instance_info.id) @test(depends_on=[test_unassign_configuration_from_instances]) @time_out(120) def test_restart_service_after_unassign_return_active(self): """test_restart_service_after_unassign_return_active""" def result_is_not_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status in CONFIG.running_status: return False else: return True poll_until(result_is_not_active) config = instance_info.dbaas.configurations.list() print(config) instance = instance_info.dbaas.instances.get(instance_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 200) assert_equal('RESTART_REQUIRED', instance.status) @test(depends_on=[test_restart_service_after_unassign_return_active]) @time_out(120) def test_restart_service_should_return_active(self): """test that after restarting the instance it becomes active""" instance_info.dbaas.instances.restart(instance_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status in CONFIG.running_status: return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) @test(depends_on=[test_restart_service_should_return_active]) def test_assign_config_and_name_to_instance_using_patch(self): """test_assign_config_and_name_to_instance_using_patch""" new_name = 'new_name' report = CONFIG.get_report() report.log("instance_info.id: %s" % instance_info.id) report.log("configuration_info: %s" % configuration_info) 
report.log("configuration_info.id: %s" % configuration_info.id) report.log("instance name:%s" % instance_info.name) report.log("instance new name:%s" % new_name) saved_name = instance_info.name config_id = configuration_info.id instance_info.dbaas.instances.edit(instance_info.id, configuration=config_id, name=new_name) assert_equal(202, instance_info.dbaas.last_http_code) check = instance_info.dbaas.instances.get(instance_info.id) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal(check.name, new_name) # restore instance name instance_info.dbaas.instances.edit(instance_info.id, name=saved_name) assert_equal(202, instance_info.dbaas.last_http_code) instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal('RESTART_REQUIRED', instance.status) # restart to be sure configuration is applied instance_info.dbaas.instances.restart(instance_info.id) assert_equal(202, instance_info.dbaas.last_http_code) sleep(2) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status in CONFIG.running_status: return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) # test assigning a configuration to an instance that # already has an assigned configuration with patch config_id = configuration_info.id assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.edit, instance_info.id, configuration=config_id) @test(runs_after=[test_assign_config_and_name_to_instance_using_patch]) def test_unassign_configuration_after_patch(self): """Remove the configuration from the instance""" instance_info.dbaas.instances.edit(instance_info.id, remove_configuration=True) assert_equal(202, instance_info.dbaas.last_http_code) instance = instance_info.dbaas.instances.get(instance_info.id) assert_equal('RESTART_REQUIRED', instance.status) # restart to be sure configuration has been unassigned instance_info.dbaas.instances.restart(instance_info.id) assert_equal(202, instance_info.dbaas.last_http_code) sleep(2) def result_is_active(): instance = instance_info.dbaas.instances.get( instance_info.id) if instance.status in CONFIG.running_status: return True else: assert_equal("REBOOT", instance.status) return False poll_until(result_is_active) result = instance_info.dbaas.configurations.get(configuration_info.id) assert_equal(result.instance_count, 0) @test def test_unassign_configuration_from_invalid_instance_using_patch(self): # test unassign config group from an invalid instance invalid_id = "invalid-inst-id" try: instance_info.dbaas.instances.edit(invalid_id, remove_configuration=True) except exceptions.NotFound: resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 404) @test(runs_after=[test_unassign_configuration_after_patch]) def test_delete_unassigned_configuration(self): """test_delete_unassigned_configuration""" instance_info.dbaas.configurations.delete(configuration_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) @test(depends_on=[test_delete_unassigned_configuration]) @time_out(TIMEOUT_INSTANCE_DELETE) def test_delete_configuration_instance(self): """test_delete_configuration_instance""" instance_info.dbaas.instances.delete(configuration_instance.id) assert_equal(202, instance_info.dbaas.last_http_code) def instance_is_gone(): try: instance_info.dbaas.instances.get(configuration_instance.id) return False except exceptions.NotFound: return True poll_until(instance_is_gone) assert_raises(exceptions.NotFound, 
              instance_info.dbaas.instances.get, configuration_instance.id)


# --- trove/tests/api/databases.py ---

# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis import before_class
from proboscis import test
from troveclient.compat import exceptions

from trove import tests
from trove.tests.api.instances import instance_info
from trove.tests import util
from trove.tests.util import test_config

FAKE = test_config.values['fake_mode']


@test(depends_on_groups=[tests.DBAAS_API_USERS_ACCESS],
      groups=[tests.DBAAS_API_DATABASES])
class TestDatabases(object):
    """Test the creation and deletion of additional MySQL databases"""

    dbname = "third #?@some_-"
    dbname_urlencoded = "third%20%23%3F%40some_-"
    dbname2 = "seconddb"
    created_dbs = [dbname, dbname2]
    system_dbs = ['information_schema', 'mysql', 'lost+found']

    @before_class
    def setUp(self):
        self.dbaas = util.create_dbaas_client(instance_info.user)
        self.dbaas_admin = util.create_dbaas_client(instance_info.admin_user)

    @test
    def test_cannot_create_taboo_database_names(self):
        for name in self.system_dbs:
            databases = [{"name": name, "character_set": "latin2",
                          "collate": "latin2_general_ci"}]
            assert_raises(exceptions.BadRequest, self.dbaas.databases.create,
                          instance_info.id, databases)
            assert_equal(400, self.dbaas.last_http_code)

    @test
    def test_create_database(self):
        databases = []
        databases.append({"name": self.dbname, "character_set": "latin2",
                          "collate": "latin2_general_ci"})
        databases.append({"name": self.dbname2})
        self.dbaas.databases.create(instance_info.id, databases)
        assert_equal(202, self.dbaas.last_http_code)
        if not FAKE:
            time.sleep(5)

    @test(depends_on=[test_create_database])
    def test_create_database_list(self):
        databases = self.dbaas.databases.list(instance_info.id)
        assert_equal(200, self.dbaas.last_http_code)
        found = False
        for db in self.created_dbs:
            for result in databases:
                if result.name == db:
                    found = True
            assert_true(found, "Database '%s' not found in result" % db)
            found = False

    @test(depends_on=[test_create_database])
    def test_fails_when_creating_a_db_twice(self):
        databases = []
        databases.append({"name": self.dbname, "character_set": "latin2",
                          "collate": "latin2_general_ci"})
        databases.append({"name": self.dbname2})
        assert_raises(exceptions.BadRequest, self.dbaas.databases.create,
                      instance_info.id, databases)
        assert_equal(400, self.dbaas.last_http_code)

    @test
    def test_create_database_list_system(self):
        # Databases that should not be returned in the list
        databases = self.dbaas.databases.list(instance_info.id)
        assert_equal(200, self.dbaas.last_http_code)
        found = False
        for db in self.system_dbs:
            found = any(result.name == db for result in
                        databases)
            msg = "Database '%s' SHOULD NOT be found in result" % db
            assert_false(found, msg)
            found = False

    @test
    def test_create_database_on_missing_instance(self):
        databases = [{"name": "invalid_db", "character_set": "latin2",
                      "collate": "latin2_general_ci"}]
        assert_raises(exceptions.NotFound, self.dbaas.databases.create,
                      -1, databases)
        assert_equal(404, self.dbaas.last_http_code)

    @test(runs_after=[test_create_database])
    def test_delete_database(self):
        self.dbaas.databases.delete(instance_info.id, self.dbname_urlencoded)
        assert_equal(202, self.dbaas.last_http_code)
        if not FAKE:
            time.sleep(5)
        dbs = self.dbaas.databases.list(instance_info.id)
        assert_equal(200, self.dbaas.last_http_code)
        found = any(result.name == self.dbname_urlencoded for result in dbs)
        assert_false(found, "Database '%s' SHOULD NOT be found in result"
                     % self.dbname_urlencoded)

    @test(runs_after=[test_delete_database])
    def test_cannot_delete_taboo_database_names(self):
        for name in self.system_dbs:
            assert_raises(exceptions.BadRequest, self.dbaas.databases.delete,
                          instance_info.id, name)
            assert_equal(400, self.dbaas.last_http_code)

    @test(runs_after=[test_delete_database])
    def test_delete_database_on_missing_instance(self):
        assert_raises(exceptions.NotFound, self.dbaas.databases.delete,
                      -1, self.dbname_urlencoded)
        assert_equal(404, self.dbaas.last_http_code)

    @test
    def test_database_name_too_long(self):
        databases = []
        name = ("aasdlkhaglkjhakjdkjgfakjgadgfkajsg"
                "34523dfkljgasldkjfglkjadsgflkjagsdd")
        databases.append({"name": name})
        assert_raises(exceptions.BadRequest, self.dbaas.databases.create,
                      instance_info.id, databases)
        assert_equal(400, self.dbaas.last_http_code)

    @test
    def test_invalid_database_name(self):
        databases = []
        databases.append({"name": "sdfsd,"})
        assert_raises(exceptions.BadRequest, self.dbaas.databases.create,
                      instance_info.id, databases)
        assert_equal(400, self.dbaas.last_http_code)

    @test
    def test_pagination(self):
        databases = []
        databases.append({"name": "Sprockets", "character_set": "latin2",
                          "collate": "latin2_general_ci"})
        databases.append({"name": "Cogs"})
        databases.append({"name": "Widgets"})
        self.dbaas.databases.create(instance_info.id, databases)
        assert_equal(202, self.dbaas.last_http_code)
        if not FAKE:
            time.sleep(5)
        limit = 2
        databases = self.dbaas.databases.list(instance_info.id, limit=limit)
        assert_equal(200, self.dbaas.last_http_code)
        marker = databases.next
        # Better get only as many as we asked for
        assert_true(len(databases) <= limit)
        assert_true(databases.next is not None)
        assert_equal(marker, databases[-1].name)
        marker = databases.next
        # I better get new databases if I use the marker I was handed.
        databases = self.dbaas.databases.list(instance_info.id, limit=limit,
                                              marker=marker)
        assert_equal(200, self.dbaas.last_http_code)
        assert_true(marker not in [database.name for database in databases])
        # Now fetch again with a larger limit.
        databases = self.dbaas.databases.list(instance_info.id)
        assert_equal(200, self.dbaas.last_http_code)
        assert_true(databases.next is None)


# --- trove/tests/api/datastores.py ---

# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nose.tools import assert_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis import before_class from proboscis import test import six from troveclient.compat import exceptions from trove import tests from trove.tests.util.check import TypeCheck from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements NAME = "nonexistent" @test(groups=[tests.DBAAS_API_DATASTORES], depends_on_groups=[tests.DBAAS_API_VERSIONS]) class Datastores(object): @before_class def setUp(self): rd_user = test_config.users.find_user( Requirements(is_admin=False, services=["trove"])) rd_admin = test_config.users.find_user( Requirements(is_admin=True, services=["trove"])) self.rd_client = create_dbaas_client(rd_user) self.rd_admin = create_dbaas_client(rd_admin) @test def test_datastore_list_attrs(self): datastores = self.rd_client.datastores.list() for datastore in datastores: with TypeCheck('Datastore', datastore) as check: check.has_field("id", six.string_types) check.has_field("name", six.string_types) check.has_field("links", list) check.has_field("versions", list) @test def test_datastore_get(self): # Test get by name datastore_by_name = self.rd_client.datastores.get( test_config.dbaas_datastore) with TypeCheck('Datastore', datastore_by_name) as check: check.has_field("id", six.string_types) check.has_field("name", six.string_types) check.has_field("links", list) assert_equal(datastore_by_name.name, test_config.dbaas_datastore) # test get by id datastore_by_id = self.rd_client.datastores.get( datastore_by_name.id) with TypeCheck('Datastore', datastore_by_id) as check: check.has_field("id", six.string_types) check.has_field("name", six.string_types) check.has_field("links", list) check.has_field("versions", list) assert_equal(datastore_by_id.id, datastore_by_name.id) @test def test_datastore_not_found(self): try: assert_raises(exceptions.NotFound, self.rd_client.datastores.get, NAME) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore '%s' cannot be found." % NAME) @test def test_create_inactive_datastore_by_admin(self): datastore = self.rd_client.datastores.get(test_config.dbaas_datastore) ds_version = self.rd_client.datastore_versions.list(datastore.id)[0] ds_version_info = self.rd_admin.datastore_versions.get_by_uuid( ds_version.id) # Create datastore version for testing # 'Test_Datastore_1' is also used in other test cases. 
# Will be deleted in test_delete_datastore_version self.rd_admin.mgmt_datastore_versions.create( "inactive_version", test_config.dbaas_datastore_name_no_versions, "test_manager", ds_version_info.image, active='false', default='false' ) @test(depends_on=[test_create_inactive_datastore_by_admin]) def test_datastore_with_no_active_versions_is_hidden(self): datastores = self.rd_client.datastores.list() name_list = [datastore.name for datastore in datastores] assert_true( test_config.dbaas_datastore_name_no_versions not in name_list) @test(depends_on=[test_create_inactive_datastore_by_admin]) def test_datastore_with_no_active_versions_is_visible_for_admin(self): datastores = self.rd_admin.datastores.list() name_list = [datastore.name for datastore in datastores] assert_true(test_config.dbaas_datastore_name_no_versions in name_list) @test(groups=[tests.DBAAS_API_DATASTORES]) class DatastoreVersions(object): @before_class def setUp(self): rd_user = test_config.users.find_user( Requirements(is_admin=False, services=["trove"])) self.rd_client = create_dbaas_client(rd_user) self.datastore_active = self.rd_client.datastores.get( test_config.dbaas_datastore) self.datastore_version_active = self.rd_client.datastore_versions.list( self.datastore_active.id)[0] @test def test_datastore_version_list_attrs(self): versions = self.rd_client.datastore_versions.list( self.datastore_active.name) for version in versions: with TypeCheck('DatastoreVersion', version) as check: check.has_field("id", six.string_types) check.has_field("name", six.string_types) check.has_field("links", list) @test def test_datastore_version_get_attrs(self): version = self.rd_client.datastore_versions.get( self.datastore_active.name, self.datastore_version_active.name) with TypeCheck('DatastoreVersion', version) as check: check.has_field("id", six.string_types) check.has_field("name", six.string_types) check.has_field("datastore", six.string_types) check.has_field("links", list) assert_equal(version.name, self.datastore_version_active.name) @test def test_datastore_version_get_by_uuid_attrs(self): version = self.rd_client.datastore_versions.get_by_uuid( self.datastore_version_active.id) with TypeCheck('DatastoreVersion', version) as check: check.has_field("id", six.string_types) check.has_field("name", six.string_types) check.has_field("datastore", six.string_types) check.has_field("links", list) assert_equal(version.name, self.datastore_version_active.name) @test def test_datastore_version_not_found(self): try: assert_raises(exceptions.NotFound, self.rd_client.datastore_versions.get, self.datastore_active.name, NAME) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore version '%s' cannot be found." 
                         % NAME)

    @test
    def test_datastore_version_list_by_uuid(self):
        versions = self.rd_client.datastore_versions.list(
            self.datastore_active.id)
        for version in versions:
            with TypeCheck('DatastoreVersion', version) as check:
                check.has_field("id", six.string_types)
                check.has_field("name", six.string_types)
                check.has_field("links", list)

    @test
    def test_datastore_version_get_by_uuid(self):
        version = self.rd_client.datastore_versions.get(
            self.datastore_active.id, self.datastore_version_active.id)
        with TypeCheck('DatastoreVersion', version) as check:
            check.has_field("id", six.string_types)
            check.has_field("name", six.string_types)
            check.has_field("datastore", six.string_types)
            check.has_field("links", list)
        assert_equal(version.name, self.datastore_version_active.name)

    @test
    def test_datastore_version_invalid_uuid(self):
        # Use a version id that cannot exist, so the lookup actually fails
        # and the assertion on the error message is exercised.
        invalid_uuid = "invalid-version-uuid"
        try:
            self.rd_client.datastore_versions.get_by_uuid(invalid_uuid)
        except exceptions.BadRequest as e:
            assert_equal(e.message,
                         "Datastore version '%s' cannot be found."
                         % invalid_uuid)


# --- trove/tests/api/instances.py ---

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
import os
import time
import unittest
import uuid

from proboscis import asserts
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import SkipTest
from proboscis import test
from troveclient.compat import exceptions

from trove.common import cfg
from trove.common.utils import poll_until
from trove.datastore import models as datastore_models
from trove import tests
from trove.tests.config import CONFIG
from trove.tests.util.check import AttrCheck
from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.usage import create_usage_verifier
from trove.tests.util.users import Requirements

CONF = cfg.CONF

FAKE = test_config.values['fake_mode']

TIMEOUT_INSTANCE_CREATE = 60 * 32
TIMEOUT_INSTANCE_DELETE = 120


class InstanceTestInfo(object):
    """Stores new instance information used by dependent tests."""

    def __init__(self):
        self.dbaas = None  # The rich client instance used by these tests.
        self.dbaas_admin = None  # The rich client with admin access.
        self.dbaas_flavor = None  # The flavor object of the instance.
        self.dbaas_flavor_href = None  # The flavor of the instance.
        self.dbaas_datastore = None  # The datastore id
        self.dbaas_datastore_version = None  # The datastore version id
        self.id = None  # The ID of the instance in the database.
        self.local_id = None
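        # The remaining attributes are filled in lazily as the suite runs;
        # the module-level `instance_info` object created further below is
        # the single shared fixture that dependent test groups read from.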
        # The IP address of the database instance for the user.
        self.address = None
        # The management network IP address.
        self.mgmt_address = None
        self.nics = None  # The dict of type/id for nics used on the instance.
        shared_network = CONFIG.get('shared_network', None)
        if shared_network:
            self.nics = [{'net-id': shared_network}]
        self.initial_result = None  # The initial result from the create call.
        self.result = None  # The instance info returned by the API
        self.nova_client = None  # The instance of novaclient.
        self.volume_client = None  # The instance of the volume client.
        self.name = None  # Test name, generated each test run.
        self.pid = None  # The process ID of the instance.
        self.user = None  # The user instance who owns the instance.
        self.admin_user = None  # The admin user for the management interfaces.
        self.volume = None  # The volume the instance will have.
        self.volume_id = None  # Id for the attached volume
        self.storage = None  # The storage device info for the volumes.
        self.databases = None  # The databases created on the instance.
        self.host_info = None  # Host Info before creating instances
        self.user_context = None  # A regular user context
        self.users = None  # The users created on the instance.
        self.consumer = create_usage_verifier()

    def find_default_flavor(self):
        if EPHEMERAL_SUPPORT:
            flavor_name = CONFIG.values.get('instance_eph_flavor_name',
                                            'eph.rd-tiny')
        else:
            flavor_name = CONFIG.values.get('instance_flavor_name', 'm1.tiny')
        flavors = self.dbaas.find_flavors_by_name(flavor_name)
        assert_equal(len(flavors), 1,
                     "Number of flavors with name '%s' "
                     "found was '%d'." % (flavor_name, len(flavors)))
        flavor = flavors[0]
        flavor_href = self.dbaas.find_flavor_self_href(flavor)
        assert_true(flavor_href is not None,
                    "Flavor href '%s' not found!" % flavor_name)
        return flavor, flavor_href

    def get_address(self, mgmt=False):
        if mgmt:
            if self.mgmt_address:
                return self.mgmt_address
            mgmt_netname = test_config.get("trove_mgmt_network", "trove-mgmt")
            result = self.dbaas_admin.mgmt.instances.show(self.id)
            mgmt_interfaces = result.server['addresses'].get(mgmt_netname, [])
            mgmt_addresses = [str(inf["addr"]) for inf in mgmt_interfaces
                              if inf["version"] == 4]
            if len(mgmt_addresses) == 0:
                fail("No IPV4 ip found for management network.")
            self.mgmt_address = mgmt_addresses[0]
            return self.mgmt_address
        else:
            if self.address:
                return self.address
            result = self.dbaas.instances.get(self.id)
            addresses = [str(ip) for ip in result.ip
                         if netaddr.valid_ipv4(ip)]
            if len(addresses) == 0:
                fail("No IPV4 ip found for database network.")
            self.address = addresses[0]
            return self.address

    def get_local_id(self):
        mgmt_instance = self.dbaas_admin.management.show(self.id)
        return mgmt_instance.server["local_id"]

    def get_volume_filesystem_size(self):
        mgmt_instance = self.dbaas_admin.management.show(self.id)
        return mgmt_instance.volume["total"]


# The two variables are used below by tests which depend on an instance
# existing.
instance_info = InstanceTestInfo()
dbaas = None  # Rich client used throughout this test.
dbaas_admin = None  # Same as above, with admin privs.

ROOT_ON_CREATE = CONFIG.get('root_on_create', False)
VOLUME_SUPPORT = CONFIG.get('trove_volume_support', False)
EPHEMERAL_SUPPORT = not VOLUME_SUPPORT and CONFIG.get('device_path',
                                                      '/dev/vdb') is not None
ROOT_PARTITION = not VOLUME_SUPPORT and CONFIG.get('device_path',
                                                   None) is None


# This is like a cheat code which allows the tests to skip creating a new
# instance and use an old one.
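# Usage sketch for the cheat code (assuming a bash-like shell):
#
#     export TESTS_USE_INSTANCE_ID=<existing-instance-uuid>
#     # ...rerun the suite; the instance-creation steps are then skipped.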
def existing_instance(): return os.environ.get("TESTS_USE_INSTANCE_ID", None) def create_new_instance(): return existing_instance() is None def assert_unprocessable(func, *args): try: func(*args) # If the exception didn't get raised, but the instance is still in # the BUILDING state, that's a bug. result = dbaas.instances.get(instance_info.id) if result.status == "BUILD": fail("When an instance is being built, this function should " "always raise UnprocessableEntity.") except exceptions.UnprocessableEntity: assert_equal(422, dbaas.last_http_code) pass # Good class CheckInstance(AttrCheck): """Class to check various attributes of Instance details.""" def __init__(self, instance): super(CheckInstance, self).__init__() self.instance = instance def flavor(self): if 'flavor' not in self.instance: self.fail("'flavor' not found in instance.") else: allowed_attrs = ['id', 'links'] self.contains_allowed_attrs( self.instance['flavor'], allowed_attrs, msg="Flavor") self.links(self.instance['flavor']['links']) def datastore(self): if 'datastore' not in self.instance: self.fail("'datastore' not found in instance.") else: allowed_attrs = ['type', 'version'] self.contains_allowed_attrs( self.instance['datastore'], allowed_attrs, msg="datastore") def volume_key_exists(self): if 'volume' not in self.instance: self.fail("'volume' not found in instance.") return False return True def volume(self): if not VOLUME_SUPPORT: return if self.volume_key_exists(): allowed_attrs = ['size'] if not create_new_instance(): allowed_attrs.append('used') self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volumes") def used_volume(self): if not VOLUME_SUPPORT: return if self.volume_key_exists(): allowed_attrs = ['size', 'used'] print(self.instance) self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volumes") def volume_mgmt(self): if not VOLUME_SUPPORT: return if self.volume_key_exists(): allowed_attrs = ['description', 'id', 'name', 'size'] self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volumes") def addresses(self): allowed_attrs = ['addr', 'version'] print(self.instance) networks = ['usernet'] for network in networks: for address in self.instance['addresses'][network]: self.contains_allowed_attrs( address, allowed_attrs, msg="Address") def guest_status(self): allowed_attrs = ['created_at', 'deleted', 'deleted_at', 'instance_id', 'state', 'state_description', 'updated_at'] self.contains_allowed_attrs( self.instance['guest_status'], allowed_attrs, msg="Guest status") def mgmt_volume(self): if not VOLUME_SUPPORT: return allowed_attrs = ['description', 'id', 'name', 'size'] self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volume") def replica_of(self): if 'replica_of' not in self.instance: self.fail("'replica_of' not found in instance.") else: allowed_attrs = ['id', 'links'] self.contains_allowed_attrs( self.instance['replica_of'], allowed_attrs, msg="Replica-of links not found") self.links(self.instance['replica_of']['links']) def slaves(self): if 'replicas' not in self.instance: self.fail("'replicas' not found in instance.") else: allowed_attrs = ['id', 'links'] for slave in self.instance['replicas']: self.contains_allowed_attrs( slave, allowed_attrs, msg="Replica links not found") self.links(slave['links']) @test(groups=[tests.DBAAS_API_INSTANCES], depends_on_groups=[tests.DBAAS_API_MGMT_DATASTORES]) class TestInstanceSetup(object): """Prepare the instance creation parameters.""" @before_class def setUp(self): """Sets up the 
client.""" reqs = Requirements(is_admin=True) instance_info.admin_user = CONFIG.users.find_user(reqs) instance_info.dbaas_admin = create_dbaas_client( instance_info.admin_user) global dbaas_admin dbaas_admin = instance_info.dbaas_admin # Make sure we create the client as the correct user if we're using # a pre-built instance. if existing_instance(): mgmt_inst = dbaas_admin.mgmt.instances.show(existing_instance()) t_id = mgmt_inst.tenant_id instance_info.user = CONFIG.users.find_user_by_tenant_id(t_id) else: reqs = Requirements(is_admin=False) instance_info.user = CONFIG.users.find_user(reqs) instance_info.dbaas = create_dbaas_client(instance_info.user) global dbaas dbaas = instance_info.dbaas @test def test_find_flavor(self): flavor, flavor_href = instance_info.find_default_flavor() instance_info.dbaas_flavor = flavor instance_info.dbaas_flavor_href = flavor_href @test def create_instance_name(self): id = existing_instance() if id is None: instance_info.name = "TEST_" + str(uuid.uuid4()) else: instance_info.name = dbaas.instances.get(id).name @test(groups=[tests.DBAAS_API_INSTANCES], depends_on_classes=[TestInstanceSetup]) class TestCreateInstanceQuota(unittest.TestCase): def tearDown(self): quota_dict = {'instances': CONFIG.trove_max_instances_per_tenant, 'volumes': CONFIG.trove_max_volumes_per_tenant} dbaas_admin.quota.update(instance_info.user.tenant_id, quota_dict) def test_instance_size_too_big(self): if ('trove_max_accepted_volume_size' in CONFIG.values and VOLUME_SUPPORT): too_big = CONFIG.trove_max_accepted_volume_size assert_raises(exceptions.OverLimit, dbaas.instances.create, "volume_size_too_large", instance_info.dbaas_flavor_href, {'size': too_big + 1}, nics=instance_info.nics) def test_update_quota_invalid_resource_should_fail(self): quota_dict = {'invalid_resource': 100} assert_raises(exceptions.NotFound, dbaas_admin.quota.update, instance_info.user.tenant_id, quota_dict) def test_update_quota_volume_should_fail_volume_not_supported(self): if VOLUME_SUPPORT: raise SkipTest("Volume support needs to be disabled") quota_dict = {'volumes': 100} assert_raises(exceptions.NotFound, dbaas_admin.quota.update, instance_info.user.tenant_id, quota_dict) def test_create_too_many_instances(self): instance_quota = 0 quota_dict = {'instances': instance_quota} new_quotas = dbaas_admin.quota.update(instance_info.user.tenant_id, quota_dict) set_quota = dbaas_admin.quota.show(instance_info.user.tenant_id) verify_quota = {q.resource: q.limit for q in set_quota} assert_equal(new_quotas['instances'], quota_dict['instances']) assert_equal(0, verify_quota['instances']) volume = None if VOLUME_SUPPORT: assert_equal(CONFIG.trove_max_volumes_per_tenant, verify_quota['volumes']) volume = {'size': CONFIG.get('trove_volume_size', 1)} assert_raises(exceptions.OverLimit, dbaas.instances.create, "too_many_instances", instance_info.dbaas_flavor_href, volume, nics=instance_info.nics) assert_equal(413, dbaas.last_http_code) def test_create_instances_total_volume_exceeded(self): if not VOLUME_SUPPORT: raise SkipTest("Volume support not enabled") volume_quota = 3 quota_dict = {'volumes': volume_quota} new_quotas = dbaas_admin.quota.update(instance_info.user.tenant_id, quota_dict) assert_equal(volume_quota, new_quotas['volumes']) assert_raises(exceptions.OverLimit, dbaas.instances.create, "too_large_volume", instance_info.dbaas_flavor_href, {'size': volume_quota + 1}, nics=instance_info.nics) assert_equal(413, dbaas.last_http_code) @test(groups=[tests.DBAAS_API_INSTANCES], 
depends_on_classes=[TestCreateInstanceQuota]) class CreateInstanceFail(object): """Negative instance creation tests.""" def instance_in_error(self, instance_id): def check_if_error(): instance = dbaas.instances.get(instance_id) if instance.status == "ERROR": return True else: # The status should still be BUILD assert_equal("BUILD", instance.status) return False return check_if_error def delete_async(self, instance_id): dbaas.instances.delete(instance_id) while True: try: dbaas.instances.get(instance_id) except exceptions.NotFound: return True time.sleep(1) @test def test_create_with_bad_availability_zone(self): instance_name = "instance-failure-with-bad-az" if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None databases = [] result = dbaas.instances.create(instance_name, instance_info.dbaas_flavor_href, volume, databases, availability_zone="BAD_ZONE", nics=instance_info.nics) poll_until(self.instance_in_error(result.id), sleep_time=5, time_out=30) instance = dbaas.instances.get(result.id) assert_equal("ERROR", instance.status) self.delete_async(result.id) @test def test_create_with_invalid_net_id(self): instance_name = "instance-failure-with-invalid-net" if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None databases = [] bad_nic = [{"net-id": "1234"}] assert_raises( exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, nics=bad_nic ) assert_equal(400, dbaas.last_http_code) @test def test_create_with_multiple_net_id(self): instance_name = "instance_failure_with_multiple_net_id" volume = {'size': CONFIG.get('trove_volume_size', 1)} databases = [] multi_nics = [ {"net-id": str(uuid.uuid4())}, {"net-id": str(uuid.uuid4())} ] assert_raises( exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, nics=multi_nics ) assert_equal(400, dbaas.last_http_code) @test def test_create_with_port_id(self): instance_name = "instance-failure-with-port-id" if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None databases = [] bad_nic = [{"port-id": "1234"}] assert_raises( exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, nics=bad_nic ) assert_equal(400, dbaas.last_http_code) @test def test_create_failure_with_empty_flavor(self): instance_name = "instance-failure-with-empty-flavor" databases = [] if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, '', volume, databases, nics=instance_info.nics) assert_equal(400, dbaas.last_http_code) @test(enabled=VOLUME_SUPPORT) def test_create_failure_with_empty_volume(self): instance_name = "instance-failure-with-no-volume-size" databases = [] volume = {} assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, nics=instance_info.nics) assert_equal(400, dbaas.last_http_code) @test(enabled=VOLUME_SUPPORT) def test_create_failure_with_no_volume_size(self): instance_name = "instance-failure-with-no-volume-size" databases = [] volume = {'size': None} assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, nics=instance_info.nics) assert_equal(400, dbaas.last_http_code) @test(enabled=not VOLUME_SUPPORT) def 
test_create_failure_with_volume_size_and_volume_disabled(self): instance_name = "instance-failure-volume-size_and_volume_disabled" databases = [] volume = {'size': 2} assert_raises(exceptions.HTTPNotImplemented, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, nics=instance_info.nics) assert_equal(501, dbaas.last_http_code) @test(enabled=EPHEMERAL_SUPPORT) def test_create_failure_with_no_ephemeral_flavor(self): instance_name = "instance-failure-with-no-ephemeral-flavor" databases = [] flavor_name = CONFIG.values.get('instance_flavor_name', 'm1.tiny') flavors = dbaas.find_flavors_by_name(flavor_name) assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, flavors[0].id, None, databases, nics=instance_info.nics) assert_equal(400, dbaas.last_http_code) @test def test_create_failure_with_no_name(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "" databases = [] assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, nics=instance_info.nics) assert_equal(400, dbaas.last_http_code) @test def test_create_failure_with_spaces_for_name(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = " " databases = [] assert_raises(exceptions.BadRequest, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, nics=instance_info.nics) assert_equal(400, dbaas.last_http_code) @test def test_mgmt_get_instance_on_create(self): if CONFIG.test_mgmt: result = dbaas_admin.management.show(instance_info.id) allowed_attrs = ['account_id', 'addresses', 'created', 'databases', 'flavor', 'guest_status', 'host', 'hostname', 'id', 'name', 'datastore', 'server_state_description', 'status', 'updated', 'users', 'volume', 'root_enabled_at', 'root_enabled_by', 'fault', 'service_status_updated'] with CheckInstance(result._info) as check: check.contains_allowed_attrs( result._info, allowed_attrs, msg="Mgmt get instance") check.flavor() check.datastore() check.guest_status() @test def test_create_failure_with_datastore_default_not_defined(self): if not FAKE: raise SkipTest("This test only for fake mode.") if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "datastore_default_notfound" databases = [] users = [] origin_default_datastore = (datastore_models.CONF. default_datastore) datastore_models.CONF.default_datastore = "" try: assert_raises(exceptions.NotFound, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, users, nics=instance_info.nics) except exceptions.BadRequest as e: assert_equal(e.message, "Please specify datastore. No default datastore " "is defined.") datastore_models.CONF.default_datastore = \ origin_default_datastore @test def test_create_failure_with_datastore_default_version_notfound(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "datastore_default_version_notfound" databases = [] users = [] datastore = CONFIG.dbaas_datastore_name_no_versions try: assert_raises(exceptions.NotFound, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, users, datastore=datastore, nics=instance_info.nics) except exceptions.BadRequest as e: assert_equal(e.message, "Default version for datastore '%s' not found." 
% datastore) @test def test_create_failure_with_datastore_notfound(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "datastore_notfound" databases = [] users = [] datastore = "nonexistent" try: assert_raises(exceptions.NotFound, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, users, datastore=datastore, nics=instance_info.nics) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore '%s' cannot be found." % datastore) @test def test_create_failure_with_datastore_version_notfound(self): if VOLUME_SUPPORT: volume = {'size': CONFIG.get('trove_volume_size', 1)} else: volume = None instance_name = "datastore_version_notfound" databases = [] users = [] datastore = CONFIG.dbaas_datastore datastore_version = "nonexistent" try: assert_raises(exceptions.NotFound, dbaas.instances.create, instance_name, instance_info.dbaas_flavor_href, volume, databases, users, datastore=datastore, datastore_version=datastore_version, nics=instance_info.nics) except exceptions.BadRequest as e: assert_equal(e.message, "Datastore version '%s' cannot be found." % datastore_version) @test( groups=[tests.DBAAS_API_INSTANCES], depends_on_classes=[CreateInstanceFail], ) class CreateInstance(object): """Test to create a Database Instance If the call returns without raising an exception this test passes. """ @test def test_create(self): databases = [] databases.append({"name": "firstdb", "character_set": "latin2", "collate": "latin2_general_ci"}) databases.append({"name": "db2"}) instance_info.databases = databases users = [] users.append({"name": "lite", "password": "litepass", "databases": [{"name": "firstdb"}]}) instance_info.users = users instance_info.dbaas_datastore = CONFIG.dbaas_datastore instance_info.dbaas_datastore_version = CONFIG.dbaas_datastore_version if VOLUME_SUPPORT: instance_info.volume = {'size': CONFIG.get('trove_volume_size', 2)} else: instance_info.volume = None if create_new_instance(): instance_info.initial_result = dbaas.instances.create( instance_info.name, instance_info.dbaas_flavor_href, instance_info.volume, databases, users, nics=instance_info.nics, availability_zone="nova", datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version) assert_equal(200, dbaas.last_http_code) else: id = existing_instance() instance_info.initial_result = dbaas.instances.get(id) result = instance_info.initial_result instance_info.id = result.id instance_info.dbaas_datastore_version = result.datastore['version'] report = CONFIG.get_report() report.log("Instance UUID = %s" % instance_info.id) if create_new_instance(): assert_equal("BUILD", instance_info.initial_result.status) else: report.log("Test was invoked with TESTS_USE_INSTANCE_ID=%s, so no " "instance was actually created." % id) # Check these attrs only are returned in create response allowed_attrs = ['created', 'flavor', 'addresses', 'id', 'links', 'name', 'status', 'updated', 'datastore', 'fault', 'region', 'service_status_updated'] if ROOT_ON_CREATE: allowed_attrs.append('password') if VOLUME_SUPPORT: allowed_attrs.append('volume') if CONFIG.trove_dns_support: allowed_attrs.append('hostname') with CheckInstance(result._info) as check: if create_new_instance(): check.contains_allowed_attrs( result._info, allowed_attrs, msg="Create response") # Don't CheckInstance if the instance already exists. 
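# The structural checks below (flavor, datastore, links, volume) apply to
# both freshly created and reused instances; only the attribute whitelist
# check above is restricted to brand-new instances.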
check.flavor() check.datastore() check.links(result._info['links']) if VOLUME_SUPPORT: check.volume() @test( groups=[tests.DBAAS_API_INSTANCES], depends_on_classes=[CreateInstance], enabled=create_new_instance() ) class AfterInstanceCreation(unittest.TestCase): # instance calls def test_instance_delete_right_after_create(self): assert_unprocessable(dbaas.instances.delete, instance_info.id) # root calls def test_root_create_root_user_after_create(self): assert_unprocessable(dbaas.root.create, instance_info.id) def test_root_is_root_enabled_after_create(self): assert_unprocessable(dbaas.root.is_root_enabled, instance_info.id) # database calls def test_database_index_after_create(self): assert_unprocessable(dbaas.databases.list, instance_info.id) def test_database_delete_after_create(self): assert_unprocessable(dbaas.databases.delete, instance_info.id, "testdb") def test_database_create_after_create(self): assert_unprocessable(dbaas.databases.create, instance_info.id, instance_info.databases) # user calls def test_users_index_after_create(self): assert_unprocessable(dbaas.users.list, instance_info.id) def test_users_delete_after_create(self): assert_unprocessable(dbaas.users.delete, instance_info.id, "testuser") def test_users_create_after_create(self): users = list() users.append({"name": "testuser", "password": "password", "databases": [{"name": "testdb"}]}) assert_unprocessable(dbaas.users.create, instance_info.id, users) def test_resize_instance_after_create(self): assert_unprocessable(dbaas.instances.resize_instance, instance_info.id, 8) def test_resize_volume_after_create(self): assert_unprocessable(dbaas.instances.resize_volume, instance_info.id, 2) @test( depends_on_classes=[AfterInstanceCreation], groups=[tests.DBAAS_API_INSTANCES], enabled=create_new_instance() ) class WaitForGuestInstallationToFinish(object): @test @time_out(TIMEOUT_INSTANCE_CREATE) def test_instance_created(self): """Wait for normal instance to be created.""" def result_is_active(): instance = dbaas.instances.get(instance_info.id) if instance.status in CONFIG.running_status: return True else: # If its not ACTIVE, anything but BUILD must be # an error. assert_equal("BUILD", instance.status) if instance_info.volume is not None: assert_equal(instance.volume.get('used', None), None) return False poll_until(result_is_active, sleep_time=5) dbaas.instances.get(instance_info.id) report = CONFIG.get_report() report.log("Created an instance, ID = %s." % instance_info.id) report.log("TIP:") report.log("Rerun the tests with TESTS_USE_INSTANCE_ID=%s " "to skip ahead to this point." 
% instance_info.id) report.log("Add TESTS_DO_NOT_DELETE_INSTANCE=True to avoid deleting " "the instance at the end of the tests.") @test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[tests.DBAAS_API_INSTANCES]) class TestDBandUserAfterInstanceCreated(object): @test def test_databases(self): """Get databases after instance creation.""" databases = dbaas.databases.list(instance_info.id) dbs = [database.name for database in databases] for db in instance_info.databases: assert_true(db["name"] in dbs) @test def test_users(self): """Get users after instance creation.""" users = dbaas.users.list(instance_info.id) usernames = [user.name for user in users] for user in instance_info.users: assert_true(user["name"] in usernames) @test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[tests.DBAAS_API_INSTANCES]) class TestGetInstances(object): @before_class def setUp(self): reqs = Requirements(is_admin=False) self.other_user = CONFIG.users.find_user( reqs, black_list=[instance_info.user.auth_user]) self.other_client = create_dbaas_client(self.other_user) @test def test_index_list(self): allowed_attrs = ['id', 'links', 'name', 'status', 'flavor', 'datastore', 'ip', 'hostname', 'replica_of', 'region'] if VOLUME_SUPPORT: allowed_attrs.append('volume') instances = dbaas.instances.list() assert_equal(200, dbaas.last_http_code) for instance in instances: instance_dict = instance._info with CheckInstance(instance_dict) as check: print("testing instance_dict=%s" % instance_dict) check.contains_allowed_attrs( instance_dict, allowed_attrs, msg="Instance Index") check.links(instance_dict['links']) check.flavor() check.datastore() check.volume() @test def test_detailed_list(self): allowed_attrs = ['created', 'databases', 'flavor', 'hostname', 'id', 'links', 'name', 'status', 'updated', 'ip', 'datastore', 'fault', 'region', 'service_status_updated'] if VOLUME_SUPPORT: allowed_attrs.append('volume') instances = dbaas.instances.list(detailed=True) assert_equal(200, dbaas.last_http_code) for instance in instances: instance_dict = instance._info with CheckInstance(instance_dict) as check: check.contains_allowed_attrs(instance_dict, allowed_attrs, msg="Instance Detailed Index") check.flavor() check.datastore() check.volume() check.used_volume() @test def test_get_instance(self): allowed_attrs = ['created', 'databases', 'flavor', 'hostname', 'id', 'links', 'name', 'status', 'updated', 'ip', 'datastore', 'fault', 'region', 'service_status_updated'] if VOLUME_SUPPORT: allowed_attrs.append('volume') else: allowed_attrs.append('local_storage') instance = dbaas.instances.get(instance_info.id) assert_equal(200, dbaas.last_http_code) instance_dict = instance._info print("instance_dict=%s" % instance_dict) with CheckInstance(instance_dict) as check: check.contains_allowed_attrs( instance_dict, allowed_attrs, msg="Get Instance") check.flavor() check.datastore() check.links(instance_dict['links']) check.used_volume() @test def test_get_instance_status(self): result = dbaas.instances.get(instance_info.id) assert_equal(200, dbaas.last_http_code) asserts.assert_true(result.status in CONFIG.running_status) @test def test_get_legacy_status(self): result = dbaas.instances.get(instance_info.id) assert_equal(200, dbaas.last_http_code) assert_true(result is not None) @test def test_get_legacy_status_notfound(self): assert_raises(exceptions.NotFound, dbaas.instances.get, -2) @test(enabled=VOLUME_SUPPORT) def test_volume_found(self): instance = dbaas.instances.get(instance_info.id) if create_new_instance(): 
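# Only compare against the originally requested volume size when this run
# created the instance itself; a reused instance may have been resized by
# an earlier run.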
assert_equal(instance_info.volume['size'], instance.volume['size']) @test(enabled=EPHEMERAL_SUPPORT) def test_ephemeral_mount(self): instance = dbaas.instances.get(instance_info.id) assert_true(isinstance(instance.local_storage['used'], float)) @test(enabled=ROOT_PARTITION) def test_root_partition(self): instance = dbaas.instances.get(instance_info.id) assert_true(isinstance(instance.local_storage['used'], float)) @test def test_instance_not_shown_to_other_user(self): daffy_ids = [instance.id for instance in self.other_client.instances.list()] assert_equal(200, self.other_client.last_http_code) admin_ids = [instance.id for instance in dbaas.instances.list()] assert_equal(200, dbaas.last_http_code) assert_not_equal(sorted(admin_ids), sorted(daffy_ids)) assert_raises(exceptions.NotFound, self.other_client.instances.get, instance_info.id) for id in admin_ids: assert_equal(daffy_ids.count(id), 0) @test def test_instance_not_deleted_by_other_user(self): assert_raises(exceptions.NotFound, self.other_client.instances.get, instance_info.id) assert_raises(exceptions.NotFound, self.other_client.instances.delete, instance_info.id) @test(enabled=CONFIG.test_mgmt) def test_mgmt_get_instance_after_started(self): result = dbaas_admin.management.show(instance_info.id) allowed_attrs = ['account_id', 'addresses', 'created', 'databases', 'flavor', 'guest_status', 'host', 'hostname', 'id', 'name', 'root_enabled_at', 'root_enabled_by', 'server_state_description', 'status', 'datastore', 'updated', 'users', 'volume', 'fault', 'region'] with CheckInstance(result._info) as check: check.contains_allowed_attrs( result._info, allowed_attrs, msg="Mgmt get instance") check.flavor() check.datastore() check.guest_status() check.addresses() check.volume_mgmt() @test(depends_on_classes=[TestGetInstances], groups=[tests.DBAAS_API_INSTANCES], enabled=CONFIG.test_mgmt) class TestInstanceMgmtInfo(object): @before_class def set_up(self): self.mgmt_details = dbaas_admin.management.show(instance_info.id) @test def test_mgmt_ips_associated(self): """Every instances has exactly one address""" mgmt_index = dbaas_admin.management.index() for instance in mgmt_index: assert_equal(1, len(instance.ips)) @test def test_mgmt_data(self): """Test management API returns all the values expected.""" info = instance_info ir = info.initial_result cid = ir.id expected = { 'id': cid, 'name': ir.name, 'account_id': info.user.auth_user, 'databases': [ { 'name': 'db2', 'character_set': 'utf8', 'collate': 'utf8_general_ci', }, { 'name': 'firstdb', 'character_set': 'latin2', 'collate': 'latin2_general_ci', } ], } expected_entry = info.expected_dns_entry() if expected_entry: expected['hostname'] = expected_entry.name assert_true(self.mgmt_details is not None) for (k, v) in expected.items(): msg = "Attr %r is missing." % k assert_true(hasattr(self.mgmt_details, k), msg) msg = ("Attr %r expected to be %r but was %r." 
                   % (k, v, getattr(self.mgmt_details, k)))
            assert_equal(getattr(self.mgmt_details, k), v, msg)
        print(self.mgmt_details.users)
        for user in self.mgmt_details.users:
            assert_true('name' in user, "'name' not in users element.")


@test(depends_on_classes=[TestInstanceMgmtInfo],
      groups=[tests.DBAAS_API_INSTANCES])
class TestUpdateInstance(object):
    """Test updating instance."""

    @test
    def test_update_name(self):
        new_name = 'new-name'
        result = dbaas.instances.edit(instance_info.id, name=new_name)
        assert_equal(202, dbaas.last_http_code)
        result = dbaas.instances.get(instance_info.id)
        assert_equal(200, dbaas.last_http_code)
        assert_equal(new_name, result.name)

        # Restore instance name because other tests depend on it
        dbaas.instances.edit(instance_info.id, name=instance_info.name)
        assert_equal(202, dbaas.last_http_code)

    @test
    def test_update_name_to_invalid_instance(self):
        # test assigning to an instance that does not exist
        invalid_id = "invalid-inst-id"
        assert_raises(exceptions.NotFound,
                      instance_info.dbaas.instances.edit,
                      invalid_id, name='name')
        assert_equal(404, instance_info.dbaas.last_http_code)


# --- trove/tests/api/instances_actions.py ---

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import time

from proboscis import after_class
from proboscis import asserts
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import SkipTest
from proboscis import test
from troveclient.compat.exceptions import BadRequest
from troveclient.compat.exceptions import HTTPNotImplemented

from trove.common import cfg
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import EPHEMERAL_SUPPORT
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import VOLUME_SUPPORT
from trove.tests.config import CONFIG
import trove.tests.util as testsutil
from trove.tests.util.check import TypeCheck
from trove.tests.util import LocalSqlClient
from trove.tests.util.server_connection import create_server_connection

MYSQL_USERNAME = "test_user"
MYSQL_PASSWORD = "abcde"
FAKE_MODE = CONFIG.fake_mode
# If true, then we will actually log into the database.
USE_IP = not FAKE_MODE class MySqlConnection(object): def __init__(self, host): self.host = host def connect(self): """Connect to MySQL database.""" print("Connecting to MySQL, mysql --host %s -u %s -p%s" % (self.host, MYSQL_USERNAME, MYSQL_PASSWORD)) sql_engine = LocalSqlClient.init_engine(MYSQL_USERNAME, MYSQL_PASSWORD, self.host) self.client = LocalSqlClient(sql_engine, use_flush=False) def is_connected(self): cmd = "SELECT 1;" try: with self.client: self.client.execute(cmd) return True except Exception as e: print( "Failed to execute command: %s, error: %s" % (cmd, str(e)) ) return False def execute(self, cmd): try: with self.client: self.client.execute(cmd) return True except Exception as e: print( "Failed to execute command: %s, error: %s" % (cmd, str(e)) ) return False # Use default value from trove.common.cfg, and it could be overridden by # a environment variable when the tests run. def get_resize_timeout(): value_from_env = os.environ.get("TROVE_RESIZE_TIME_OUT", None) if value_from_env: return int(value_from_env) return cfg.CONF.resize_time_out TIME_OUT_TIME = get_resize_timeout() class ActionTestBase(object): """Has some helpful functions for testing actions. The test user must be created for some of these functions to work. """ def set_up(self): """If you're using this as a base class, call this method first.""" self.dbaas = instance_info.dbaas if USE_IP: address = instance_info.get_address() self.connection = MySqlConnection(address) @property def instance(self): return self.dbaas.instances.get(self.instance_id) @property def instance_address(self): return instance_info.get_address() @property def instance_mgmt_address(self): return instance_info.get_address(mgmt=True) @property def instance_id(self): return instance_info.id def create_user(self): """Create a MySQL user we can use for this test.""" users = [{"name": MYSQL_USERNAME, "password": MYSQL_PASSWORD, "databases": [{"name": MYSQL_USERNAME}]}] self.dbaas.users.create(instance_info.id, users) def has_user(): users = self.dbaas.users.list(instance_info.id) return any([user.name == MYSQL_USERNAME for user in users]) poll_until(has_user, time_out=30) if not FAKE_MODE: time.sleep(5) def ensure_mysql_is_running(self): if USE_IP: self.connection.connect() asserts.assert_true(self.connection.is_connected(), "Unable to connect to MySQL.") self.proc_id = self.find_mysql_proc_on_instance() asserts.assert_is_not_none(self.proc_id, "MySQL process can not be found.") asserts.assert_is_not_none(self.instance) asserts.assert_true(self.instance.status in CONFIG.running_status) def find_mysql_proc_on_instance(self): server = create_server_connection( self.instance_id, ip_address=self.instance_mgmt_address ) cmd = "sudo ps acux | grep mysqld " \ "| grep -v mysqld_safe | awk '{print $2}'" try: stdout = server.execute(cmd) return int(stdout) except ValueError: return None except Exception as e: asserts.fail("Failed to execute command: %s, error: %s" % (cmd, str(e))) def log_current_users(self): users = self.dbaas.users.list(self.instance_id) CONFIG.get_report().log("Current user count = %d" % len(users)) for user in users: CONFIG.get_report().log("\t" + str(user)) def _build_expected_msg(self): expected = { 'instance_size': instance_info.dbaas_flavor.ram, 'tenant_id': instance_info.user.tenant_id, 'instance_id': instance_info.id, 'instance_name': instance_info.name, 'created_at': testsutil.iso_time( instance_info.initial_result.created), 'launched_at': testsutil.iso_time(self.instance.updated), 'modify_at': 
testsutil.iso_time(self.instance.updated) } return expected @test(depends_on_groups=[tests.DBAAS_API_INSTANCES]) def create_user(): """Create a test user so that subsequent tests can log in.""" helper = ActionTestBase() helper.set_up() if USE_IP: try: helper.create_user() except BadRequest: pass # Ignore this if the user already exists. helper.connection.connect() asserts.assert_true(helper.connection.is_connected(), "Test user must be able to connect to MySQL.") class RebootTestBase(ActionTestBase): """Tests restarting MySQL.""" def call_reboot(self): raise NotImplementedError() def wait_for_successful_restart(self): """Wait until status becomes running. Reboot is an async operation, make sure the instance is rebooting before active. """ def _is_rebooting(): instance = self.instance if instance.status == "REBOOT": return True return False poll_until(_is_rebooting, time_out=TIME_OUT_TIME) def is_finished_rebooting(): instance = self.instance asserts.assert_not_equal(instance.status, "ERROR") if instance.status in CONFIG.running_status: return True return False poll_until(is_finished_rebooting, time_out=TIME_OUT_TIME) def assert_mysql_proc_is_different(self): if not USE_IP: return new_proc_id = self.find_mysql_proc_on_instance() asserts.assert_not_equal(new_proc_id, self.proc_id, "MySQL process ID should be different!") def successful_restart(self): """Restart MySQL via the REST API successfully.""" self.call_reboot() self.wait_for_successful_restart() self.assert_mysql_proc_is_different() def wait_for_failure_status(self): """Wait until status becomes running.""" def is_finished_rebooting(): instance = self.instance if instance.status in ['REBOOT', 'ACTIVE', 'HEALTHY']: return False # The reason we check for BLOCKED as well as SHUTDOWN is because # Upstart might try to bring mysql back up after the borked # connection and the guest status can be either asserts.assert_true(instance.status in ("SHUTDOWN", "BLOCKED")) return True poll_until(is_finished_rebooting, time_out=TIME_OUT_TIME) @test(groups=[tests.DBAAS_API_INSTANCE_ACTIONS], depends_on_groups=[tests.DBAAS_API_DATABASES], depends_on=[create_user]) class RestartTests(RebootTestBase): """Test restarting MySQL.""" def call_reboot(self): self.instance.restart() asserts.assert_equal(202, self.dbaas.last_http_code) @before_class def test_set_up(self): self.set_up() @test def test_ensure_mysql_is_running(self): """Make sure MySQL is accessible before restarting.""" self.ensure_mysql_is_running() @test(depends_on=[test_ensure_mysql_is_running]) def test_successful_restart(self): """Restart MySQL via the REST API successfully.""" self.successful_restart() @test(groups=[tests.DBAAS_API_INSTANCE_ACTIONS], depends_on_classes=[RestartTests]) class StopTests(RebootTestBase): """Test stopping MySQL.""" def call_reboot(self): self.instance.restart() @before_class def test_set_up(self): self.set_up() @test def test_ensure_mysql_is_running(self): """Make sure MySQL is accessible before restarting.""" self.ensure_mysql_is_running() @test(depends_on=[test_ensure_mysql_is_running]) def test_stop_mysql(self): """Stops MySQL.""" instance_info.dbaas_admin.management.stop(self.instance_id) self.wait_for_failure_status() @test(depends_on=[test_stop_mysql]) def test_volume_info_while_mysql_is_down(self): """ Confirms the get call behaves appropriately while an instance is down. 
""" if not VOLUME_SUPPORT: raise SkipTest("Not testing volumes.") instance = self.dbaas.instances.get(self.instance_id) with TypeCheck("instance", instance) as check: check.has_field("volume", dict) check.true('size' in instance.volume) check.true('used' in instance.volume) check.true(isinstance(instance.volume.get('size', None), int)) check.true(isinstance(instance.volume.get('used', None), float)) @test(depends_on=[test_volume_info_while_mysql_is_down]) def test_successful_restart_from_shutdown(self): """Restart MySQL via the REST API successfully when MySQL is down.""" self.successful_restart() @test(groups=[tests.DBAAS_API_INSTANCE_ACTIONS], depends_on_classes=[StopTests]) class RebootTests(RebootTestBase): """Test restarting instance.""" def call_reboot(self): instance_info.dbaas_admin.management.reboot(self.instance_id) @before_class def test_set_up(self): self.set_up() asserts.assert_true(hasattr(self, 'dbaas')) asserts.assert_true(self.dbaas is not None) @test def test_ensure_mysql_is_running(self): """Make sure MySQL is accessible before rebooting.""" self.ensure_mysql_is_running() @after_class(depends_on=[test_ensure_mysql_is_running]) def test_successful_reboot(self): """MySQL process is different after rebooting.""" if FAKE_MODE: raise SkipTest("Cannot run this in fake mode.") self.successful_restart() @test(groups=[tests.DBAAS_API_INSTANCE_ACTIONS], depends_on_classes=[RebootTests]) class ResizeInstanceTest(ActionTestBase): """Test resizing instance.""" @property def flavor_id(self): return instance_info.dbaas_flavor_href def get_flavor_href(self, flavor_id=2): res = instance_info.dbaas.find_flavor_and_self_href(flavor_id) _, dbaas_flavor_href = res return dbaas_flavor_href def wait_for_resize(self): def is_finished_resizing(): instance = self.instance if instance.status == "RESIZE": return False asserts.assert_true(instance.status in CONFIG.running_status) return True poll_until(is_finished_resizing, time_out=TIME_OUT_TIME) @before_class def setup(self): self.set_up() if USE_IP: self.connection.connect() asserts.assert_true(self.connection.is_connected(), "Should be able to connect before resize.") @test def test_instance_resize_same_size_should_fail(self): asserts.assert_raises(BadRequest, self.dbaas.instances.resize_instance, self.instance_id, self.flavor_id) @test(enabled=VOLUME_SUPPORT) def test_instance_resize_to_ephemeral_in_volume_support_should_fail(self): flavor_name = CONFIG.values.get('instance_bigger_eph_flavor_name', 'eph.rd-smaller') flavors = self.dbaas.find_flavors_by_name(flavor_name) def is_active(): return self.instance.status in CONFIG.running_status poll_until(is_active, time_out=TIME_OUT_TIME) asserts.assert_true(self.instance.status in CONFIG.running_status) asserts.assert_raises(HTTPNotImplemented, self.dbaas.instances.resize_instance, self.instance_id, flavors[0].id) @test(enabled=EPHEMERAL_SUPPORT) def test_instance_resize_to_non_ephemeral_flavor_should_fail(self): flavor_name = CONFIG.values.get('instance_bigger_flavor_name', 'm1-small') flavors = self.dbaas.find_flavors_by_name(flavor_name) asserts.assert_raises(BadRequest, self.dbaas.instances.resize_instance, self.instance_id, flavors[0].id) def obtain_flavor_ids(self): old_id = self.instance.flavor['id'] self.expected_old_flavor_id = old_id res = instance_info.dbaas.find_flavor_and_self_href(old_id) self.expected_dbaas_flavor, _ = res if EPHEMERAL_SUPPORT: flavor_name = CONFIG.values.get('instance_bigger_eph_flavor_name', 'eph.rd-smaller') else: flavor_name = 
CONFIG.values.get('instance_bigger_flavor_name', 'm1.small') flavors = self.dbaas.find_flavors_by_name(flavor_name) asserts.assert_equal(len(flavors), 1, "Number of flavors with name '%s' " "found was '%d'." % (flavor_name, len(flavors))) flavor = flavors[0] self.old_dbaas_flavor = instance_info.dbaas_flavor instance_info.dbaas_flavor = flavor self.expected_new_flavor_id = flavor.id @test(depends_on=[test_instance_resize_same_size_should_fail]) def test_status_changed_to_resize(self): """test_status_changed_to_resize""" self.log_current_users() self.obtain_flavor_ids() self.dbaas.instances.resize_instance( self.instance_id, self.get_flavor_href(flavor_id=self.expected_new_flavor_id)) asserts.assert_equal(202, self.dbaas.last_http_code) # (WARNING) IF THE RESIZE IS WAY TOO FAST THIS WILL FAIL assert_unprocessable( self.dbaas.instances.resize_instance, self.instance_id, self.get_flavor_href(flavor_id=self.expected_new_flavor_id)) @test(depends_on=[test_status_changed_to_resize]) @time_out(TIME_OUT_TIME) def test_instance_returns_to_active_after_resize(self): """test_instance_returns_to_active_after_resize""" self.wait_for_resize() @test(depends_on=[test_instance_returns_to_active_after_resize, test_status_changed_to_resize], groups=["dbaas.usage"]) def test_resize_instance_usage_event_sent(self): expected = self._build_expected_msg() expected['old_instance_size'] = self.old_dbaas_flavor.ram instance_info.consumer.check_message(instance_info.id, 'trove.instance.modify_flavor', **expected) @test(depends_on=[test_instance_returns_to_active_after_resize], runs_after=[test_resize_instance_usage_event_sent]) def resize_should_not_delete_users(self): """Resize should not delete users.""" # Resize has an incredibly weird bug where users are deleted after # a resize. The code below is an attempt to catch this while proceeding # with the rest of the test (note the use of runs_after). 
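# If the test user vanished, recreate it first so the remaining tests can
# still connect, then fail loudly to flag the regression.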
if USE_IP: users = self.dbaas.users.list(self.instance_id) usernames = [user.name for user in users] if MYSQL_USERNAME not in usernames: self.create_user() asserts.fail("Resize made the test user disappear.") @test(depends_on=[test_instance_returns_to_active_after_resize], runs_after=[resize_should_not_delete_users]) def test_make_sure_mysql_is_running_after_resize(self): self.ensure_mysql_is_running() @test(depends_on=[test_make_sure_mysql_is_running_after_resize]) def test_instance_has_new_flavor_after_resize(self): actual = self.get_flavor_href(self.instance.flavor['id']) expected = self.get_flavor_href(flavor_id=self.expected_new_flavor_id) asserts.assert_equal(actual, expected) @test(depends_on_classes=[ResizeInstanceTest], groups=[tests.DBAAS_API_INSTANCE_ACTIONS], enabled=VOLUME_SUPPORT) class ResizeInstanceVolumeTest(ActionTestBase): """Resize the volume of the instance.""" @before_class def setUp(self): self.set_up() self.old_volume_size = int(instance_info.volume['size']) self.new_volume_size = self.old_volume_size + 1 self.old_volume_fs_size = instance_info.get_volume_filesystem_size() # Create some databases to check they still exist after the resize self.expected_dbs = ['salmon', 'halibut'] databases = [] for name in self.expected_dbs: databases.append({"name": name}) instance_info.dbaas.databases.create(instance_info.id, databases) @test @time_out(60) def test_volume_resize(self): instance_info.dbaas.instances.resize_volume(instance_info.id, self.new_volume_size) @test(depends_on=[test_volume_resize]) @time_out(300) def test_volume_resize_success(self): def check_resize_status(): instance = instance_info.dbaas.instances.get(instance_info.id) if instance.status in CONFIG.running_status: return True elif instance.status == "RESIZE": return False else: asserts.fail("Status should not be %s" % instance.status) poll_until(check_resize_status, sleep_time=2, time_out=300) instance = instance_info.dbaas.instances.get(instance_info.id) asserts.assert_equal(instance.volume['size'], self.new_volume_size) @test(depends_on=[test_volume_resize_success]) def test_volume_filesystem_resize_success(self): # The get_volume_filesystem_size is a mgmt call through the guestagent # and the volume resize occurs through the fake nova-volume. # Currently the guestagent fakes don't have access to the nova fakes so # it doesn't know that a volume resize happened and to what size so # we can't fake the filesystem size. if FAKE_MODE: raise SkipTest("Cannot run this in fake mode.") new_volume_fs_size = instance_info.get_volume_filesystem_size() asserts.assert_true(self.old_volume_fs_size < new_volume_fs_size) # The total filesystem size is not going to be exactly the same size of # cinder volume but it should round to it. (e.g. round(1.9) == 2) asserts.assert_equal(round(new_volume_fs_size), self.new_volume_size) @test(depends_on=[test_volume_resize_success], groups=["dbaas.usage"]) def test_resize_volume_usage_event_sent(self): expected = self._build_expected_msg() expected['volume_size'] = self.new_volume_size expected['old_volume_size'] = self.old_volume_size instance_info.consumer.check_message(instance_info.id, 'trove.instance.modify_volume', **expected) @test @time_out(300) def test_volume_resize_success_databases(self): databases = instance_info.dbaas.databases.list(instance_info.id) db_list = [] for database in databases: db_list.append(database.name) for name in self.expected_dbs: if name not in db_list: asserts.fail( "Database %s was not found after the volume resize. 
" "Returned list: %s" % (name, databases)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/instances_delete.py0000644000175000017500000000656200000000000023115 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import os import time from proboscis import asserts from proboscis.decorators import time_out from proboscis import SkipTest from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.api.instances import instance_info from trove.tests.config import CONFIG def do_not_delete_instance(): return os.environ.get("TESTS_DO_NOT_DELETE_INSTANCE", None) is not None @test(depends_on_groups=[tests.DBAAS_API_REPLICATION], groups=[tests.DBAAS_API_INSTANCES_DELETE], enabled=not do_not_delete_instance()) class TestDeleteInstance(object): @time_out(3 * 60) @test def test_delete(self): """Delete instance for clean up.""" if not hasattr(instance_info, "initial_result"): raise SkipTest("Instance was never created, skipping test...") # Update the report so the logs inside the instance will be saved. CONFIG.get_report().update() dbaas = instance_info.dbaas dbaas.instances.delete(instance_info.id) attempts = 0 try: time.sleep(1) result = True while result is not None: attempts += 1 result = dbaas.instances.get(instance_info.id) asserts.assert_equal(200, dbaas.last_http_code) asserts.assert_equal("SHUTDOWN", result.status) time.sleep(1) except exceptions.NotFound: pass except Exception as ex: asserts.fail("A failure occurred when trying to GET instance %s " "for the %d time: %s" % (str(instance_info.id), attempts, str(ex))) @test(depends_on=[test_delete]) def test_instance_status_deleted_in_db(self): """test_instance_status_deleted_in_db""" dbaas_admin = instance_info.dbaas_admin mgmt_details = dbaas_admin.management.index(deleted=True) for instance in mgmt_details: if instance.id == instance_info.id: asserts.assert_equal(instance.service_status, 'DELETED') break else: asserts.fail("Could not find instance %s" % instance_info.id) @test(depends_on=[test_instance_status_deleted_in_db]) def test_delete_datastore(self): dbaas_admin = instance_info.dbaas_admin datastore = dbaas_admin.datastores.get( CONFIG.dbaas_datastore_name_no_versions) versions = dbaas_admin.datastore_versions.list(datastore.id) for version in versions: dbaas_admin.mgmt_datastore_versions.delete(version.id) # Delete the datastore dbaas_admin.datastores.delete(datastore.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/api/instances_resize.py0000644000175000017500000004230200000000000023144 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from novaclient.exceptions import BadRequest from novaclient.v2.servers import Server from oslo_messaging._drivers.common import RPCException from proboscis import test from testtools import TestCase from trove.common.exception import PollTimeOut from trove.common.exception import TroveError from trove.common import instance as rd_instance from trove.common import template from trove.common import utils from trove.datastore.models import DatastoreVersion from trove.guestagent import api as guest from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus from trove.instance.tasks import InstanceTasks from trove.taskmanager import models from trove.tests.fakes import nova from trove.tests.unittests import trove_testtools from trove.tests.util import test_config GROUP = 'dbaas.api.instances.resize' OLD_FLAVOR_ID = 1 NEW_FLAVOR_ID = 2 OLD_FLAVOR = nova.FLAVORS.get(OLD_FLAVOR_ID) NEW_FLAVOR = nova.FLAVORS.get(NEW_FLAVOR_ID) class ResizeTestBase(TestCase): def _init(self): self.instance_id = 500 context = trove_testtools.TroveTestContext(self) self.db_info = DBInstance.create( name="instance", flavor_id=OLD_FLAVOR_ID, tenant_id=999, volume_size=None, datastore_version_id=test_config.dbaas_datastore_version_id, task_status=InstanceTasks.RESIZING) self.server = mock.MagicMock(spec=Server) self.instance = models.BuiltInstanceTasks( context, self.db_info, self.server, datastore_status=InstanceServiceStatus.create( instance_id=self.db_info.id, status=rd_instance.ServiceStatuses.RUNNING)) self.instance.server.flavor = {'id': OLD_FLAVOR_ID} self.guest = mock.MagicMock(spec=guest.API) self.instance._guest = self.guest self.instance.refresh_compute_server_info = lambda: None self.instance._refresh_datastore_status = lambda: None self.instance.update_db = mock.Mock() self.instance.set_datastore_status_to_paused = mock.Mock() self.poll_until_side_effects = [] self.action = None def tearDown(self): super(ResizeTestBase, self).tearDown() self.db_info.delete() def _poll_until(self, *args, **kwargs): try: effect = self.poll_until_side_effects.pop(0) except IndexError: effect = None if isinstance(effect, Exception): raise effect elif effect is not None: new_status, new_flavor_id = effect self.server.status = new_status self.instance.server.flavor['id'] = new_flavor_id def _datastore_changes_to(self, new_status): self.instance.datastore_status.status = new_status @test(groups=[GROUP, GROUP + '.resize']) class ResizeTests(ResizeTestBase): def setUp(self): super(ResizeTests, self).setUp() self._init() # By the time flavor objects pass over amqp to the # resize action they have been turned into dicts self.action = models.ResizeAction(self.instance, OLD_FLAVOR.__dict__, NEW_FLAVOR.__dict__) def _start_mysql(self): datastore = mock.Mock(spec=DatastoreVersion) datastore.datastore_name = 'mysql' datastore.name = 'mysql-5.7' datastore.manager = 'mysql' config = template.SingleInstanceConfigTemplate( datastore, NEW_FLAVOR.__dict__, 
self.instance.id) self.instance.guest.start_db_with_conf_changes(config.render()) def test_guest_wont_stop_mysql(self): self.guest.stop_db.side_effect = RPCException("Could not stop MySQL!") self.assertRaises(RPCException, self.action.execute) self.guest.stop_db.assert_called_once_with(do_not_start_on_reboot=True) self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) def test_nova_wont_resize(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) self.server.resize.side_effect = BadRequest(400) self.server.status = "ACTIVE" self.assertRaises(BadRequest, self.action.execute) self.guest.stop_db.assert_called_once_with(do_not_start_on_reboot=True) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.guest.restart.assert_called_once() self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) def test_nova_resize_timeout(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) self.server.status = "ACTIVE" with mock.patch.object(utils, 'poll_until') as mock_poll_until: mock_poll_until.side_effect = [None, PollTimeOut()] self.assertRaises(PollTimeOut, self.action.execute) expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 2 self.assertEqual(expected_calls, mock_poll_until.call_args_list) self.guest.stop_db.assert_called_once_with( do_not_start_on_reboot=True) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) def test_nova_doesnt_change_flavor(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) with mock.patch.object(utils, 'poll_until') as mock_poll_until: self.poll_until_side_effects.extend([ ("VERIFY_RESIZE", OLD_FLAVOR_ID), None, ("ACTIVE", OLD_FLAVOR_ID)]) mock_poll_until.side_effect = self._poll_until self.assertRaisesRegex(TroveError, "flavor_id=.* and not .*", self.action.execute) expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 3 self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) self.guest.stop_db.assert_called_once_with( do_not_start_on_reboot=True) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.guest.reset_configuration.assert_called_once_with( mock.ANY) self.instance.server.revert_resize.assert_called_once() self.guest.restart.assert_called_once() self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) def test_nova_resize_fails(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) with mock.patch.object(utils, 'poll_until') as mock_poll_until: self.poll_until_side_effects.extend([ None, ("ERROR", OLD_FLAVOR_ID)]) mock_poll_until.side_effect = self._poll_until self.assertRaisesRegex(TroveError, "status=ERROR and not VERIFY_RESIZE", self.action.execute) expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 2 self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) self.guest.stop_db.assert_called_once_with( do_not_start_on_reboot=True) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) def test_nova_resizes_in_weird_state(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) with mock.patch.object(utils, 'poll_until') as mock_poll_until: 
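# The side effects queued below are consumed one per poll by _poll_until():
# None leaves the fake server untouched, while a (status, flavor_id) pair
# simulates Nova reporting that server state on the next poll.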
self.poll_until_side_effects.extend([ None, ("ACTIVE", NEW_FLAVOR_ID)]) mock_poll_until.side_effect = self._poll_until self.assertRaisesRegex(TroveError, "status=ACTIVE and not VERIFY_RESIZE", self.action.execute) expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 2 self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) self.guest.stop_db.assert_called_once_with( do_not_start_on_reboot=True) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.guest.restart.assert_called_once() self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) def test_guest_is_not_okay(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) with mock.patch.object(utils, 'poll_until') as mock_poll_until: self.poll_until_side_effects.extend([ None, ("VERIFY_RESIZE", NEW_FLAVOR_ID), None, PollTimeOut(), ("ACTIVE", OLD_FLAVOR_ID)]) mock_poll_until.side_effect = self._poll_until self.instance.set_datastore_status_to_paused.side_effect = ( lambda: self._datastore_changes_to( rd_instance.ServiceStatuses.PAUSED)) self.assertRaises(PollTimeOut, self.action.execute) expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 5 self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) self.guest.stop_db.assert_called_once_with( do_not_start_on_reboot=True) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.guest.reset_configuration.assert_called_once_with( mock.ANY) self.instance.server.revert_resize.assert_called_once() self.guest.restart.assert_called_once() self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) def test_mysql_is_not_okay(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) with mock.patch.object(utils, 'poll_until') as mock_poll_until: self.poll_until_side_effects.extend([ None, ("VERIFY_RESIZE", NEW_FLAVOR_ID), PollTimeOut(), ("ACTIVE", OLD_FLAVOR_ID)]) mock_poll_until.side_effect = self._poll_until self.instance.set_datastore_status_to_paused.side_effect = ( lambda: self._datastore_changes_to( rd_instance.ServiceStatuses.SHUTDOWN)) self._start_mysql() self.assertRaises(PollTimeOut, self.action.execute) expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 4 self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) self.guest.stop_db.assert_called_once_with( do_not_start_on_reboot=True) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.guest.reset_configuration.assert_called_once_with( mock.ANY) self.instance.server.revert_resize.assert_called_once() self.guest.restart.assert_called_once() self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) def test_confirm_resize_fails(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) with mock.patch.object(utils, 'poll_until') as mock_poll_until: self.poll_until_side_effects.extend([ None, ("VERIFY_RESIZE", NEW_FLAVOR_ID), None, None, ("SHUTDOWN", NEW_FLAVOR_ID)]) mock_poll_until.side_effect = self._poll_until self.instance.set_datastore_status_to_paused.side_effect = ( lambda: 
self._datastore_changes_to( rd_instance.ServiceStatuses.RUNNING)) self.server.confirm_resize.side_effect = BadRequest(400) self._start_mysql() self.assertRaises(BadRequest, self.action.execute) expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 5 self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) self.guest.stop_db.assert_called_once_with( do_not_start_on_reboot=True) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.server.confirm_resize.assert_called_once() self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) def test_revert_nova_fails(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) with mock.patch.object(utils, 'poll_until') as mock_poll_until: self.poll_until_side_effects.extend([ None, ("VERIFY_RESIZE", NEW_FLAVOR_ID), None, PollTimeOut(), ("ERROR", OLD_FLAVOR_ID)]) mock_poll_until.side_effect = self._poll_until self.instance.set_datastore_status_to_paused.side_effect = ( lambda: self._datastore_changes_to( rd_instance.ServiceStatuses.PAUSED)) self.assertRaises(PollTimeOut, self.action.execute) expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 5 self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) self.guest.stop_db.assert_called_once_with( do_not_start_on_reboot=True) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.guest.reset_configuration.assert_called_once_with( mock.ANY) self.instance.server.revert_resize.assert_called_once() self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) @test(groups=[GROUP, GROUP + '.migrate']) class MigrateTests(ResizeTestBase): def setUp(self): super(MigrateTests, self).setUp() self._init() self.action = models.MigrateAction(self.instance) def test_successful_migrate(self): self._datastore_changes_to(rd_instance.ServiceStatuses.SHUTDOWN) with mock.patch.object(utils, 'poll_until') as mock_poll_until: self.poll_until_side_effects.extend([ None, ("VERIFY_RESIZE", NEW_FLAVOR_ID), None, None]) mock_poll_until.side_effect = self._poll_until self.instance.set_datastore_status_to_paused.side_effect = ( lambda: self._datastore_changes_to( rd_instance.ServiceStatuses.RUNNING)) self.action.execute() expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 4 self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) self.guest.stop_db.assert_called_once_with( do_not_start_on_reboot=True) self.server.migrate.assert_called_once_with(force_host=None) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.server.confirm_resize.assert_called_once() self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/api/limits.py0000644000175000017500000001261200000000000021076 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from datetime import datetime

from nose.tools import assert_equal
from nose.tools import assert_true
from oslo_utils import timeutils
from proboscis import before_class
from proboscis import test
from troveclient.compat import exceptions

from trove.common import cfg
from trove.tests.fakes import limits as fake_limits
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Users

CONF = cfg.CONF

GROUP = "dbaas.api.limits"
DEFAULT_RATE = CONF.http_get_rate
DEFAULT_MAX_VOLUMES = CONF.max_volumes_per_tenant
DEFAULT_MAX_INSTANCES = CONF.max_instances_per_tenant
DEFAULT_MAX_BACKUPS = CONF.max_backups_per_tenant


def ensure_limits_are_not_faked(func):
    def _cd(*args, **kwargs):
        fake_limits.ENABLED = True
        try:
            return func(*args, **kwargs)
        finally:
            fake_limits.ENABLED = False
    # The decorator must hand back the wrapper; without this return the
    # decorated tests would silently become None.
    return _cd


@test(groups=[GROUP])
class Limits(object):

    @before_class
    def setUp(self):
        users = [
            {
                "auth_user": "rate_limit",
                "auth_key": "password",
                "tenant": "4000",
                "requirements": {
                    "is_admin": False,
                    "services": ["trove"]
                }
            },
            {
                "auth_user": "rate_limit_exceeded",
                "auth_key": "password",
                "tenant": "4050",
                "requirements": {
                    "is_admin": False,
                    "services": ["trove"]
                }
            }]

        self._users = Users(users)
        rate_user = self._get_user('rate_limit')
        self.rd_client = create_dbaas_client(rate_user)

    def _get_user(self, name):
        return self._users.find_user_by_name(name)

    def __is_available(self, next_available):
        dt_next = timeutils.parse_isotime(next_available)
        dt_now = datetime.now()
        return dt_next.time() < dt_now.time()

    def _get_limits_as_dict(self, limits):
        d = {}
        for limit in limits:
            d[limit.verb] = limit
        return d

    @test
    @ensure_limits_are_not_faked
    def test_limits_index(self):
        """Test_limits_index."""
        limits = self.rd_client.limits.list()
        d = self._get_limits_as_dict(limits)

        # Remove the absolute limits from the rate limits.
        abs_limits = d.pop("ABSOLUTE", None)
        assert_equal(abs_limits.verb, "ABSOLUTE")
        assert_equal(int(abs_limits.max_instances), DEFAULT_MAX_INSTANCES)
        assert_equal(int(abs_limits.max_backups), DEFAULT_MAX_BACKUPS)
        assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)

        for k in d:
            assert_equal(d[k].verb, k)
            assert_equal(d[k].unit, "MINUTE")
            assert_true(int(d[k].remaining) <= DEFAULT_RATE)
            assert_true(d[k].nextAvailable is not None)

    @test
    @ensure_limits_are_not_faked
    def test_limits_get_remaining(self):
        """Test_limits_get_remaining."""
        limits = ()
        for i in range(5):
            limits = self.rd_client.limits.list()

        d = self._get_limits_as_dict(limits)
        abs_limits = d["ABSOLUTE"]
        get = d["GET"]

        assert_equal(int(abs_limits.max_instances), DEFAULT_MAX_INSTANCES)
        assert_equal(int(abs_limits.max_backups), DEFAULT_MAX_BACKUPS)
        assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)
        assert_equal(get.verb, "GET")
        assert_equal(get.unit, "MINUTE")
        assert_true(int(get.remaining) <= DEFAULT_RATE - 5)
        assert_true(get.nextAvailable is not None)

    @test
    @ensure_limits_are_not_faked
    def test_limits_exception(self):
        """Test_limits_exception."""
        # Use a different user to avoid throttling when tests run out of
        # order.
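# Keep issuing requests until the rate limiter kicks in; each successful
# response along the way must still report consistent limit values, and at
# least one OverLimit error must be observed.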
rate_user_exceeded = self._get_user('rate_limit_exceeded') rd_client = create_dbaas_client(rate_user_exceeded) get = None encountered = False for i in range(DEFAULT_RATE + 50): try: limits = rd_client.limits.list() d = self._get_limits_as_dict(limits) get = d["GET"] abs_limits = d["ABSOLUTE"] assert_equal(get.verb, "GET") assert_equal(get.unit, "MINUTE") assert_equal(int(abs_limits.max_instances), DEFAULT_MAX_INSTANCES) assert_equal(int(abs_limits.max_backups), DEFAULT_MAX_BACKUPS) assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES) except exceptions.OverLimit: encountered = True assert_true(encountered) assert_true(int(get.remaining) <= 50) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7801108 trove-12.1.0.dev92/trove/tests/api/mgmt/0000755000175000017500000000000000000000000020165 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/api/mgmt/__init__.py0000644000175000017500000000000000000000000022264 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/mgmt/configurations.py0000644000175000017500000001755200000000000023603 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
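# The tests below exercise the mgmt configuration-parameter API. As an
# illustrative sketch of the lifecycle they walk through (not used by the
# test suite; the admin client, parameter name, and datastore version id
# are placeholders):
def _example_parameter_lifecycle(admin_client, datastore_version_id):
    client = admin_client.mgmt_configs
    # Register a string parameter that requires a restart to take effect.
    client.create(datastore_version_id, 'example-param', 1, 'string',
                  None, None)
    # Change it to a bounded integer that applies without a restart.
    client.modify(datastore_version_id, 'example-param', 0, 'integer',
                  '10', '1')
    # Soft-delete it; it remains visible through
    # list_all_parameter_by_version() with deleted set to True.
    client.delete(datastore_version_id, 'example-param')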
from proboscis import asserts from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements GROUP = "dbaas.api.mgmt.configurations" @test(groups=[GROUP, tests.DBAAS_API, tests.PRE_INSTANCES]) class ConfigGroupsSetupBeforeInstanceCreation(object): @before_class def setUp(self): self.user = test_config.users.find_user(Requirements(is_admin=True)) self.admin_client = create_dbaas_client(self.user) self.datastore_version_id = self.admin_client.datastore_versions.get( "mysql", "5.5").id @test def test_valid_config_create_type(self): name = "testconfig-create" restart_required = 1 data_type = "string" max_size = None min_size = None client = self.admin_client.mgmt_configs param_list = client.parameters_by_version( self.datastore_version_id) asserts.assert_true(name not in [p.name for p in param_list]) client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) param_list = client.parameters_by_version( self.datastore_version_id) asserts.assert_true(name in [p.name for p in param_list]) param = client.get_parameter_by_version( self.datastore_version_id, name) asserts.assert_equal(name, param.name) asserts.assert_equal(restart_required, param.restart_required) asserts.assert_equal(data_type, param.type) # test the modify restart_required = 0 data_type = "integer" max_size = "10" min_size = "1" client.modify( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) param = client.get_parameter_by_version( self.datastore_version_id, name) asserts.assert_equal(name, param.name) asserts.assert_equal(restart_required, param.restart_required) asserts.assert_equal(data_type, param.type) asserts.assert_equal(max_size, param.max) asserts.assert_equal(min_size, param.min) client.delete(self.datastore_version_id, name) # test show deleted params work param_list = client.list_all_parameter_by_version( self.datastore_version_id) asserts.assert_true(name in [p.name for p in param_list]) param = client.get_any_parameter_by_version( self.datastore_version_id, name) asserts.assert_equal(name, param.name) asserts.assert_equal(restart_required, param.restart_required) asserts.assert_equal(data_type, param.type) asserts.assert_equal(int(max_size), int(param.max)) asserts.assert_equal(int(min_size), int(param.min)) asserts.assert_equal(True, param.deleted) asserts.assert_true(param.deleted_at) def test_create_config_type_twice_fails(self): name = "test-delete-config-types" restart_required = 1 data_type = "string" max_size = None min_size = None client = self.admin_client.mgmt_configs client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) asserts.assert_raises(exceptions.BadRequest, client.create, self.datastore_version_id, name, restart_required, data_type, max_size, min_size) client.delete(self.datastore_version_id, name) config_list = client.parameters_by_version(self.datastore_version_id) asserts.assert_true(name not in [conf.name for conf in config_list]) # testing that recreate of a deleted parameter works. 
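# Because delete() only soft-deletes a parameter, creating it again under
# the same name is expected to succeed and make it visible to
# parameters_by_version() once more.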
client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) config_list = client.parameters_by_version(self.datastore_version_id) asserts.assert_false(name not in [conf.name for conf in config_list]) @test def test_delete_config_type(self): name = "test-delete-config-types" restart_required = 1 data_type = "string" max_size = None min_size = None client = self.admin_client.mgmt_configs client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) client.delete(self.datastore_version_id, name) config_list = client.parameters_by_version(self.datastore_version_id) asserts.assert_true(name not in [conf.name for conf in config_list]) @test def test_delete_config_type_fail(self): asserts.assert_raises( exceptions.BadRequest, self.admin_client.mgmt_configs.delete, self.datastore_version_id, "test-delete-config-types") @test def test_invalid_config_create_type(self): name = "testconfig_invalid_type" restart_required = 1 data_type = "other" max_size = None min_size = None asserts.assert_raises( exceptions.BadRequest, self.admin_client.mgmt_configs.create, self.datastore_version_id, name, restart_required, data_type, max_size, min_size) @test def test_invalid_config_create_restart_required(self): name = "testconfig_invalid_restart_required" restart_required = 5 data_type = "string" max_size = None min_size = None asserts.assert_raises( exceptions.BadRequest, self.admin_client.mgmt_configs.create, self.datastore_version_id, name, restart_required, data_type, max_size, min_size) @test def test_config_parameter_was_deleted_then_recreate_updates_it(self): name = "test-delete-and-recreate-param" restart_required = 1 data_type = "string" max_size = None min_size = None client = self.admin_client.mgmt_configs client.create( self.datastore_version_id, name, restart_required, data_type, max_size, min_size) client.delete(self.datastore_version_id, name) client.create( self.datastore_version_id, name, 0, data_type, max_size, min_size) param_list = client.list_all_parameter_by_version( self.datastore_version_id) asserts.assert_true(name in [p.name for p in param_list]) param = client.get_any_parameter_by_version( self.datastore_version_id, name) asserts.assert_equal(False, param.deleted) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/mgmt/datastore_versions.py0000644000175000017500000001527200000000000024464 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
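# The MgmtDataStoreVersion tests below walk a datastore version through its
# full lifecycle. A condensed sketch of that flow (illustrative only; the
# client is assumed to be an admin dbaas client as built in setUp(), and
# the names and image ids are placeholders):
def _example_datastore_version_lifecycle(client, image_id, new_image_id):
    # Register a new datastore version backed by a Glance image.
    client.mgmt_datastore_versions.create(
        'example_version', 'example_ds', 'example_mgr', image_id,
        ['example-package'])
    created = [v for v in client.mgmt_datastore_versions.list()
               if v.name == 'example_version'][0]
    # Patch the image and package list of the version just created.
    client.mgmt_datastore_versions.edit(
        created.id, image=new_image_id, packages=['other-package'])
    # Delete it, restoring the original version count.
    client.mgmt_datastore_versions.delete(created.id)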
from proboscis.asserts import assert_equal from proboscis.asserts import assert_false from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis import before_class from proboscis.check import Check from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.config import CONFIG from trove.tests.util import create_client from trove.tests.util import create_dbaas_client from trove.tests.util import create_glance_client from trove.tests.util import test_config from trove.tests.util.users import Requirements @test(groups=[tests.DBAAS_API_MGMT_DATASTORES], depends_on_groups=[tests.DBAAS_API_DATASTORES]) class MgmtDataStoreVersion(object): """Tests the mgmt datastore version methods.""" @before_class def setUp(self): """Create client for tests.""" reqs = Requirements(is_admin=True) self.user = CONFIG.users.find_user(reqs) self.client = create_dbaas_client(self.user) self.images = [] glance_user = test_config.users.find_user( Requirements(is_admin=True, services=["glance"])) self.glance_client = create_glance_client(glance_user) images = self.glance_client.images.list() for image in images: self.images.append(image.id) def _find_ds_version_by_name(self, ds_version_name): ds_versions = self.client.mgmt_datastore_versions.list() for ds_version in ds_versions: if ds_version_name == ds_version.name: return ds_version @test def test_mgmt_ds_version_list_original_count(self): """Tests the mgmt datastore version list method.""" self.ds_versions = self.client.mgmt_datastore_versions.list() # datastore-versions should exist for a functional Trove deployment. assert_true(len(self.ds_versions) > 0) @test def mgmt_datastore_version_list_requires_admin_account(self): """Test admin is required to list datastore versions.""" client = create_client(is_admin=False) assert_raises(exceptions.Unauthorized, client.mgmt_datastore_versions.list) @test(depends_on=[test_mgmt_ds_version_list_original_count]) def test_mgmt_ds_version_list_fields_present(self): """Verify that all expected fields are returned by list method.""" expected_fields = [ 'id', 'name', 'datastore_id', 'datastore_name', 'datastore_manager', 'image', 'packages', 'active', 'default', ] for ds_version in self.ds_versions: with Check() as check: for field in expected_fields: check.true(hasattr(ds_version, field), "List lacks field %s." 
% field) @test(depends_on=[test_mgmt_ds_version_list_original_count]) def test_mgmt_ds_version_get(self): """Tests the mgmt datastore version get method.""" test_version = self.ds_versions[0] found_ds_version = self.client.mgmt_datastore_versions.get( test_version.id) assert_equal(test_version.name, found_ds_version.name) assert_equal(test_version.datastore_id, found_ds_version.datastore_id) assert_equal(test_version.datastore_name, found_ds_version.datastore_name) assert_equal(test_version.datastore_manager, found_ds_version.datastore_manager) assert_equal(test_version.image, found_ds_version.image) assert_equal(test_version.packages, found_ds_version.packages) assert_equal(test_version.active, found_ds_version.active) assert_equal(test_version.default, found_ds_version.default) @test(depends_on=[test_mgmt_ds_version_list_original_count]) def test_mgmt_ds_version_create(self): """Tests the mgmt datastore version create method.""" response = self.client.mgmt_datastore_versions.create( 'test_version1', 'test_ds', 'test_mgr', self.images[0], ['vertica-7.1']) assert_equal(None, response) assert_equal(202, self.client.last_http_code) # Since we created one more ds_version # lets check count of total ds_versions, it should be increased by 1 new_ds_versions = self.client.mgmt_datastore_versions.list() assert_equal(len(self.ds_versions) + 1, len(new_ds_versions)) # Match the contents of newly created ds_version. self.created_version = self._find_ds_version_by_name('test_version1') assert_equal('test_version1', self.created_version.name) assert_equal('test_ds', self.created_version.datastore_name) assert_equal('test_mgr', self.created_version.datastore_manager) assert_equal(self.images[0], self.created_version.image) assert_equal(['vertica-7.1'], self.created_version.packages) assert_true(self.created_version.active) assert_false(self.created_version.default) @test(depends_on=[test_mgmt_ds_version_create]) def test_mgmt_ds_version_patch(self): """Tests the mgmt datastore version edit method.""" self.client.mgmt_datastore_versions.edit( self.created_version.id, image=self.images[1], packages=['pkg1']) assert_equal(202, self.client.last_http_code) # Lets match the content of patched datastore patched_ds_version = self._find_ds_version_by_name('test_version1') assert_equal(self.images[1], patched_ds_version.image) assert_equal(['pkg1'], patched_ds_version.packages) @test(depends_on=[test_mgmt_ds_version_patch]) def test_mgmt_ds_version_delete(self): """Tests the mgmt datastore version delete method.""" self.client.mgmt_datastore_versions.delete(self.created_version.id) assert_equal(202, self.client.last_http_code) # Lets match the total count of ds_version, # it should get back to original ds_versions = self.client.mgmt_datastore_versions.list() assert_equal(len(self.ds_versions), len(ds_versions)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/api/mgmt/instances_actions.py0000644000175000017500000001712200000000000024251 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from novaclient.v2.servers import Server from proboscis import after_class from proboscis.asserts import assert_equal from proboscis.asserts import assert_raises from proboscis import before_class from proboscis import SkipTest from proboscis import test from trove.backup import models as backup_models from trove.backup import state from trove.common.context import TroveContext from trove.common import exception import trove.common.instance as tr_instance from trove.extensions.mgmt.instances.models import MgmtInstance from trove.extensions.mgmt.instances.service import MgmtInstanceController from trove.instance import models as imodels from trove.instance.models import DBInstance from trove.instance.tasks import InstanceTasks from trove.tests.config import CONFIG from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements GROUP = "dbaas.api.mgmt.action.reset-task-status" class MgmtInstanceBase(object): def setUp(self): self._create_instance() self.controller = MgmtInstanceController() def tearDown(self): self.db_info.delete() def _create_instance(self): self.context = TroveContext(is_admin=True) self.tenant_id = 999 self.db_info = DBInstance.create( id="inst-id-1", name="instance", flavor_id=1, datastore_version_id=test_config.dbaas_datastore_version_id, tenant_id=self.tenant_id, volume_size=None, task_status=InstanceTasks.NONE) self.server = mock.MagicMock(spec=Server) self.instance = imodels.Instance( self.context, self.db_info, self.server, datastore_status=imodels.InstanceServiceStatus( tr_instance.ServiceStatuses.RUNNING)) def _make_request(self, path='/', context=None, **kwargs): from webob import Request path = '/' print("path: %s" % path) return Request.blank(path=path, environ={'trove.context': context}, **kwargs) def _reload_db_info(self): self.db_info = DBInstance.find_by(id=self.db_info.id, deleted=False) @test(groups=[GROUP]) class RestartTaskStatusTests(MgmtInstanceBase): @before_class def setUp(self): super(RestartTaskStatusTests, self).setUp() self.backups_to_clear = [] @after_class def tearDown(self): super(RestartTaskStatusTests, self).tearDown() def _change_task_status_to(self, new_task_status): self.db_info.task_status = new_task_status self.db_info.save() def _make_request(self, path='/', context=None, **kwargs): req = super(RestartTaskStatusTests, self)._make_request(path, context, **kwargs) req.method = 'POST' body = {'reset-task-status': {}} return req, body def reset_task_status(self): with mock.patch.object(MgmtInstance, 'load') as mock_load: mock_load.return_value = self.instance req, body = self._make_request(context=self.context) self.controller = MgmtInstanceController() resp = self.controller.action(req, body, self.tenant_id, self.db_info.id) mock_load.assert_called_once_with(context=self.context, id=self.db_info.id) return resp @test def mgmt_restart_task_requires_admin_account(self): context = TroveContext(is_admin=False) req, body = self._make_request(context=context) self.controller = MgmtInstanceController() assert_raises(exception.Forbidden, self.controller.action, 
req, body, self.tenant_id, self.db_info.id) @test def mgmt_restart_task_returns_json(self): resp = self.reset_task_status() out = resp.data("application/json") assert_equal(out, None) @test def mgmt_restart_task_changes_status_to_none(self): self._change_task_status_to(InstanceTasks.BUILDING) self.reset_task_status() self._reload_db_info() assert_equal(self.db_info.task_status, InstanceTasks.NONE) @test def mgmt_reset_task_status_clears_backups(self): if CONFIG.fake_mode: raise SkipTest("Test requires an instance.") self.reset_task_status() self._reload_db_info() assert_equal(self.db_info.task_status, InstanceTasks.NONE) user = test_config.users.find_user(Requirements(is_admin=False)) dbaas = create_dbaas_client(user) admin = test_config.users.find_user(Requirements(is_admin=True)) admin_dbaas = create_dbaas_client(admin) result = dbaas.instances.backups(self.db_info.id) assert_equal(0, len(result)) # Create some backups. backup_models.DBBackup.create( name="forever_new", description="forever new", tenant_id=self.tenant_id, state=state.BackupState.NEW, instance_id=self.db_info.id, deleted=False) backup_models.DBBackup.create( name="forever_build", description="forever build", tenant_id=self.tenant_id, state=state.BackupState.BUILDING, instance_id=self.db_info.id, deleted=False) backup_models.DBBackup.create( name="forever_completed", description="forever completed", tenant_id=self.tenant_id, state=state.BackupState.COMPLETED, instance_id=self.db_info.id, deleted=False) # List the backups for this instance. # There ought to be three in the admin tenant, but # none in a different user's tenant. result = dbaas.instances.backups(self.db_info.id) assert_equal(0, len(result)) result = admin_dbaas.instances.backups(self.db_info.id) assert_equal(3, len(result)) self.backups_to_clear = result # Reset the task status. self.reset_task_status() self._reload_db_info() result = admin_dbaas.instances.backups(self.db_info.id) assert_equal(3, len(result)) for backup in result: if backup.name == 'forever_completed': assert_equal(backup.status, state.BackupState.COMPLETED) else: assert_equal(backup.status, state.BackupState.FAILED) @test(runs_after=[mgmt_reset_task_status_clears_backups]) def clear_test_backups(self): for backup in self.backups_to_clear: found_backup = backup_models.DBBackup.find_by(id=backup.id) found_backup.delete() admin = test_config.users.find_user(Requirements(is_admin=True)) admin_dbaas = create_dbaas_client(admin) if not CONFIG.fake_mode: result = admin_dbaas.instances.backups(self.db_info.id) assert_equal(0, len(result)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/api/mgmt/quotas.py0000644000175000017500000001551500000000000022062 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from proboscis import after_class from proboscis import asserts from proboscis.asserts import Check from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove.tests.config import CONFIG from trove.tests.util import create_client from trove.tests.util import create_dbaas_client from trove.tests.util import get_standby_instance_flavor from trove.tests.util.users import Requirements class QuotasBase(object): def setUp(self): self.user1 = CONFIG.users.find_user(Requirements(is_admin=False)) self.user2 = CONFIG.users.find_user(Requirements(is_admin=False)) asserts.assert_not_equal(self.user1.tenant, self.user2.tenant, "Not enough users to run QuotasTest." + " Needs >=2.") self.client1 = create_dbaas_client(self.user1) self.client2 = create_dbaas_client(self.user2) self.mgmt_client = create_client(is_admin=True) ''' Orig quotas from config "trove_max_instances_per_tenant": 55, "trove_max_volumes_per_tenant": 100, ''' self.original_quotas1 = self.mgmt_client.quota.show(self.user1.tenant) self.original_quotas2 = self.mgmt_client.quota.show(self.user2.tenant) def tearDown(self): self.mgmt_client.quota.update(self.user1.tenant, self.original_quotas1) self.mgmt_client.quota.update(self.user2.tenant, self.original_quotas2) @test(groups=["dbaas.api.mgmt.quotas"]) class DefaultQuotasTest(QuotasBase): @before_class def setUp(self): super(DefaultQuotasTest, self).setUp() @after_class def tearDown(self): super(DefaultQuotasTest, self).tearDown() @test def check_quotas_are_set_to_defaults(self): quotas = self.mgmt_client.quota.show(self.user1.tenant) with Check() as check: check.equal(CONFIG.trove_max_instances_per_tenant, quotas["instances"]) check.equal(CONFIG.trove_max_volumes_per_user, quotas["volumes"]) asserts.assert_equal(len(quotas), 2) @test(groups=["dbaas.api.mgmt.quotas"]) class ChangeInstancesQuota(QuotasBase): @before_class def setUp(self): super(ChangeInstancesQuota, self).setUp() self.mgmt_client.quota.update(self.user1.tenant, {"instances": 0}) asserts.assert_equal(200, self.mgmt_client.last_http_code) @after_class def tearDown(self): super(ChangeInstancesQuota, self).tearDown() @test def check_user2_is_not_affected_on_instances_quota_change(self): user2_current_quota = self.mgmt_client.quota.show(self.user2.tenant) asserts.assert_equal(self.original_quotas2, user2_current_quota, "Changing one user's quota affected another" + "user's quota." + " Original: %s. 
After Quota Change: %s" % (self.original_quotas2, user2_current_quota)) @test def verify_correct_update(self): quotas = self.mgmt_client.quota.show(self.user1.tenant) with Check() as check: check.equal(0, quotas["instances"]) check.equal(CONFIG.trove_max_volumes_per_tenant, quotas["volumes"]) asserts.assert_equal(len(quotas), 2) @test def create_too_many_instances(self): flavor, flavor_href = get_standby_instance_flavor(self.client1) asserts.assert_raises(exceptions.OverLimit, self.client1.instances.create, "too_many_instances", flavor_href, {'size': 1}) asserts.assert_equal(413, self.client1.last_http_code) @test(groups=["dbaas.api.mgmt.quotas"]) class ChangeVolumesQuota(QuotasBase): @before_class def setUp(self): super(ChangeVolumesQuota, self).setUp() self.mgmt_client.quota.update(self.user1.tenant, {"volumes": 0}) asserts.assert_equal(200, self.mgmt_client.last_http_code) @after_class def tearDown(self): super(ChangeVolumesQuota, self).tearDown() @test def check_volumes_overlimit(self): flavor, flavor_href = get_standby_instance_flavor(self.client1) asserts.assert_raises(exceptions.OverLimit, self.client1.instances.create, "too_large_volume", flavor_href, {'size': CONFIG.trove_max_accepted_volume_size + 1}) asserts.assert_equal(413, self.client1.last_http_code) @test def check_user2_is_not_affected_on_volumes_quota_change(self): user2_current_quota = self.mgmt_client.quota.show(self.user2.tenant) asserts.assert_equal(self.original_quotas2, user2_current_quota, "Changing one user's quota affected another" + "user's quota." + " Original: %s. After Quota Change: %s" % (self.original_quotas2, user2_current_quota)) @test def verify_correct_update(self): quotas = self.mgmt_client.quota.show(self.user1.tenant) with Check() as check: check.equal(CONFIG.trove_max_instances_per_tenant, quotas["instances"]) check.equal(0, quotas["volumes"]) asserts.assert_equal(len(quotas), 2) @test def create_too_large_volume(self): flavor, flavor_href = get_standby_instance_flavor(self.client1) asserts.assert_raises(exceptions.OverLimit, self.client1.instances.create, "too_large_volume", flavor_href, {'size': CONFIG.trove_max_accepted_volume_size + 1}) asserts.assert_equal(413, self.client1.last_http_code) # create an instance when I set the limit back to # multiple updates to the quota and it should do what you expect ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/replication.py0000644000175000017500000003712600000000000022115 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
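# The replication tests below repeatedly build small predicate factories and
# hand them to poll_until(). A minimal sketch of that pattern (illustrative
# only; the dbaas client, instance id, and status list are placeholders):
def _example_status_predicate(dbaas, instance_id, wanted_statuses):
    """Return a zero-argument callable suitable for poll_until()."""
    def _check():
        return dbaas.instances.get(instance_id).status in wanted_statuses
    return _check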
from time import sleep from proboscis.asserts import assert_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis.asserts import fail from proboscis.decorators import time_out from proboscis import SkipTest from proboscis import test from troveclient.compat import exceptions from trove.common.utils import generate_uuid from trove.common.utils import poll_until from trove import tests from trove.tests.api.instances import CheckInstance from trove.tests.api.instances import instance_info from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE from trove.tests.config import CONFIG from trove.tests.scenario import runners from trove.tests.scenario.runners.test_runners import SkipKnownBug from trove.tests.util.server_connection import create_server_connection class SlaveInstanceTestInfo(object): """Stores slave instance information.""" def __init__(self): self.id = None self.replicated_db = generate_uuid() slave_instance = SlaveInstanceTestInfo() existing_db_on_master = generate_uuid() backup_count = None def _get_user_count(server_info): cmd = ('mysql -BNq -e \\\'select count\\(*\\) from mysql.user' ' where user like \\\"slave_%\\\"\\\'') server = create_server_connection(server_info.id) try: stdout = server.execute(cmd) return int(stdout) except Exception as e: fail("Failed to execute command: %s, error: %s" % (cmd, str(e))) def slave_is_running(running=True): def check_slave_is_running(): server = create_server_connection(slave_instance.id) cmd = ("mysqladmin extended-status " "| awk '/Slave_running/{print $4}'") try: stdout = server.execute(cmd) stdout = stdout.rstrip() except Exception as e: fail("Failed to execute command %s, error: %s" % (cmd, str(e))) expected = b"ON" if running else b"OFF" return stdout == expected return check_slave_is_running def backup_count_matches(count): def check_backup_count_matches(): backup = instance_info.dbaas.instances.backups(instance_info.id) return count == len(backup) return check_backup_count_matches def instance_is_active(id): instance = instance_info.dbaas.instances.get(id) if instance.status in CONFIG.running_status: return True else: assert_true(instance.status in ['PROMOTE', 'EJECT', 'BUILD', 'BACKUP']) return False def create_slave(): result = instance_info.dbaas.instances.create( instance_info.name + "_slave", instance_info.dbaas_flavor_href, {'size': 2}, datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version, nics=instance_info.nics, replica_of=instance_info.id) assert_equal(200, instance_info.dbaas.last_http_code) assert_equal("BUILD", result.status) return result.id def validate_slave(master, slave): new_slave = instance_info.dbaas.instances.get(slave.id) assert_equal(200, instance_info.dbaas.last_http_code) ns_dict = new_slave._info CheckInstance(ns_dict).replica_of() assert_equal(master.id, ns_dict['replica_of']['id']) def validate_master(master, slaves): new_master = instance_info.dbaas.instances.get(master.id) assert_equal(200, instance_info.dbaas.last_http_code) nm_dict = new_master._info CheckInstance(nm_dict).slaves() master_ids = set([replica['id'] for replica in nm_dict['replicas']]) asserted_ids = set([slave.id for slave in slaves]) assert_true(asserted_ids.issubset(master_ids)) @test(depends_on_groups=[tests.DBAAS_API_CONFIGURATIONS], groups=[tests.DBAAS_API_REPLICATION], enabled=CONFIG.swift_enabled) class CreateReplicationSlave(object): @test def 
test_replica_provisioning_with_missing_replica_source(self): assert_raises(exceptions.NotFound, instance_info.dbaas.instances.create, instance_info.name + "_slave", instance_info.dbaas_flavor_href, instance_info.volume, datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version, nics=instance_info.nics, replica_of="Missing replica source") assert_equal(404, instance_info.dbaas.last_http_code) @test def test_create_db_on_master(self): """test_create_db_on_master""" databases = [{'name': existing_db_on_master}] # Ensure that the auth_token in the dbaas client is not stale instance_info.dbaas.authenticate() instance_info.dbaas.databases.create(instance_info.id, databases) assert_equal(202, instance_info.dbaas.last_http_code) @test(runs_after=['test_create_db_on_master']) def test_create_slave(self): """test_create_slave""" global backup_count backup_count = len( instance_info.dbaas.instances.backups(instance_info.id)) slave_instance.id = create_slave() @test(groups=[tests.DBAAS_API_REPLICATION], enabled=CONFIG.swift_enabled, depends_on_classes=[CreateReplicationSlave]) class WaitForCreateSlaveToFinish(object): """Wait until the instance is created and set up as slave.""" @test @time_out(TIMEOUT_INSTANCE_CREATE) def test_slave_created(self): """Wait for replica to be created.""" poll_until(lambda: instance_is_active(slave_instance.id)) @test(enabled=(not CONFIG.fake_mode and CONFIG.swift_enabled), depends_on_classes=[WaitForCreateSlaveToFinish], groups=[tests.DBAAS_API_REPLICATION]) class VerifySlave(object): def db_is_found(self, database_to_find): def find_database(): databases = instance_info.dbaas.databases.list(slave_instance.id) return (database_to_find in [d.name for d in databases]) return find_database @test @time_out(20 * 60) def test_correctly_started_replication(self): """test_correctly_started_replication""" poll_until(slave_is_running()) @test(runs_after=[test_correctly_started_replication]) @time_out(60) def test_backup_deleted(self): """test_backup_deleted""" poll_until(backup_count_matches(backup_count)) @test(depends_on=[test_correctly_started_replication]) def test_slave_is_read_only(self): """test_slave_is_read_only""" cmd = "mysql -BNq -e \\\'select @@read_only\\\'" server = create_server_connection(slave_instance.id) try: stdout = server.execute(cmd) stdout = int(stdout.rstrip()) except Exception as e: fail("Failed to execute command %s, error: %s" % (cmd, str(e))) assert_equal(stdout, 1) @test(depends_on=[test_slave_is_read_only]) def test_create_db_on_master(self): """test_create_db_on_master""" databases = [{'name': slave_instance.replicated_db}] instance_info.dbaas.databases.create(instance_info.id, databases) assert_equal(202, instance_info.dbaas.last_http_code) @test(depends_on=[test_create_db_on_master]) @time_out(5 * 60) def test_database_replicated_on_slave(self): """test_database_replicated_on_slave""" poll_until(self.db_is_found(slave_instance.replicated_db)) @test(runs_after=[test_database_replicated_on_slave]) @time_out(5 * 60) def test_existing_db_exists_on_slave(self): """test_existing_db_exists_on_slave""" poll_until(self.db_is_found(existing_db_on_master)) @test(depends_on=[test_existing_db_exists_on_slave]) def test_slave_user_exists(self): """test_slave_user_exists""" assert_equal(_get_user_count(slave_instance), 1) assert_equal(_get_user_count(instance_info), 1) @test(groups=[tests.DBAAS_API_REPLICATION], depends_on_classes=[VerifySlave], enabled=CONFIG.swift_enabled) class TestInstanceListing(object): """Test 
replication information in instance listing.""" @test def test_get_slave_instance(self): """test_get_slave_instance""" validate_slave(instance_info, slave_instance) @test def test_get_master_instance(self): """test_get_master_instance""" validate_master(instance_info, [slave_instance]) @test(groups=[tests.DBAAS_API_REPLICATION], depends_on_classes=[TestInstanceListing], enabled=CONFIG.swift_enabled) class TestReplicationFailover(object): """Test replication failover functionality.""" @staticmethod def promote(master, slave): if CONFIG.fake_mode: raise SkipTest("promote_replica_source not supported in fake mode") instance_info.dbaas.instances.promote_to_replica_source(slave) assert_equal(202, instance_info.dbaas.last_http_code) poll_until(lambda: instance_is_active(slave.id)) validate_master(slave, [master]) validate_slave(slave, master) @test def test_promote_master(self): if CONFIG.fake_mode: raise SkipTest("promote_master not supported in fake mode") assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.promote_to_replica_source, instance_info.id) @test def test_eject_slave(self): if CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") assert_raises(exceptions.BadRequest, instance_info.dbaas.instances.eject_replica_source, slave_instance.id) @test def test_eject_valid_master(self): if CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") # assert_raises(exceptions.BadRequest, # instance_info.dbaas.instances.eject_replica_source, # instance_info.id) # Uncomment once BUG_EJECT_VALID_MASTER is fixed raise SkipKnownBug(runners.BUG_EJECT_VALID_MASTER) @test(depends_on=[test_promote_master, test_eject_slave, test_eject_valid_master]) def test_promote_to_replica_source(self): """test_promote_to_replica_source""" TestReplicationFailover.promote(instance_info, slave_instance) @test(depends_on=[test_promote_to_replica_source]) def test_promote_back_to_replica_source(self): """test_promote_back_to_replica_source""" TestReplicationFailover.promote(slave_instance, instance_info) @test(depends_on=[test_promote_back_to_replica_source], enabled=False) def add_second_slave(self): """add_second_slave""" if CONFIG.fake_mode: raise SkipTest("three site promote not supported in fake mode") self._third_slave = SlaveInstanceTestInfo() self._third_slave.id = create_slave() poll_until(lambda: instance_is_active(self._third_slave.id)) poll_until(slave_is_running()) sleep(30) validate_master(instance_info, [slave_instance, self._third_slave]) validate_slave(instance_info, self._third_slave) @test(depends_on=[add_second_slave], enabled=False) def test_three_site_promote(self): """Promote the second slave""" if CONFIG.fake_mode: raise SkipTest("three site promote not supported in fake mode") TestReplicationFailover.promote(instance_info, self._third_slave) validate_master(self._third_slave, [slave_instance, instance_info]) validate_slave(self._third_slave, instance_info) @test(depends_on=[test_three_site_promote], enabled=False) def disable_master(self): """Stop trove-guestagent on master""" if CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") cmd = "sudo service trove-guestagent stop" server = create_server_connection(self._third_slave.id) try: stdout = server.execute(cmd) stdout = int(stdout.rstrip()) except Exception as e: fail("Failed to execute command %s, error: %s" % (cmd, str(e))) assert_equal(stdout, 1) @test(depends_on=[disable_master], enabled=False) def test_eject_replica_master(self): if 
CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") sleep(90) instance_info.dbaas.instances.eject_replica_source(self._third_slave) assert_equal(202, instance_info.dbaas.last_http_code) poll_until(lambda: instance_is_active(self._third_slave.id)) validate_master(instance_info, [slave_instance]) validate_slave(instance_info, slave_instance) @test(groups=[tests.DBAAS_API_REPLICATION], depends_on=[TestReplicationFailover], enabled=CONFIG.swift_enabled) class DetachReplica(object): @test def delete_before_detach_replica(self): assert_raises(exceptions.Forbidden, instance_info.dbaas.instances.delete, instance_info.id) @test @time_out(5 * 60) def test_detach_replica(self): """test_detach_replica""" if CONFIG.fake_mode: raise SkipTest("Detach replica not supported in fake mode") instance_info.dbaas.instances.edit(slave_instance.id, detach_replica_source=True) assert_equal(202, instance_info.dbaas.last_http_code) poll_until(slave_is_running(False)) @test(depends_on=[test_detach_replica]) @time_out(5 * 60) def test_slave_is_not_read_only(self): """test_slave_is_not_read_only""" if CONFIG.fake_mode: raise SkipTest("Test not_read_only not supported in fake mode") # wait until replica is no longer read only def check_not_read_only(): cmd = "mysql -BNq -e \\\'select @@read_only\\\'" server = create_server_connection(slave_instance.id) try: stdout = server.execute(cmd) stdout = int(stdout) except Exception: return False return stdout == 0 poll_until(check_not_read_only) @test(groups=[tests.DBAAS_API_REPLICATION], depends_on=[DetachReplica], enabled=CONFIG.swift_enabled) class DeleteSlaveInstance(object): @test @time_out(TIMEOUT_INSTANCE_DELETE) def test_delete_slave_instance(self): """test_delete_slave_instance""" instance_info.dbaas.instances.delete(slave_instance.id) assert_equal(202, instance_info.dbaas.last_http_code) def instance_is_gone(): try: instance_info.dbaas.instances.get(slave_instance.id) return False except exceptions.NotFound: return True poll_until(instance_is_gone) assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get, slave_instance.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/root.py0000644000175000017500000001631300000000000020562 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
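# The replication tests above wait for every asynchronous transition
# (replica build, promote, eject, detach, delete) by polling with
# trove.common.utils.poll_until, bounded by proboscis's @time_out decorator.
# A minimal sketch of that polling idiom (fixture names are illustrative):
def _example_wait_for_active(dbaas, instance_id):
    from trove.common.utils import poll_until

    def is_active():
        instance = dbaas.instances.get(instance_id)
        return instance.status == 'ACTIVE'  # predicate: True ends the poll

    poll_until(is_active)  # blocks, re-checking until the predicate holds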
from nose.plugins.skip import SkipTest import proboscis from proboscis.asserts import assert_equal from proboscis.asserts import assert_false from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.api import instances from trove.tests.util import test_config @test(groups=[tests.DBAAS_API_USERS_ROOT], depends_on_groups=[tests.DBAAS_API_INSTANCES]) class TestRoot(object): root_enabled_timestamp = 'Never' @proboscis.before_class def setUp(self): # Reuse the instance created previously. self.id = instances.instance_info.id self.dbaas = instances.instance_info.dbaas self.dbaas_admin = instances.instance_info.dbaas_admin def _verify_root_timestamp(self, id): reh = self.dbaas_admin.management.root_enabled_history(id) timestamp = reh.enabled assert_equal(self.root_enabled_timestamp, timestamp) assert_equal(id, reh.id) def _root(self): self.dbaas.root.create(self.id) assert_equal(200, self.dbaas.last_http_code) reh = self.dbaas_admin.management.root_enabled_history self.root_enabled_timestamp = reh(self.id).enabled @test def test_root_initially_disabled(self): """Test that root is disabled.""" enabled = self.dbaas.root.is_root_enabled(self.id) assert_equal(200, self.dbaas.last_http_code) is_enabled = enabled if hasattr(enabled, 'rootEnabled'): is_enabled = enabled.rootEnabled assert_false(is_enabled, "Root SHOULD NOT be enabled.") @test def test_create_user_os_admin_failure(self): users = [{"name": "os_admin", "password": "12345"}] assert_raises(exceptions.BadRequest, self.dbaas.users.create, self.id, users) @test def test_delete_user_os_admin_failure(self): assert_raises(exceptions.BadRequest, self.dbaas.users.delete, self.id, "os_admin") @test(depends_on=[test_root_initially_disabled], enabled=not test_config.values['root_removed_from_instance_api']) def test_root_initially_disabled_details(self): """Use instance details to test that root is disabled.""" instance = self.dbaas.instances.get(self.id) assert_true(hasattr(instance, 'rootEnabled'), "Instance has no rootEnabled property.") assert_false(instance.rootEnabled, "Root SHOULD NOT be enabled.") assert_equal(self.root_enabled_timestamp, 'Never') @test(depends_on=[test_root_initially_disabled_details]) def test_root_disabled_in_mgmt_api(self): """Verifies in the management api that the timestamp exists.""" self._verify_root_timestamp(self.id) @test(depends_on=[test_root_initially_disabled_details]) def test_root_disable_when_root_not_enabled(self): reh = self.dbaas_admin.management.root_enabled_history self.root_enabled_timestamp = reh(self.id).enabled assert_raises(exceptions.NotFound, self.dbaas.root.delete, self.id) self._verify_root_timestamp(self.id) @test(depends_on=[test_root_disable_when_root_not_enabled]) def test_enable_root(self): self._root() @test(depends_on=[test_enable_root]) def test_enabled_timestamp(self): assert_not_equal(self.root_enabled_timestamp, 'Never') @test(depends_on=[test_enable_root]) def test_root_not_in_users_list(self): """ Tests that despite having enabled root, user root doesn't appear in the users list for the instance. 
""" users = self.dbaas.users.list(self.id) usernames = [user.name for user in users] assert_true('root' not in usernames) @test(depends_on=[test_enable_root]) def test_root_now_enabled(self): """Test that root is now enabled.""" enabled = self.dbaas.root.is_root_enabled(self.id) assert_equal(200, self.dbaas.last_http_code) assert_true(enabled, "Root SHOULD be enabled.") @test(depends_on=[test_root_now_enabled], enabled=not test_config.values['root_removed_from_instance_api']) def test_root_now_enabled_details(self): """Use instance details to test that root is now enabled.""" instance = self.dbaas.instances.get(self.id) assert_true(hasattr(instance, 'rootEnabled'), "Instance has no rootEnabled property.") assert_true(instance.rootEnabled, "Root SHOULD be enabled.") assert_not_equal(self.root_enabled_timestamp, 'Never') self._verify_root_timestamp(self.id) @test(depends_on=[test_root_now_enabled_details]) def test_reset_root(self): if test_config.values['root_timestamp_disabled']: raise SkipTest("Enabled timestamp not enabled yet") old_ts = self.root_enabled_timestamp self._root() assert_not_equal(self.root_enabled_timestamp, 'Never') assert_equal(self.root_enabled_timestamp, old_ts) @test(depends_on=[test_reset_root]) def test_root_still_enabled(self): """Test that after root was reset it's still enabled.""" enabled = self.dbaas.root.is_root_enabled(self.id) assert_equal(200, self.dbaas.last_http_code) assert_true(enabled, "Root SHOULD still be enabled.") @test(depends_on=[test_root_still_enabled], enabled=not test_config.values['root_removed_from_instance_api']) def test_root_still_enabled_details(self): """Use instance details to test that after root was reset, it's still enabled. """ instance = self.dbaas.instances.get(self.id) assert_true(hasattr(instance, 'rootEnabled'), "Instance has no rootEnabled property.") assert_true(instance.rootEnabled, "Root SHOULD still be enabled.") assert_not_equal(self.root_enabled_timestamp, 'Never') self._verify_root_timestamp(self.id) @test(depends_on=[test_enable_root]) def test_root_cannot_be_deleted(self): """Even if root was enabled, the user root cannot be deleted.""" assert_raises(exceptions.BadRequest, self.dbaas.users.delete, self.id, "root") @test(depends_on=[test_root_still_enabled_details]) def test_root_disable(self): reh = self.dbaas_admin.management.root_enabled_history self.root_enabled_timestamp = reh(self.id).enabled self.dbaas.root.delete(self.id) assert_equal(204, self.dbaas.last_http_code) self._verify_root_timestamp(self.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/user_access.py0000644000175000017500000005130200000000000022073 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from random import choice from proboscis import after_class from proboscis import asserts from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.api.instances import instance_info from trove.tests import util from trove.tests.util import test_config FAKE = test_config.values['fake_mode'] class UserAccessBase(object): """ Base class for Positive and Negative TestUserAccess classes """ users = [] databases = [] def set_up(self): self.dbaas = util.create_dbaas_client(instance_info.user) self.users = ["test_access_user"] self.databases = [("test_access_db%02i" % i) for i in range(4)] def _user_list_from_names(self, usernames): return [{"name": name, "password": "password", "databases": []} for name in usernames] def _grant_access_singular(self, user, databases, expected_response=202): """Grant a single user access to the databases listed. Potentially, expect an exception in the process. """ try: self.dbaas.users.grant(instance_info.id, user, databases) except exceptions.BadRequest: asserts.assert_equal(400, expected_response) except exceptions.NotFound: asserts.assert_equal(404, expected_response) except exceptions.ClientException: asserts.assert_equal(500, expected_response) finally: asserts.assert_equal(expected_response, self.dbaas.last_http_code) def _grant_access_plural(self, users, databases, expected_response=202): """Grant each user in the list access to all the databases listed. Potentially, expect an exception in the process. """ for user in users: self._grant_access_singular(user, databases, expected_response) def _revoke_access_singular(self, user, database, expected_response=202): """Revoke from a user access to the given database . Potentially, expect an exception in the process. """ try: self.dbaas.users.revoke(instance_info.id, user, database) asserts.assert_true(expected_response, self.dbaas.last_http_code) except exceptions.BadRequest: asserts.assert_equal(400, self.dbaas.last_http_code) except exceptions.NotFound: asserts.assert_equal(404, self.dbaas.last_http_code) def _revoke_access_plural(self, users, databases, expected_response=202): """Revoke from each user access to each database. Potentially, expect an exception in the process. """ for user in users: for database in databases: self._revoke_access_singular(user, database, expected_response) def _test_access(self, users, databases, expected_response=200): """Verify that each user in the list has access to each database in the list. """ for user in users: access = self.dbaas.users.list_access(instance_info.id, user) asserts.assert_equal(expected_response, self.dbaas.last_http_code) access = [db.name for db in access] asserts.assert_equal(set(access), set(databases)) def _test_ignore_access(self, users, databases, expected_response=200): databases = [d for d in databases if d not in ['lost+found', 'mysql', 'information_schema']] self._test_access(users, databases, expected_response) def _reset_access(self): for user in self.users: for database in self.databases + self.ghostdbs: try: self.dbaas.users.revoke(instance_info.id, user, database) asserts.assert_true(self.dbaas.last_http_code in [202, 404] ) except exceptions.NotFound: # This is all right here, since we're resetting. 
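# A 404 simply means the grant (or the ghost database) was
# already gone, which is exactly the state the reset aims for.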
pass self._test_access(self.users, []) @test(depends_on_groups=[tests.DBAAS_API_USERS], groups=[tests.DBAAS_API_USERS_ACCESS]) class TestUserAccessPasswordChange(UserAccessBase): """Test that change_password works.""" @before_class def setUp(self): super(TestUserAccessPasswordChange, self).set_up() def _check_mysql_connection(self, username, password, success=True): # This can only test connections for users with the host %. # Much more difficult to simulate connection attempts from other hosts. if FAKE: # "Fake mode; cannot test mysql connection." return conn = util.mysql_connection() if success: conn.create(instance_info.get_address(), username, password) else: conn.assert_fails(instance_info.get_address(), username, password) def _pick_a_user(self): users = self._user_list_from_names(self.users) return choice(users) # Pick one, it doesn't matter. @test() def test_change_password_bogus_user(self): user = self._pick_a_user() user["name"] = "thisuserhasanamethatstoolong" asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.change_passwords, instance_info.id, [user]) asserts.assert_equal(400, self.dbaas.last_http_code) @test() def test_change_password_nonexistent_user(self): user = self._pick_a_user() user["name"] = "thisuserDNE" asserts.assert_raises(exceptions.NotFound, self.dbaas.users.change_passwords, instance_info.id, [user]) asserts.assert_equal(404, self.dbaas.last_http_code) @test() def test_create_user_and_dbs(self): users = self._user_list_from_names(self.users) # Default password for everyone is 'password'. self.dbaas.users.create(instance_info.id, users) asserts.assert_equal(202, self.dbaas.last_http_code) databases = [{"name": db} for db in self.databases] self.dbaas.databases.create(instance_info.id, databases) asserts.assert_equal(202, self.dbaas.last_http_code) @test(depends_on=[test_create_user_and_dbs]) def test_initial_connection(self): user = self._pick_a_user() self._check_mysql_connection(user["name"], "password") @test(depends_on=[test_initial_connection]) def test_change_password(self): # Doesn't actually change anything, just tests that the call doesn't # have any problems. As an aside, also checks that a user can # change its password to the same thing again. 
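# A same-password update is still an ordinary asynchronous update, so
# the API is expected to answer 202 like any other password change.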
user = self._pick_a_user() password = user["password"] self.dbaas.users.change_passwords(instance_info.id, [user]) asserts.assert_equal(202, self.dbaas.last_http_code) self._check_mysql_connection(user["name"], password) @test(depends_on=[test_change_password]) def test_change_password_back(self): """Test change and restore user password.""" user = self._pick_a_user() old_password = user["password"] new_password = "NEWPASSWORD" user["password"] = new_password self.dbaas.users.change_passwords(instance_info.id, [user]) asserts.assert_equal(202, self.dbaas.last_http_code) self._check_mysql_connection(user["name"], new_password) user["password"] = old_password self.dbaas.users.change_passwords(instance_info.id, [user]) asserts.assert_equal(202, self.dbaas.last_http_code) self._check_mysql_connection(user["name"], old_password) @after_class(always_run=True) def tearDown(self): for database in self.databases: self.dbaas.databases.delete(instance_info.id, database) asserts.assert_equal(202, self.dbaas.last_http_code) for username in self.users: self.dbaas.users.delete(instance_info.id, username) @test(depends_on_classes=[TestUserAccessPasswordChange], groups=[tests.DBAAS_API_USERS_ACCESS]) class TestUserAccessPositive(UserAccessBase): """Test the creation and deletion of user grants.""" @before_class def setUp(self): super(TestUserAccessPositive, self).set_up() # None of the ghosts are real databases or users. self.ghostdbs = ["test_user_access_ghost_db"] self.ghostusers = ["test_ghostuser"] self.revokedbs = self.databases[:1] self.remainingdbs = self.databases[1:] def _ensure_nothing_else_created(self): # Make sure grants and revokes do not create users or databases. databases = self.dbaas.databases.list(instance_info.id) database_names = [db.name for db in databases] for ghost in self.ghostdbs: asserts.assert_true(ghost not in database_names) users = self.dbaas.users.list(instance_info.id) user_names = [user.name for user in users] for ghost in self.ghostusers: asserts.assert_true(ghost not in user_names) @test() def test_create_user_and_dbs(self): users = self._user_list_from_names(self.users) self.dbaas.users.create(instance_info.id, users) asserts.assert_equal(202, self.dbaas.last_http_code) databases = [{"name": db} for db in self.databases] self.dbaas.databases.create(instance_info.id, databases) asserts.assert_equal(202, self.dbaas.last_http_code) @test(depends_on=[test_create_user_and_dbs]) def test_no_access(self): # No users have any access to any database. self._reset_access() self._test_access(self.users, []) @test(depends_on=[test_no_access]) def test_grant_full_access(self): # The users are granted access to all test databases. self._reset_access() self._grant_access_plural(self.users, self.databases) self._test_access(self.users, self.databases) @test(depends_on=[test_no_access]) def test_grant_full_access_ignore_databases(self): # The users are granted access to all test databases. all_dbs = [] all_dbs.extend(self.databases) all_dbs.extend(['lost+found', 'mysql', 'information_schema']) self._reset_access() self._grant_access_plural(self.users, self.databases) self._test_ignore_access(self.users, self.databases) @test(depends_on=[test_grant_full_access]) def test_grant_idempotence(self): # Grant operations can be repeated with no ill effects. 
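# Each pass below re-grants the identical access and re-verifies it;
# the resulting access list must be unchanged after every repetition.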
self._reset_access() for repeat in range(3): self._grant_access_plural(self.users, self.databases) self._test_access(self.users, self.databases) @test(depends_on=[test_grant_full_access]) def test_revoke_one_database(self): # Revoking permission removes that database from a user's list. self._reset_access() self._grant_access_plural(self.users, self.databases) self._test_access(self.users, self.databases) self._revoke_access_plural(self.users, self.revokedbs) self._test_access(self.users, self.remainingdbs) @test(depends_on=[test_grant_full_access]) def test_revoke_non_idempotence(self): # Revoking access cannot be repeated. self._reset_access() self._grant_access_plural(self.users, self.databases) self._revoke_access_plural(self.users, self.revokedbs) self._revoke_access_plural(self.users, self.revokedbs, 404) self._test_access(self.users, self.remainingdbs) @test(depends_on=[test_grant_full_access]) def test_revoke_all_access(self): # Revoking access to all databases will leave their access empty. self._reset_access() self._grant_access_plural(self.users, self.databases) self._revoke_access_plural(self.users, self.revokedbs) self._test_access(self.users, self.remainingdbs) @test(depends_on=[test_grant_full_access]) def test_grant_ghostdbs(self): # Grants to imaginary databases are acceptable, and are honored. self._reset_access() self._ensure_nothing_else_created() self._grant_access_plural(self.users, self.ghostdbs) self._ensure_nothing_else_created() @test(depends_on=[test_grant_full_access]) def test_revoke_ghostdbs(self): # Revokes to imaginary databases are acceptable, and are honored. self._reset_access() self._ensure_nothing_else_created() self._grant_access_plural(self.users, self.ghostdbs) self._revoke_access_plural(self.users, self.ghostdbs) self._ensure_nothing_else_created() @test(depends_on=[test_grant_full_access]) def test_grant_ghostusers(self): # You cannot grant permissions to imaginary users, as imaginary users # don't have passwords we can pull from mysql.users self._reset_access() self._grant_access_plural(self.ghostusers, self.databases, 404) @test(depends_on=[test_grant_full_access]) def test_revoke_ghostusers(self): # You cannot revoke permissions from imaginary users, as imaginary # users don't have passwords we can pull from mysql.users self._reset_access() self._revoke_access_plural(self.ghostusers, self.databases, 404) @after_class(always_run=True) def tearDown(self): self._reset_access() for database in self.databases: self.dbaas.databases.delete(instance_info.id, database) asserts.assert_equal(202, self.dbaas.last_http_code) for username in self.users: self.dbaas.users.delete(instance_info.id, username) @test(depends_on_classes=[TestUserAccessPositive], groups=[tests.DBAAS_API_USERS_ACCESS]) class TestUserAccessNegative(UserAccessBase): """Negative tests for the creation and deletion of user grants.""" @before_class def setUp(self): super(TestUserAccessNegative, self).set_up() self.users = ["qe_user?neg3F", "qe_user#neg23"] self.databases = [("qe_user_neg_db%02i" % i) for i in range(2)] self.ghostdbs = [] def _add_users(self, users, expected_response=202): user_list = self._user_list_from_names(users) try: self.dbaas.users.create(instance_info.id, user_list) asserts.assert_equal(self.dbaas.last_http_code, 202) except exceptions.BadRequest: asserts.assert_equal(self.dbaas.last_http_code, 400) asserts.assert_equal(expected_response, self.dbaas.last_http_code) @test() def test_create_duplicate_user_and_dbs(self): """ Create the same user to the first DB - 
allowed, not part of change """ users = self._user_list_from_names(self.users) self.dbaas.users.create(instance_info.id, users) asserts.assert_equal(202, self.dbaas.last_http_code) databases = [{"name": db} for db in self.databases] self.dbaas.databases.create(instance_info.id, databases) asserts.assert_equal(202, self.dbaas.last_http_code) @test(depends_on=[test_create_duplicate_user_and_dbs]) def test_neg_duplicate_useraccess(self): """ Grant duplicate users access to all database. """ username = "qe_user.neg2E" self._add_users([username]) self._add_users([username], 400) for repeat in range(3): self._grant_access_plural(self.users, self.databases) self._test_access(self.users, self.databases) @test() def test_re_create_user(self): user_list = ["re_create_user"] # create, grant, then check a new user self._add_users(user_list) self._test_access(user_list, []) self._grant_access_singular(user_list[0], self.databases) self._test_access(user_list, self.databases) # drop the user temporarily self.dbaas.users.delete(instance_info.id, user_list[0]) # check his access - user should not be found asserts.assert_raises(exceptions.NotFound, self.dbaas.users.list_access, instance_info.id, user_list[0]) # re-create the user self._add_users(user_list) # check his access - should not exist self._test_access(user_list, []) # grant user access to all database. self._grant_access_singular(user_list[0], self.databases) # check his access - user should exist self._test_access(user_list, self.databases) # revoke users access self._revoke_access_plural(user_list, self.databases) def _negative_user_test(self, username, databases, create_response=202, grant_response=202, access_response=200, revoke_response=202): # Try and fail to create the user. self._add_users([username], create_response) self._grant_access_singular(username, databases, grant_response) access = None try: access = self.dbaas.users.list_access(instance_info.id, username) asserts.assert_equal(200, self.dbaas.last_http_code) except exceptions.BadRequest: asserts.assert_equal(400, self.dbaas.last_http_code) except exceptions.NotFound: asserts.assert_equal(404, self.dbaas.last_http_code) finally: asserts.assert_equal(access_response, self.dbaas.last_http_code) if access is not None: access = [db.name for db in access] asserts.assert_equal(set(access), set(self.databases)) self._revoke_access_plural([username], databases, revoke_response) @test def test_user_withperiod(self): # This is actually fine; we escape dots in the user-host pairing. self._negative_user_test("test.user", self.databases) @test def test_user_empty_no_host(self): # This creates a request to ...//users//databases, # which is parsed to mean "show me user 'databases', which in this # case is a valid username, but not one of an extant user. self._negative_user_test("", self.databases, 400, 500, 404, 404) @test def test_user_empty_with_host(self): # self._negative_user_test("", self.databases, 400, 400, 400, 400) # Try and fail to create the user. 
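# With an explicit host ("%"), an empty username must be rejected with
# 400 up front, for create, grant, list_access and revoke alike.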
empty_user = {"name": "", "host": "%", "password": "password", "databases": []} asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, [empty_user]) asserts.assert_equal(400, self.dbaas.last_http_code) asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.grant, instance_info.id, "", [], "%") asserts.assert_equal(400, self.dbaas.last_http_code) asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.list_access, instance_info.id, "", "%") asserts.assert_equal(400, self.dbaas.last_http_code) asserts.assert_raises(exceptions.BadRequest, self.dbaas.users.revoke, instance_info.id, "", "db", "%") asserts.assert_equal(400, self.dbaas.last_http_code) @test def test_user_nametoolong(self): # You cannot create a user with this name. # Grant revoke, and access filter this username as invalid. self._negative_user_test("exceed_limit_user", self.databases, 400, 400, 400, 400) @test def test_user_allspaces(self): self._negative_user_test(" ", self.databases, 400, 400, 400, 400) @after_class(always_run=True) def tearDown(self): self._reset_access() for database in self.databases: self.dbaas.databases.delete(instance_info.id, database) asserts.assert_equal(202, self.dbaas.last_http_code) for username in self.users: self.dbaas.users.delete(instance_info.id, username) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/users.py0000644000175000017500000004512100000000000020737 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time from six.moves.urllib import parse as urllib_parse from proboscis import after_class from proboscis.asserts import assert_equal from proboscis.asserts import assert_false from proboscis.asserts import assert_raises from proboscis.asserts import assert_true from proboscis.asserts import fail from proboscis import before_class from proboscis import test from troveclient.compat import exceptions from trove import tests from trove.tests.api.instances import instance_info from trove.tests import util from trove.tests.util import test_config FAKE = test_config.values['fake_mode'] @test(depends_on_groups=[tests.DBAAS_API_USERS_ROOT], groups=[tests.DBAAS_API_USERS], enabled=not test_config.values['fake_mode']) class TestMysqlAccessNegative(object): """Make sure that MySQL server was secured.""" @test def test_mysql_admin(self): """Ensure we aren't allowed access with os_admin and wrong password.""" util.mysql_connection().assert_fails( instance_info.get_address(), "os_admin", "asdfd-asdf234") @test def test_mysql_root(self): """Ensure we aren't allowed access with root and wrong password.""" util.mysql_connection().assert_fails( instance_info.get_address(), "root", "dsfgnear") @test(depends_on_classes=[TestMysqlAccessNegative], groups=[tests.DBAAS_API_USERS]) class TestUsers(object): """ Test the creation and deletion of users """ username = "tes!@#tuser" password = "testpa$^%ssword" username1 = "anous*&^er" password1 = "anopas*?.sword" db1 = "usersfirstdb" db2 = "usersseconddb" created_users = [username, username1] system_users = ['root', 'debian_sys_maint'] def __init__(self): self.dbaas = util.create_dbaas_client(instance_info.user) self.dbaas_admin = util.create_dbaas_client(instance_info.admin_user) @before_class def setUp(self): databases = [{"name": self.db1, "character_set": "latin2", "collate": "latin2_general_ci"}, {"name": self.db2}] try: self.dbaas.databases.create(instance_info.id, databases) except exceptions.BadRequest as e: if "Validation error" in e.message: raise if not FAKE: time.sleep(5) @after_class def tearDown(self): self.dbaas.databases.delete(instance_info.id, self.db1) self.dbaas.databases.delete(instance_info.id, self.db2) @test() def test_delete_nonexistent_user(self): assert_raises(exceptions.NotFound, self.dbaas.users.delete, instance_info.id, "thisuserDNE") assert_equal(404, self.dbaas.last_http_code) @test() def test_create_users(self): users = [] users.append({"name": self.username, "password": self.password, "databases": [{"name": self.db1}]}) users.append({"name": self.username1, "password": self.password1, "databases": [{"name": self.db1}, {"name": self.db2}]}) self.dbaas.users.create(instance_info.id, users) assert_equal(202, self.dbaas.last_http_code) # Do we need this? 
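# In real mode the 202 is returned before the guest agent has applied
# the change, so give it a moment before probing MySQL directly.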
if not FAKE: time.sleep(5) self.check_database_for_user(self.username, self.password, [self.db1]) self.check_database_for_user(self.username1, self.password1, [self.db1, self.db2]) @test(depends_on=[test_create_users]) def test_create_users_list(self): # tests for users that should be listed users = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) found = False for user in self.created_users: for result in users: if user == result.name: found = True assert_true(found, "User '%s' not found in result" % user) found = False @test(depends_on=[test_create_users]) def test_fails_when_creating_user_twice(self): users = [] users.append({"name": self.username, "password": self.password, "databases": [{"name": self.db1}]}) users.append({"name": self.username1, "password": self.password1, "databases": [{"name": self.db1}, {"name": self.db2}]}) assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) assert_equal(400, self.dbaas.last_http_code) @test(depends_on=[test_create_users_list]) def test_cannot_create_root_user(self): # Tests that the user root (in Config:ignore_users) cannot be created. users = [{"name": "root", "password": "12345", "databases": [{"name": self.db1}]}] assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) @test(depends_on=[test_create_users_list]) def test_get_one_user(self): user = self.dbaas.users.get(instance_info.id, username=self.username, hostname='%') assert_equal(200, self.dbaas.last_http_code) assert_equal(user.name, self.username) assert_equal(1, len(user.databases)) for db in user.databases: assert_equal(db["name"], self.db1) self.check_database_for_user(self.username, self.password, [self.db1]) @test(depends_on=[test_create_users_list]) def test_create_users_list_system(self): # tests for users that should not be listed users = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) for user in self.system_users: found = any(result.name == user for result in users) msg = "User '%s' SHOULD NOT BE found in result" % user assert_false(found, msg) @test(depends_on=[test_create_users_list], runs_after=[test_fails_when_creating_user_twice]) def test_delete_users(self): self.dbaas.users.delete(instance_info.id, self.username, hostname='%') assert_equal(202, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, self.username1, hostname='%') assert_equal(202, self.dbaas.last_http_code) if not FAKE: time.sleep(5) self._check_connection(self.username, self.password) self._check_connection(self.username1, self.password1) @test(depends_on=[test_create_users_list, test_delete_users]) def test_hostnames_default_if_not_present(self): # These tests rely on test_delete_users as they create users only # they use. username = "testuser_nohost" user = {"name": username, "password": "password", "databases": []} self.dbaas.users.create(instance_info.id, [user]) user["host"] = "%" # Can't create the user a second time if it already exists. assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, [user]) self.dbaas.users.delete(instance_info.id, username) @test(depends_on=[test_create_users_list, test_delete_users]) def test_hostnames_make_users_unique(self): # These tests rely on test_delete_users as they create users only # they use. 
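# MySQL identifies an account by its (user, host) pair, so identical
# names with different hosts are distinct users.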
username = "testuser_unique" hostnames = ["192.168.0.1", "192.168.0.2"] users = [{"name": username, "password": "password", "databases": [], "host": hostname} for hostname in hostnames] # Nothing wrong with creating two users with the same name, so long # as their hosts are different. self.dbaas.users.create(instance_info.id, users) for hostname in hostnames: self.dbaas.users.delete(instance_info.id, username, hostname=hostname) @test() def test_updateduser_newname_host_unique(self): # The updated_username@hostname should not exist already users = [] old_name = "testuser1" hostname = "192.168.0.1" users.append({"name": old_name, "password": "password", "host": hostname, "databases": []}) users.append({"name": "testuser2", "password": "password", "host": hostname, "databases": []}) self.dbaas.users.create(instance_info.id, users) user_new = {"name": "testuser2"} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, old_name, user_new, hostname) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, old_name, hostname=hostname) self.dbaas.users.delete(instance_info.id, "testuser2", hostname=hostname) @test() def test_updateduser_name_newhost_unique(self): # The username@updated_hostname should not exist already users = [] username = "testuser" hostname1 = "192.168.0.1" hostname2 = "192.168.0.2" users.append({"name": username, "password": "password", "host": hostname1, "databases": []}) users.append({"name": username, "password": "password", "host": hostname2, "databases": []}) self.dbaas.users.create(instance_info.id, users) user_new = {"host": "192.168.0.2"} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, user_new, hostname1) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, username, hostname=hostname1) self.dbaas.users.delete(instance_info.id, username, hostname=hostname2) @test() def test_updateduser_newname_newhost_unique(self): # The updated_username@updated_hostname should not exist already users = [] username = "testuser1" hostname1 = "192.168.0.1" hostname2 = "192.168.0.2" users.append({"name": username, "password": "password", "host": hostname1, "databases": []}) users.append({"name": "testuser2", "password": "password", "host": hostname2, "databases": []}) self.dbaas.users.create(instance_info.id, users) user_new = {"name": "testuser2", "host": "192.168.0.2"} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, user_new, hostname1) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, username, hostname=hostname1) self.dbaas.users.delete(instance_info.id, "testuser2", hostname=hostname2) @test() def test_updateduser_newhost_invalid(self): # Ensure invalid hostnames/usernames aren't allowed to enter the system users = [] username = "testuser1" hostname1 = "192.168.0.1" users.append({"name": username, "password": "password", "host": hostname1, "databases": []}) self.dbaas.users.create(instance_info.id, users) hostname1 = hostname1.replace('.', '%2e') assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, {"host": "badjuju"}, hostname1) assert_equal(400, self.dbaas.last_http_code) assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, {"name": " bad username "}, hostname1) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, 
username, hostname=hostname1) @test() def test_cannot_change_rootpassword(self): # Cannot change password for a root user user_new = {"password": "12345"} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, "root", user_new) @test() def test_updateuser_emptyhost(self): # Cannot update the user hostname with an empty string users = [] username = "testuser1" hostname = "192.168.0.1" users.append({"name": username, "password": "password", "host": hostname, "databases": []}) self.dbaas.users.create(instance_info.id, users) user_new = {"host": ""} assert_raises(exceptions.BadRequest, self.dbaas.users.update_attributes, instance_info.id, username, user_new, hostname) assert_equal(400, self.dbaas.last_http_code) self.dbaas.users.delete(instance_info.id, username, hostname=hostname) @test(depends_on=[test_create_users]) def test_hostname_ipv4_restriction(self): # By default, user hostnames are required to be % or IPv4 addresses. user = {"name": "ipv4_nodice", "password": "password", "databases": [], "host": "disallowed_host"} assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, [user]) def show_databases(self, user, password): print("Going to connect to %s, %s, %s" % (instance_info.get_address(), user, password)) with util.mysql_connection().create(instance_info.get_address(), user, password) as db: print(db) dbs = db.execute("show databases") return [row['Database'] for row in dbs] def check_database_for_user(self, user, password, dbs): if not FAKE: # Make the real call to the database to check things. actual_list = self.show_databases(user, password) for db in dbs: assert_true( db in actual_list, "No match for db %s in dblist. %s :(" % (db, actual_list)) # Confirm via API list. result = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) for item in result: if item.name == user: break else: fail("User %s not added to collection." % user) # Confirm via API get. result = self.dbaas.users.get(instance_info.id, user, '%') assert_equal(200, self.dbaas.last_http_code) if result.name != user: fail("User %s not found via get." 
% user) @test def test_username_too_long(self): users = [{"name": "1233asdwer345tyg56", "password": self.password, "database": self.db1}] assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) assert_equal(400, self.dbaas.last_http_code) @test def test_invalid_username(self): users = [] users.append({"name": "user,", "password": self.password, "database": self.db1}) assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) assert_equal(400, self.dbaas.last_http_code) @test def test_invalid_password(self): users = [{"name": "anouser", "password": "sdf,;", "database": self.db1}] assert_raises(exceptions.BadRequest, self.dbaas.users.create, instance_info.id, users) assert_equal(400, self.dbaas.last_http_code) @test def test_pagination(self): users = [] users.append({"name": "Jetson", "password": "george", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Jetson", "password": "george", "host": "127.0.0.1", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Spacely", "password": "cosmo", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Spacely", "password": "cosmo", "host": "127.0.0.1", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Uniblab", "password": "fired", "databases": [{"name": "Sprockets"}]}) users.append({"name": "Uniblab", "password": "fired", "host": "192.168.0.10", "databases": [{"name": "Sprockets"}]}) self.dbaas.users.create(instance_info.id, users) assert_equal(202, self.dbaas.last_http_code) if not FAKE: time.sleep(5) limit = 2 users = self.dbaas.users.list(instance_info.id, limit=limit) assert_equal(200, self.dbaas.last_http_code) marker = users.next # Better get only as many as we asked for assert_true(len(users) <= limit) assert_true(users.next is not None) expected_marker = "%s@%s" % (users[-1].name, users[-1].host) expected_marker = urllib_parse.quote(expected_marker) assert_equal(marker, expected_marker) marker = users.next # I better get new users if I use the marker I was handed. users = self.dbaas.users.list(instance_info.id, limit=limit, marker=marker) assert_equal(200, self.dbaas.last_http_code) assert_true(marker not in [user.name for user in users]) # Now fetch again with a larger limit. users = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) assert_true(users.next is None) def _check_connection(self, username, password): if not FAKE: util.mysql_connection().assert_fails(instance_info.get_address(), username, password) # Also determine the db is gone via API. result = self.dbaas.users.list(instance_info.id) assert_equal(200, self.dbaas.last_http_code) for item in result: if item.name == username: fail("User %s was not deleted." % username) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/api/versions.py0000644000175000017500000000622300000000000021446 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from proboscis.asserts import assert_equal from proboscis import before_class from proboscis import SkipTest from proboscis import test from troveclient.compat.exceptions import ClientException from trove import tests from trove.tests.util import create_dbaas_client from trove.tests.util import test_config from trove.tests.util.users import Requirements @test(groups=[tests.DBAAS_API_VERSIONS]) class Versions(object): """Test listing all versions and verify the current version.""" @before_class def setUp(self): """Sets up the client.""" user = test_config.users.find_user(Requirements(is_admin=False)) self.client = create_dbaas_client(user) @test def test_list_versions_index(self): """test_list_versions_index""" versions = self.client.versions.index(test_config.version_url) assert_equal(1, len(versions)) assert_equal("CURRENT", versions[0].status, message="Version status: %s" % versions[0].status) expected_version = test_config.values['trove_version'] assert_equal(expected_version, versions[0].id, message="Version ID: %s" % versions[0].id) expected_api_updated = test_config.values['trove_api_updated'] assert_equal(expected_api_updated, versions[0].updated, message="Version updated: %s" % versions[0].updated) def _request(self, url, method='GET', response='200'): resp, body = None, None full_url = test_config.version_url + url try: resp, body = self.client.client.request(full_url, method) assert_equal(resp.get('status', ''), response) except ClientException as ce: assert_equal(str(ce.http_status), response) return body @test def test_no_slash_no_version(self): self._request('') @test def test_no_slash_with_version(self): if test_config.auth_strategy == "fake": raise SkipTest("Skipping this test since auth is faked.") self._request('/v1.0', response='401') @test def test_with_slash_no_version(self): self._request('/') @test def test_with_slash_with_version(self): if test_config.auth_strategy == "fake": raise SkipTest("Skipping this test since auth is faked.") self._request('/v1.0/', response='401') @test def test_request_no_version(self): self._request('/dbaas/instances', response='404') @test def test_request_bogus_version(self): self._request('/0.0/', response='404') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/config.py0000644000175000017500000001602600000000000020274 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles configuration options for the tests. The tests are capable of running in other contexts, such as in a VM or against a real deployment. Using this configuration ensures we can run them in other environments if we choose to. """ from collections import Mapping from datetime import datetime import json import os # TODO(tim.simpson): I feel like this class already exists somewhere in core # Python. 
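# types.MappingProxyType in the standard library offers a similar read-only
# view over a dict (e.g. types.MappingProxyType({'key': 'value'})), and on
# newer Pythons Mapping itself lives in collections.abc.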
class FrozenDict(Mapping): def __init__(self, original): self.original = original def __len__(self): return self.original.__len__() def __iter__(self, *args, **kwargs): return self.original.__iter__(self, *args, **kwargs) def __getitem__(self, *args, **kwargs): return self.original.__getitem__(*args, **kwargs) def __str__(self): return self.original.__str__() USAGE_ENDPOINT = os.environ.get("USAGE_ENDPOINT", "trove.tests.util.usage.UsageVerifier") class TestConfig(object): """ Holds test configuration values which can be accessed as attributes or using the values dictionary. """ def __init__(self): """ Create TestConfig, and set default values. These will be overwritten by the "load_from" methods below. """ self._loaded_files = [] self._values = { 'clean_slate': os.environ.get("CLEAN_SLATE", "False") == "True", 'fake_mode': os.environ.get("FAKE_MODE", "False") == "True", 'nova_auth_url': "http://localhost/identity/v2.0", 'trove_auth_url': "http://localhost/identity/v2.0/tokens", 'dbaas_url': "http://localhost:8775/v1.0/dbaas", 'version_url': "http://localhost:8775/", 'nova_url': "http://localhost:8774/v2", 'swift_url': "http://localhost:8080/v1/AUTH_", 'dbaas_datastore': "mysql", 'dbaas_datastore_id': "a00000a0-00a0-0a00-00a0-000a000000aa", 'dbaas_datastore_name_no_versions': "Test_Datastore_1", 'dbaas_datastore_version': "5.5", 'dbaas_datastore_version_id': "b00000b0-00b0-0b00-00b0-" "000b000000bb", 'instance_create_time': 16 * 60, 'mysql_connection_method': {"type": "direct"}, 'typical_nova_image_name': None, 'white_box': os.environ.get("WHITE_BOX", "False") == "True", 'test_mgmt': False, 'use_local_ovz': False, "known_bugs": {}, "in_proc_server": True, "report_directory": os.environ.get("REPORT_DIRECTORY", None), "trove_volume_support": True, "trove_volume_size": 1, "trove_max_volumes_per_tenant": 100, "trove_max_instances_per_tenant": 55, "usage_endpoint": USAGE_ENDPOINT, "root_on_create": False, "mysql": { "configurations": { "valid_values": { "connect_timeout": 120, "local_infile": 0, "collation_server": "latin1_swedish_ci" }, "appending_values": { "join_buffer_size": 1048576, "connect_timeout": 15 }, "nondynamic_parameter": { "join_buffer_size": 1048576, "innodb_buffer_pool_size": 57671680 }, "out_of_bounds_under": { "connect_timeout": -10 }, "out_of_bounds_over": { "connect_timeout": 1000000 }, "parameters_list": [ "key_buffer_size", "connect_timeout" ] }, "volume_support": True, }, "redis": {"volume_support": False}, "swift_enabled": True, "trove_mgmt_network": "trove-mgmt", "running_status": ["ACTIVE", "HEALTHY"], } self._frozen_values = FrozenDict(self._values) self._users = None def get(self, name, default_value): return self.values.get(name, default_value) def get_report(self): return PrintReporter() def load_from_line(self, line): index = line.find("=") if index >= 0: key = line[:index] value = line[index + 1:] self._values[key] = value def load_include_files(self, original_file, files): directory = os.path.dirname(original_file) for file_sub_path in files: file_full_path = os.path.join(directory, file_sub_path) self.load_from_file(file_full_path) def load_from_file(self, file_path): if file_path in self._loaded_files: return file_contents = open(file_path, "r").read() try: contents = json.loads(file_contents) except Exception as exception: raise RuntimeError("Error loading conf file \"%s\"." 
% file_path, exception) finally: self._loaded_files.append(file_path) if "include-files" in contents: self.load_include_files(file_path, contents['include-files']) del contents['include-files'] self._values.update(contents) def __getattr__(self, name): if name not in self._values: raise AttributeError('Configuration value "%s" not found.' % name) else: return self._values[name] def python_cmd_list(self): """The start of a command list to use when running Python scripts.""" commands = [] if self.use_venv: commands.append("%s/tools/with_venv.sh" % self.nova_code_root) commands.append("python") return commands @property def users(self): if self._users is None: from trove.tests.util.users import Users self._users = Users(self.values['users']) return self._users @property def values(self): return self._frozen_values class PrintReporter(object): def log(self, msg): print("%s [REPORT] %s" % (str(datetime.now()), msg)) def update(self): pass # Ignore. This is used in other reporters. CONFIG = TestConfig() del TestConfig.__init__ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7801108 trove-12.1.0.dev92/trove/tests/db/0000755000175000017500000000000000000000000017035 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/db/__init__.py0000644000175000017500000000000000000000000021134 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/db/migrations.py0000644000175000017500000001562000000000000021567 0ustar00coreycorey00000000000000# Copyright 2014 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests database migration scripts for mysql. To run the tests, you'll need to set up a db user named 'openstack_citest' with password 'openstack_citest' on localhost. This user needs db admin rights (i.e.
create/drop database) """ import glob import os import migrate.versioning.api as migration_api from migrate.versioning import repository from oslo_concurrency import processutils from oslo_log import log as logging from proboscis import after_class from proboscis.asserts import assert_equal from proboscis.asserts import assert_true from proboscis import before_class from proboscis import SkipTest from proboscis import test import sqlalchemy import sqlalchemy.exc from trove.common.i18n import _ import trove.db.sqlalchemy.migrate_repo from trove.tests.util import event_simulator GROUP = "dbaas.db.migrations" LOG = logging.getLogger(__name__) @test(groups=[GROUP]) class ProjectTestCase(object): """Test migration scripts integrity.""" @test def test_all_migrations_have_downgrade(self): topdir = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)) py_glob = os.path.join(topdir, "trove", "db", "sqlalchemy", "migrate_repo", "versions", "*.py") downgrades_found = [] for path in glob.iglob(py_glob): has_downgrade = False with open(path, "r") as f: for line in f: if 'def downgrade(' in line: has_downgrade = True if has_downgrade: fname = os.path.basename(path) downgrades_found.append(fname) helpful_msg = (_("The following migration scripts have a " "downgrade implementation:\n\t%s") % '\n\t'.join(sorted(downgrades_found))) assert_equal(downgrades_found, [], helpful_msg) @test(depends_on_classes=[ProjectTestCase], groups=[GROUP]) class TestTroveMigrations(object): """Test sqlalchemy-migrate migrations.""" USER = "openstack_citest" PASSWD = "openstack_citest" DATABASE = "openstack_citest" @before_class def setUp(self): event_simulator.allowable_empty_sleeps = 1 @after_class def tearDown(self): event_simulator.allowable_empty_sleeps = 0 def __init__(self): self.MIGRATE_FILE = trove.db.sqlalchemy.migrate_repo.__file__ self.REPOSITORY = repository.Repository( os.path.abspath(os.path.dirname(self.MIGRATE_FILE))) self.INIT_VERSION = 0 def _get_connect_string(self, backend, database=None): """Get database connection string.""" args = {'backend': backend, 'user': self.USER, 'passwd': self.PASSWD} template = "%(backend)s://%(user)s:%(passwd)s@localhost" if database is not None: args['database'] = database template += "/%(database)s" return template % args def _is_backend_avail(self, backend): """Check database backend availability.""" connect_uri = self._get_connect_string(backend) engine = sqlalchemy.create_engine(connect_uri) try: connection = engine.connect() except Exception: # any error here means the database backend is not available return False else: connection.close() return True finally: if engine is not None: engine.dispose() def _execute_cmd(self, cmd=None): """Shell out and run the given command.""" out, err = processutils.trycmd(cmd, shell=True) # Until someone wants to rewrite this to avoid the warning # we need to handle it for newer versions of mysql valid_err = err == '' or \ err == 'mysql: [Warning] Using a password on the ' \ 'command line interface can be insecure.\n' assert_true(valid_err, "Failed to run: '%(cmd)s' " "Output: '%(stdout)s' " "Error: '%(stderr)s'" % {'cmd': cmd, 'stdout': out, 'stderr': err}) def _reset_mysql(self): """Reset the MySQL test database Drop the MySQL test database if it already exists and create a new one. 
""" sql = ("drop database if exists %(database)s; " "create database %(database)s;" % {'database': self.DATABASE}) cmd = ("mysql -u \"%(user)s\" -p%(password)s -h %(host)s " "-e \"%(sql)s\"" % {'user': self.USER, 'password': self.PASSWD, 'host': 'localhost', 'sql': sql}) self._execute_cmd(cmd) @test def test_mysql_migration(self): db_backend = "mysql+pymysql" # Gracefully skip this test if the developer do not have # MySQL running. MySQL should always be available on # the infrastructure if not self._is_backend_avail(db_backend): raise SkipTest("MySQL is not available.") self._reset_mysql() connect_string = self._get_connect_string(db_backend, self.DATABASE) engine = sqlalchemy.create_engine(connect_string) self._walk_versions(engine) engine.dispose() def _walk_versions(self, engine=None): """Walk through and test the migration scripts Determine latest version script from the repo, then upgrade from 1 through to the latest. """ # Place the database under version control migration_api.version_control(engine, self.REPOSITORY, self.INIT_VERSION) assert_equal(self.INIT_VERSION, migration_api.db_version(engine, self.REPOSITORY)) LOG.debug('Latest version is %s', self.REPOSITORY.latest) versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1) # Walk from version 1 to the latest, testing the upgrade paths. for version in versions: self._migrate_up(engine, version) def _migrate_up(self, engine, version): """Migrate up to a new version of database.""" migration_api.upgrade(engine, self.REPOSITORY, version) assert_equal(version, migration_api.db_version(engine, self.REPOSITORY)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7801108 trove-12.1.0.dev92/trove/tests/examples/0000755000175000017500000000000000000000000020266 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/examples/__init__.py0000644000175000017500000000000000000000000022365 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/examples/client.py0000644000175000017500000002707100000000000022125 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os import re import time from proboscis.asserts import fail from six.moves.urllib.parse import urlparse from troveclient.compat.client import TroveHTTPClient from trove.tests.config import CONFIG print_req = True def shorten_url(url): parsed = urlparse(url) if parsed.query: method_url = parsed.path + '?' 
+ parsed.query else: method_url = parsed.path return method_url class SnippetWriter(object): def __init__(self, conf, get_replace_list): self.conf = conf self.get_replace_list = get_replace_list def output_request(self, user_details, name, url, output_headers, body, content_type, method, static_auth_token=True): headers = [] parsed = urlparse(url) method_url = shorten_url(url) headers.append("%s %s HTTP/1.1" % (method, method_url)) headers.append("User-Agent: %s" % output_headers['User-Agent']) headers.append("Host: %s" % parsed.netloc) # static_auth_token option for documentation purposes if static_auth_token: output_token = '87c6033c-9ff6-405f-943e-2deb73f278b7' else: output_token = output_headers['X-Auth-Token'] headers.append("X-Auth-Token: %s" % output_token) headers.append("Accept: %s" % output_headers['Accept']) print("OUTPUT HEADERS: %s" % output_headers) headers.append("Content-Type: %s" % output_headers['Content-Type']) self.write_file(user_details, name, "-%s-http.txt" % content_type, url, method, "request", output='\n'.join(headers)) pretty_body = self.format_body(body, content_type) self.write_file(user_details, name, ".%s" % content_type, url, method, "request", output=pretty_body) def output_response(self, user_details, name, content_type, url, method, resp, body): version = "1.1" # if resp.version == 11 else "1.0" lines = [ ["HTTP/%s %s %s" % (version, resp.status, resp.reason)], ["Content-Type: %s" % resp['content-type']], ] if 'via' in resp: lines.append(["Via: %s" % resp['via']]) lines.append(["Content-Length: %s" % resp['content-length']]) lines.append(["Date: Mon, 18 Mar 2013 19:09:17 GMT"]) if 'server' in resp: lines.append(["Server: %s" % resp["server"]]) new_lines = [x[0] for x in lines] joined_lines = '\n'.join(new_lines) self.write_file(user_details, name, "-%s-http.txt" % content_type, url, method, "response", output=joined_lines) if body: pretty_body = self.format_body(body, content_type) self.write_file(user_details, name, ".%s" % content_type, url, method, "response", output=pretty_body) def format_body(self, body, content_type): assert content_type == 'json' try: if self.conf['replace_dns_hostname']: before = r'\"hostname\": \"[a-zA-Z0-9-_\.]*\"' after = '\"hostname\": \"%s\"' % self.conf[ 'replace_dns_hostname'] body = re.sub(before, after, body) return json.dumps(json.loads(body), sort_keys=True, indent=4) except Exception: return body or '' def write_request_file(self, user_details, name, content_type, url, method, req_headers, request_body): if print_req: print("\t%s req url:%s" % (content_type, url)) print("\t%s req method:%s" % (content_type, method)) print("\t%s req headers:%s" % (content_type, req_headers)) print("\t%s req body:%s" % (content_type, request_body)) self.output_request(user_details, name, url, req_headers, request_body, content_type, method) def write_response_file(self, user_details, name, content_type, url, method, resp, resp_content): if print_req: print("\t%s resp:%s" % (content_type, resp)) print("\t%s resp content:%s" % (content_type, resp_content)) self.output_response(user_details, name, content_type, url, method, resp, resp_content) def write_file(self, user_details, name, content_type, url, method, in_or_out, output): output = output.replace(user_details['tenant'], '1234') if self.conf['replace_host']: output = output.replace(user_details['api_url'], self.conf['replace_host']) pre_host_port = urlparse(user_details['service_url']).netloc post_host = urlparse(self.conf['replace_host']).netloc output = 
output.replace(pre_host_port, post_host) output = output.replace("fake_host", "hostname") output = output.replace("FAKE_", "") for resource in self.get_replace_list(): output = output.replace(str(resource[0]), str(resource[1])) filename = "%s/db-%s-%s%s" % (self.conf['directory'], name.replace('_', '-'), in_or_out, content_type) self._write_file(filename, output) def _write_file(self, filename, output): empty = len(output.strip()) == 0 # Manipulate actual data to appease doc niceness checks actual = [line.rstrip() for line in output.split("\n")] if not empty and actual[len(actual) - 1] != '': actual.append("") def goofy_diff(a, b): diff = [] for i in range(len(a)): if i < len(b): if a[i].rstrip() != b[i].rstrip(): diff.append('Expected line %d :%s\n' ' Actual line %d :%s' % (i + 1, a[i], i + 1, b[i])) else: diff.append("Expected line %d :%s" % (i + 1, a[i])) for j in range(len(b) - len(a)): i2 = len(a) + j diff.append(" Actual line %d :%s" % (i2 + 1, b[i2])) return diff def write_actual_file(): # Always write the file. with open(filename, "w") as file: for line in actual: file.write("%s\n" % line) def assert_output_matches(): if os.path.isfile(filename): with open(filename, 'r') as original_file: original = original_file.read() if empty: fail('Error: output missing in new snippet generation ' 'for %s. Old content follows:\n"""%s"""' % (filename, original)) elif filename.endswith('.json'): assert_json_matches(original) else: assert_file_matches(original) elif not empty: fail('Error: new file necessary where there was no file ' 'before. Filename=%s\nContent follows:\n"""%s"""' % (filename, output)) def assert_file_matches(original): expected = original.split('\n') # Remove the last item which will look like a duplicated # file ending newline expected.pop() diff = '\n'.join(goofy_diff(expected, actual)) if diff: fail('Error: output files differ for %s:\n%s' % (filename, diff)) def order_json(json_obj): """Sort the json object so that it can be compared properly.""" if isinstance(json_obj, list): return sorted(order_json(elem) for elem in json_obj) if isinstance(json_obj, dict): return sorted( (key, order_json(value)) for key, value in json_obj.items()) else: return json_obj def assert_json_matches(original): try: expected_json = json.loads(original) actual_json = json.loads(output) except ValueError: fail('Invalid json!\nExpected: %s\nActual: %s' % (original, output)) if order_json(expected_json) != order_json(actual_json): # Re-Use the same failure output if the json is different assert_file_matches(original) if not os.environ.get('TESTS_FIX_EXAMPLES'): assert_output_matches() elif not empty: write_actual_file() # This method is mixed into the client class. # It requires the following fields: snippet_writer, content_type, and # "name," the last of which must be set before each call. def write_to_snippet(self, args, kwargs, resp, body): if self.name is None: raise RuntimeError("'name' not set before call.") url = args[0] method = args[1] request_headers = kwargs['headers'] request_body = kwargs.get('body', None) response_headers = resp response_body = body # Log request user_details = { 'api_url': self.service_url, 'service_url': self.service_url, 'tenant': self.tenant, } self.snippet_writer.write_request_file(user_details, self.name, self.content_type, url, method, request_headers, request_body) self.snippet_writer.write_response_file(user_details, self.name, self.content_type, url, method, response_headers, response_body) # Create a short url to assert against. 
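# The prefix loop below strips the service endpoint from the front of the
# full request URL, so that only the route path is recorded in old_info and
# asserted against by the tests.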
short_url = url base_url = self.service_url for prefix in (base_url,): if short_url.startswith(prefix): short_url = short_url[len(prefix):] self.old_info = { 'url': shorten_url(short_url), 'method': method, 'request_headers': request_headers, 'request_body': request_body, 'response_headers': response_headers, 'response_body': response_body } def add_fake_response_headers(headers): """ Fakes other items that would appear if you were using, just to make up an example, a proxy. """ conf = CONFIG.examples if 'via' in conf and 'via' not in headers: headers['via'] = conf['via'] if 'server' in conf and 'server' not in headers: headers['server'] = conf['server'] if 'date' not in headers: date_string = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) headers['date'] = date_string class JsonClient(TroveHTTPClient): content_type = 'json' def http_log(self, args, kwargs, resp, body): add_fake_response_headers(resp) self.pretty_log(args, kwargs, resp, body) def write_snippet(): return write_to_snippet(self, args, kwargs, resp, body.decode()) self.write_snippet = write_snippet ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/examples/snippets.py0000644000175000017500000013000400000000000022503 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
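# The SnippetWriter defined in client.py above can be exercised on its own,
# which is handy when debugging snippet generation. A minimal sketch,
# assuming the trove test tree is importable; the conf keys mirror the ones
# the writer reads, and the values here are illustrative assumptions:
import json as _json

from trove.tests.examples.client import SnippetWriter

_demo_conf = {'directory': '/tmp/snippets', 'replace_host': None,
              'replace_dns_hostname': None}
_demo_writer = SnippetWriter(_demo_conf, lambda: [])
# format_body() parses, sorts, and pretty-prints a JSON payload before it
# is written out as a documentation snippet.
_pretty = _demo_writer.format_body('{"b": 1, "a": 2}', 'json')
assert _json.loads(_pretty) == {'a': 2, 'b': 1}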
import functools import json import time from oslo_log import log as logging from oslo_utils import importutils from proboscis import asserts from proboscis.asserts import assert_equal from proboscis.asserts import assert_true from proboscis.asserts import Check from proboscis.asserts import fail from proboscis import before_class from proboscis import SkipTest from proboscis import test from proboscis import TestProgram import six from troveclient.compat import client as trove_client from troveclient.compat import Dbaas from troveclient.compat import TroveHTTPClient from trove.tests.config import CONFIG from trove.tests.examples.client import JsonClient from trove.tests.examples.client import SnippetWriter # troveclient.compat.client._logger was changed to LOG in 2.11.0 if hasattr(trove_client, '_logger'): trove_client._logger.setLevel(logging.CRITICAL) elif hasattr(trove_client, 'LOG'): trove_client.LOG.setLevel(logging.CRITICAL) import_class = importutils.import_class FAKE_INFO = {'m': 30, 's': 0, 'uuid': 'abcdef00-aaaa-aaaa-aaaa-bbbbbbbbbbbb'} EXAMPLE_BACKUP_ID = "a9832168-7541-4536-b8d9-a8a9b79cf1b4" EXAMPLE_BACKUP_INCREMENTAL_ID = "2e351a71-dd28-4bcb-a7d6-d36a5b487173" EXAMPLE_CONFIG_ID = "43a6ea86-e959-4735-9e46-a6a5d4a2d80f" EXAMPLE_INSTANCE_ID = "44b277eb-39be-4921-be31-3d61b43651d7" EXAMPLE_INSTANCE_ID_2 = "d5a9db64-7ef7-41c5-8e1e-4013166874bc" EXAMPLE_CONFIG_SERVER_ID = "271898715" def get_now(): from datetime import datetime return datetime(2014, 10, 30, hour=12, minute=FAKE_INFO['m'], second=FAKE_INFO['s']) def get_uuid(): return FAKE_INFO['uuid'] def set_fake_stuff(uuid=None, minute=None, unique_id=None): if uuid: FAKE_INFO['uuid'] = uuid if minute: FAKE_INFO['m'] = minute if unique_id: from trove.common.template import SingleInstanceConfigTemplate def fake_calc_id(self): return unique_id SingleInstanceConfigTemplate._calculate_unique_id = fake_calc_id def monkey_patch_uuid_and_date(): import uuid uuid.uuid4 = get_uuid from trove.common import timeutils from trove.common import utils timeutils.utcnow = get_now utils.generate_uuid = get_uuid @test def load_config_file(): global conf if CONFIG.get("examples", None) is None: fail("Missing 'examples' config in test config.") conf = CONFIG.examples global normal_user normal_user = CONFIG.users.find_user_by_name(conf['normal_user_name']) global admin_user admin_user = CONFIG.users.find_user_by_name(conf['admin_user_name']) def create_client_args(user): auth_strategy = None kwargs = { 'service_type': 'trove', 'insecure': CONFIG.values['trove_client_insecure'], } def set_optional(kwargs_name, test_conf_name): value = CONFIG.values.get(test_conf_name, None) if value is not None: kwargs[kwargs_name] = value service_url = CONFIG.get('override_trove_api_url', None) if user.requirements.is_admin: service_url = CONFIG.get('override_admin_trove_api_url', service_url) if service_url: kwargs['service_url'] = service_url auth_strategy = None if user.requirements.is_admin: auth_strategy = CONFIG.get('admin_auth_strategy', CONFIG.auth_strategy) else: auth_strategy = CONFIG.auth_strategy set_optional('region_name', 'trove_client_region_name') if CONFIG.values.get('override_trove_api_url_append_tenant', False): kwargs['service_url'] += "/" + user.tenant if auth_strategy == 'fake': from troveclient.compat import auth class FakeAuth(auth.Authenticator): def authenticate(self): class FakeCatalog(object): def __init__(self, auth): self.auth = auth def get_public_url(self): return "%s/%s" % (CONFIG.dbaas_url, self.auth.tenant) def get_token(self): return self.auth.tenant return FakeCatalog(self) auth_strategy = FakeAuth if auth_strategy:
kwargs['auth_strategy'] = auth_strategy if not user.requirements.is_admin: auth_url = CONFIG.trove_auth_url else: auth_url = CONFIG.values.get('trove_admin_auth_url', CONFIG.trove_auth_url) if CONFIG.values.get('trove_client_cls'): cls_name = CONFIG.trove_client_cls kwargs['client_cls'] = import_class(cls_name) kwargs['tenant'] = user.tenant kwargs['auth_url'] = auth_url return (user.auth_user, user.auth_key), kwargs def create_client(cls, user): args, kwargs = create_client_args(user) kwargs['client_cls'] = cls client = Dbaas(*args, **kwargs) return client def make_client(user): args, kwargs = create_client_args(user) kwargs['client_cls'] = JsonClient client = Dbaas(*args, **kwargs) client.client.name = "auth" client.authenticate() return client def write_snippet(get_replace_list, client, name, url, method, status, reason, func, *func_args): """ 'name' is the name of the file, while 'url,' 'method,' 'status,' and 'reason' are expected values that are asserted against. If func_args is present, it is a list of lists, each one of which is passed as the *args to the two invocations of "func". """ func_args = func_args or [] snippet_writer = SnippetWriter(conf, get_replace_list) results = [] client.client.snippet_writer = snippet_writer client.client.name = name args = func_args result = func(client, *args) # Now write the snippet (if this happens earlier we can't replace # data such as the instance ID). client.client.write_snippet() with Check() as check: check.equal(client.client.old_info['url'], url) check.equal(client.client.old_info['method'], method) check.equal(client.client.old_info['response_headers'].status, status) check.equal(client.client.old_info['response_headers'].reason, reason) results.append(result) # To prevent this from writing a snippet somewhere else... 
client.client.name = "junk" return results JSON_INDEX = 0 class Example(object): @classmethod def get_replace_list(cls): return [] def snippet(self, *args, **kwargs): return write_snippet(self.get_replace_list, self.client, *args, **kwargs) @test(depends_on=[load_config_file], enabled=False) class Versions(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def get_versions(self): self.snippet( "versions", "", "GET", 200, "OK", lambda client: client.versions.index(conf['version_url'])) @test def get_version(self): def version_call(client): return client.versions.index(conf['version_url'] + "/v1.0/") self.snippet("versions", "/v1.0", "GET", 200, "OK", version_call) @test(depends_on=[load_config_file]) class Flavors(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def get_flavors(self): self.snippet( "flavors", "/flavors", "GET", 200, "OK", lambda client: client.flavors.list()) @test def get_flavor_by_id(self): self.snippet( "flavors_by_id", "/flavors/1", "GET", 200, "OK", lambda client: client.flavors.get(1)) @test(depends_on=[load_config_file]) def clean_slate(): client = create_client(TroveHTTPClient, admin_user) client.client.name = "list" instances = client.instances.list() assert_equal(0, len(instances), "Instance count must be zero.") @test(depends_on=[clean_slate]) class CreateInstance(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def post_create_instance(self): set_fake_stuff(uuid=EXAMPLE_INSTANCE_ID) def create_instance(client, name): instance = client.instances.create( name, 1, volume={'size': 2}, databases=[ { "name": "sampledb", "character_set": "utf8", "collate": "utf8_general_ci" }, { "name": "nextround" } ], users=[ { "databases": [{"name": "sampledb"}], "name": "demouser", "password": "demopassword" } ]) assert_equal(instance.status, "BUILD") return instance self.instances = self.snippet( "create_instance", "/instances", "POST", 200, "OK", create_instance, "json_rack_instance") def an_instance_is_not_active(self): for instance in self.instances: instance = self.client.instances.get(instance.id) if instance.status not in CONFIG.running_status: assert_equal(instance.status, "BUILD") return True return False @test(depends_on=[post_create_instance]) def wait_for_instances(self): while self.an_instance_is_not_active(): time.sleep(1) global json_instance json_instance = self.instances[0] @test(depends_on=[CreateInstance], groups=['uses_instances']) class Databases(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def post_create_databases(self): self.snippet( "create_databases", "/instances/%s/databases" % json_instance.id, "POST", 202, "Accepted", lambda client: client.databases.create( json_instance.id, databases=[ { "name": "testingdb", "character_set": "utf8", "collate": "utf8_general_ci" }, { "name": "anotherdb" }, { "name": "oneMoreDB" }])) @test(depends_on=[post_create_databases]) def get_list_databases(self): self.snippet( "list_databases", "/instances/%s/databases" % json_instance.id, "GET", 200, "OK", lambda client: client.databases.list(json_instance.id)) @test(depends_on=[post_create_databases]) def get_list_databases_limit_two(self): results = self.snippet( "list_databases_pagination", "/instances/%s/databases?limit=1" % json_instance.id, "GET", 200, "OK", lambda client: client.databases.list(json_instance.id, limit=1)) assert_equal(1, len(results[JSON_INDEX])) assert_equal("anotherdb", results[JSON_INDEX].next)
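# The limit/marker assertions above generalize to full pagination: each
# page exposes a 'next' marker that is fed back as 'marker' on the
# following call. A hedged sketch (the helper name is illustrative, and it
# assumes the compat client's list() accepts a 'marker' keyword, as the
# paginated responses above suggest):
def _iter_all_databases(client, instance_id, page_size=20):
    """Yield every database on an instance, one page at a time."""
    marker = None
    while True:
        page = client.databases.list(instance_id, limit=page_size,
                                     marker=marker)
        for database in page:
            yield database
        marker = getattr(page, 'next', None)
        if not marker:
            break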
@test(depends_on=[post_create_databases], runs_after=[get_list_databases, get_list_databases_limit_two]) def delete_databases(self): self.snippet( "delete_databases", "/instances/%s/databases/testingdb" % json_instance.id, "DELETE", 202, "Accepted", lambda client: client.databases.delete(json_instance.id, 'testingdb')) @test(depends_on=[CreateInstance], groups=['uses_instances']) class Users(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def post_create_users(self): self.snippet( "create_users", "/instances/%s/users" % json_instance.id, "POST", 202, "Accepted", lambda client: client.users.create( json_instance.id, [{ "name": "dbuser1", "password": "password", "databases": [ { "name": "databaseA" } ] }, { "name": "dbuser2", "password": "password", "databases": [ { "name": "databaseB" }, { "name": "databaseC" } ] }, { "name": "dbuser3", "password": "password", "databases": [ { "name": "databaseD" } ] }])) @test(depends_on=[post_create_users]) def get_list_users(self): self.snippet( "list_users", "/instances/%s/users" % json_instance.id, "GET", 200, "OK", lambda client: client.users.list(json_instance.id)) @test(depends_on=[post_create_users]) def get_list_users_limit_two(self): self.snippet( "list_users_pagination", "/instances/%s/users?limit=2" % json_instance.id, "GET", 200, "OK", lambda client: client.users.list(json_instance.id, limit=2)) @test(depends_on=[post_create_users], runs_after=[get_list_users, get_list_users_limit_two]) def delete_users(self): user_name = "demouser" self.snippet( "delete_users", "/instances/%s/users/%s" % (json_instance.id, user_name), "DELETE", 202, "Accepted", lambda client: client.users.delete(json_instance.id, username=user_name)) @test(depends_on=[post_create_users]) def modify_user_attributes(self): old_user_name = "dbuser1" self.snippet( "change_user_attributes", "/instances/%s/users/%s" % (json_instance.id, old_user_name), "PUT", 202, "Accepted", lambda client: client.users.update_attributes( json_instance.id, username=old_user_name, newuserattr={ "name": "new_username", "password": "new_password" } ) ) @test(depends_on=[CreateInstance], groups=['uses_instances']) class Root(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def post_enable_root_access(self): self.snippet( "enable_root_user", "/instances/%s/root" % json_instance.id, "POST", 200, "OK", lambda client: client.root.create(json_instance.id)) @test(depends_on=[post_enable_root_access]) def get_check_root_access(self): results = self.snippet( "check_root_user", "/instances/%s/root" % json_instance.id, "GET", 200, "OK", lambda client: client.root.is_root_enabled(json_instance.id)) assert_equal(results[JSON_INDEX].rootEnabled, True) @test(depends_on=[get_check_root_access]) def delete_disable_root_access(self): self.snippet( "disable_root_user", "/instances/%s/root" % json_instance.id, "DELETE", 204, "No Content", lambda client: client.root.delete(json_instance.id)) # restore root for subsequent tests self.post_enable_root_access() class ActiveMixin(Example): """Adds a method to wait for instance status to become ACTIVE.""" def _wait_for_active(self, *acceptable_states): global json_instance json_instance = self.client.instances.get(json_instance.id) while json_instance.status not in CONFIG.running_status: assert_true( json_instance.status in acceptable_states, "Instance status == %s; expected it to be one of: %s" % (json_instance.status, acceptable_states)) time.sleep(0.1) json_instance = 
self.client.instances.get(json_instance.id) def _wait_for_restore_active(self, *acceptable_states): for instance in (self.json_restore, ): instance = self.client.instances.get(instance.id) while instance.status not in CONFIG.running_status: assert_true( instance.status in acceptable_states, "Instance status == %s; expected it to be one of: %s" % (instance.status, acceptable_states)) time.sleep(0.1) instance = self.client.instances.get(instance.id) STATE = { "CONFIGURATION": None, "DATASTORE_ID": None, "DATASTORE_VERSION_ID": None, } @test(depends_on=[CreateInstance], groups=['uses_instances']) class Datastores(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def get_datastores_list(self): self.datastores = self.snippet( "datastores_list", "/datastores", "GET", 200, "OK", lambda client: client.datastores.list()) for result in self.datastores: assert_equal(1, len(result)) @test(depends_on=[get_datastores_list]) def get_datastore_by_id(self): ds, = self.datastores mysql_ds = [x for x in ds if x.name == 'mysql'] if not mysql_ds: fail('no mysql datastore found in list') ds_id = STATE["DATASTORE_ID"] = mysql_ds[JSON_INDEX].id self.datastore = self.snippet( "datastore_by_id", "/datastores/%s" % ds_id, "GET", 200, "OK", lambda client: client.datastores.get(ds_id)) @test(depends_on=[get_datastore_by_id]) def get_datastore_versions_list(self): ds_id = STATE["DATASTORE_ID"] self.datastore_versions = self.snippet( "datastore_versions_list", "/datastores/%s/versions" % ds_id, "GET", 200, "OK", lambda client: client.datastore_versions.list(ds_id)) @test(depends_on=[get_datastore_versions_list]) def get_datastore_version_by_id(self): ds_id = STATE["DATASTORE_ID"] ds_v_id = STATE["DATASTORE_VERSION_ID"] = ( self.datastore_versions[JSON_INDEX][0].id ) self.datastore_version = self.snippet( "datastore_version_by_id", "/datastores/%s/versions/%s" % (ds_id, ds_v_id), "GET", 200, "OK", lambda client: client.datastore_versions.get(ds_id, ds_v_id)) @test(depends_on=[Datastores], groups=['uses_instances']) class Configurations(ActiveMixin): @before_class def setup(self): self.client = make_client(normal_user) @test def get_configuration_parameters_for_datastore_version(self): ds_id = STATE["DATASTORE_ID"] ds_v_id = STATE["DATASTORE_VERSION_ID"] self.snippet( "configuration_parameters_for_datastore_version", "/datastores/%s/versions/%s/parameters" % (ds_id, ds_v_id), "GET", 200, "OK", lambda client: client.configuration_parameters.parameters( ds_id, ds_v_id ) ) @test def get_configuration_parameters_without_datastore_version(self): ds_v_id = STATE["DATASTORE_VERSION_ID"] self.params = self.snippet( "configuration_parameters_without_datastore_version", "/datastores/versions/%s/parameters" % (ds_v_id), "GET", 200, "OK", lambda client: ( client.configuration_parameters.parameters_by_version(ds_v_id) ) ) assert_true(self.params) @test(depends_on=[get_configuration_parameters_without_datastore_version]) def get_configuration_parameter_for_datastore_version(self): ds_id = STATE["DATASTORE_ID"] ds_v_id = STATE["DATASTORE_VERSION_ID"] param = self.params[JSON_INDEX][0].name self.snippet( "configuration_parameter_for_datastore_version", "/datastores/%s/versions/%s/parameters/%s" % (ds_id, ds_v_id, param), "GET", 200, "OK", lambda client: client.configuration_parameters.get_parameter( ds_id, ds_v_id, param)) @test(depends_on=[get_configuration_parameters_without_datastore_version]) def get_configuration_parameter_without_datastore_version(self): ds_v_id = STATE["DATASTORE_VERSION_ID"] 
param = self.params[JSON_INDEX][0].name def get_param(client): return client.configuration_parameters.get_parameter_by_version( ds_v_id, param ) self.params = self.snippet( "configuration_parameter_without_datastore_version", "/datastores/versions/%s/parameters/%s" % (ds_v_id, param), "GET", 200, "OK", get_param ) @test(depends_on=[get_configuration_parameter_without_datastore_version]) def create_configuration(self): set_fake_stuff(uuid=EXAMPLE_CONFIG_ID) ds_id = STATE["DATASTORE_ID"] ds_v_id = STATE["DATASTORE_VERSION_ID"] values = { "connect_timeout": 120, "collation_server": "latin1_swedish_ci" } def create(client): config = client.configurations.create( 'example-configuration-name', json.dumps(values), 'example description', ds_id, ds_v_id) return config self.configurations = self.snippet( "configuration_create", "/configurations", "POST", 200, "OK", create) STATE["CONFIGURATION"] = self.configurations[JSON_INDEX] @test(depends_on=[create_configuration]) def get_configuration(self): config = STATE["CONFIGURATION"] self.config = self.snippet( "configuration_details", "/configurations/%s" % config.id, "GET", 200, "OK", lambda client: client.configurations.get(config.id)) @test(depends_on=[create_configuration]) def list_configurations(self): self.configs = self.snippet( "configuration_list", "/configurations", "GET", 200, "OK", lambda client: client.configurations.list()) @test(depends_on=[list_configurations, get_configuration]) def edit_configuration(self): config = STATE["CONFIGURATION"] values = { 'connect_timeout': 300 } self.snippet( "configuration_edit_parameters", "/configurations/%s" % config.id, "PATCH", 200, "OK", lambda client: client.configurations.edit( config.id, json.dumps(values))) @test(depends_on=[edit_configuration]) def update_configuration(self): config = STATE["CONFIGURATION"] values = { 'connect_timeout': 150, 'collation_server': 'utf8_unicode_ci' } self.snippet( "configuration_update_parameters", "/configurations/%s" % config.id, "PUT", 202, "Accepted", lambda client: client.configurations.update( config.id, json.dumps(values), 'example-updated-name', 'example updated description')) @test(depends_on=[update_configuration]) def attach_configuration_to_instance(self): config = STATE["CONFIGURATION"] self.snippet( "configuration_attach_to_instance", "/instances/%s" % json_instance.id, "PUT", 202, "Accepted", lambda client: client.instances.modify( json_instance.id, config.id ) ) @test(depends_on=[attach_configuration_to_instance]) def list_configurations_instances(self): config = STATE["CONFIGURATION"] self.config_instances = self.snippet( "configuration_list_instances", "/configurations/%s/instances" % config.id, "GET", 200, "OK", lambda client: client.configurations.instances(config.id)) @test(depends_on=[list_configurations_instances]) def detach_configuration_from_instance(self): self.snippet( "configuration_detach_from_instance", "/instances/%s" % json_instance.id, "PUT", 202, "Accepted", lambda client: client.instances.modify( json_instance.id, "")) @test(depends_on=[detach_configuration_from_instance]) def instance_restart_after_configration_change(self): self.client.instances.restart(json_instance.id) self._wait_for_active("REBOOT") @test(depends_on=[CreateInstance], groups=['uses_instances']) class InstanceList(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def get_list_instance_index(self): results = self.snippet( "instances_index", "/instances", "GET", 200, "OK", lambda client: client.instances.list()) for result in 
results: assert_equal(1, len(result)) @test def get_instance_details(self): results = self.snippet( "instance_status_detail", "/instances/%s" % json_instance.id, "GET", 200, "OK", lambda client: client.instances.get(json_instance.id)) assert_equal(results[JSON_INDEX].id, json_instance.id) @test def get_default_instance_configuration(self): set_fake_stuff(unique_id=EXAMPLE_CONFIG_SERVER_ID) self.snippet( "get_default_instance_configuration", "/instances/%s/configuration" % json_instance.id, "GET", 200, "OK", lambda client: client.instances.configuration(json_instance.id)) @test def get_list_instance_index_limit_two(self): third_instance = self.client.instances.create( "The Third Instance", 1, volume={'size': 2}) third_instance = self.client.instances.get(third_instance.id) while third_instance.status not in CONFIG.running_status: time.sleep(0.1) third_instance = self.client.instances.get(third_instance.id) results = self.snippet( "instances_index_pagination", "/instances?limit=2", "GET", 200, "OK", lambda client: client.instances.list(limit=2)) for result in results: assert_equal(2, len(result)) self.client.instances.delete(third_instance.id) @test(depends_on=[CreateInstance], groups=['uses_instances']) class Backups(ActiveMixin): @before_class def setup(self): self.client = make_client(normal_user) @test def create_backup(self): set_fake_stuff(uuid=EXAMPLE_BACKUP_ID) results = self.snippet( "backup_create", "/backups", "POST", 202, "Accepted", lambda client: client.backups.create( name='snapshot', instance=json_instance.id, description="My Backup" ) ) self._wait_for_active("BACKUP") assert_equal(len(results), 1) self.json_backup = results[JSON_INDEX] @test(depends_on=[create_backup]) def create_incremental_backup(self): set_fake_stuff(uuid=EXAMPLE_BACKUP_INCREMENTAL_ID) results = self.snippet( "backup_create_incremental", "/backups", "POST", 202, "Accepted", lambda client: client.backups.create( name='Incremental Snapshot', instance=json_instance.id, parent_id=EXAMPLE_BACKUP_ID, description="My Incremental Backup" ) ) self._wait_for_active("BACKUP") assert_equal(len(results), 1) self.json_backup2 = results[JSON_INDEX] @test(depends_on=[create_incremental_backup]) def get_backup(self): results = self.snippet( "backup_get", "/backups/%s" % self.json_backup.id, "GET", 200, "OK", lambda client: client.backups.get(self.json_backup.id)) assert_equal(len(results), 1) @test(depends_on=[create_incremental_backup]) def get_backups_for_instance(self): results = self.snippet( "backups_by_instance", "/instances/%s/backups" % json_instance.id, "GET", 200, "OK", lambda client: client.instances.backups(json_instance.id)) assert_equal(len(results), 1) @test(depends_on=[create_incremental_backup]) def list_backups(self): results = self.snippet( "backup_list", "/backups", "GET", 200, "OK", lambda client: client.backups.list()) assert_equal(len(results), 1) @test(depends_on=[create_backup]) def restore(self): set_fake_stuff(uuid=EXAMPLE_INSTANCE_ID_2) def create_instance(client, name, backup): instance = client.instances.create( name, 1, volume={'size': 2}, restorePoint={'backupRef': backup}) assert_equal(instance.status, "BUILD") return instance results = self.snippet( "backup_restore", "/instances", "POST", 200, "OK", lambda client: create_instance( client, "backup_instance", self.json_backup.id)) assert_equal(len(results), 1) self.json_restore = results[JSON_INDEX] self._wait_for_restore_active("BUILD") self.json_restore = self.client.instances.get(self.json_restore.id) 
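# At this point _wait_for_restore_active() has polled until the restored
# instance left BUILD; the re-fetched record should now report one of the
# configured running states (ACTIVE or HEALTHY by default).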
asserts.assert_true(self.json_restore.status in CONFIG.running_status) @test(depends_on=[restore]) def delete_restores(self): self.snippet( "restore_delete", "/instances/%s" % self.json_restore.id, "DELETE", 202, "Accepted", lambda client: client.instances.delete(self.json_restore.id)) self.json_restore = self.client.instances.get(self.json_restore.id) assert_equal(self.json_restore.status, "SHUTDOWN") @test(depends_on=[create_backup], runs_after=[get_backup, list_backups, restore, get_backups_for_instance]) def delete_backup(self): results = self.snippet( "backup_delete", "/backups/%s" % self.json_backup.id, "DELETE", 202, "Accepted", lambda client: client.backups.delete(self.json_backup.id)) assert_equal(len(results), 1) @test(depends_on=[CreateInstance], groups=['uses_instances']) class Actions(ActiveMixin): @before_class def setup(self): self.client = make_client(normal_user) @test def instance_restart(self): self.snippet( "instance_restart", "/instances/%s/action" % json_instance.id, "POST", 202, "Accepted", lambda client: client.instances.restart(json_instance.id)) self._wait_for_active("REBOOT") @test def instance_resize_volume(self): self.snippet( "instance_resize_volume", "/instances/%s/action" % json_instance.id, "POST", 202, "Accepted", lambda client: client.instances.resize_volume(json_instance.id, 4)) self._wait_for_active("RESIZE") assert_equal(json_instance.volume['size'], 4) @test def instance_resize_flavor(self): self.snippet( "instance_resize_flavor", ("/instances/%s/action" % json_instance.id), "POST", 202, "Accepted", lambda client: client.instances.resize_instance( json_instance.id, 3)) self._wait_for_active("RESIZE") # TODO(imsplitbit): remove coercion when troveclient fixes are in assert_equal(int(json_instance.flavor['id']), 3) @test(depends_on=[CreateInstance], groups=['uses_instances', "MgmtHosts"]) class MgmtHosts(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_list_hosts(self): results = self.snippet( "mgmt_list_hosts", "/mgmt/hosts", "GET", 200, "OK", lambda client: client.mgmt.hosts.index()) with Check() as check: for hosts in results: check.equal(2, len(hosts)) check.true("fake_host_1" == hosts[0].name or "fake_host_1" == hosts[1].name) check.true("fake_host_2" == hosts[0].name or "fake_host_2" == hosts[1].name) check.true(1 == results[0][1].instanceCount or 1 == results[0][0].instanceCount) @test def mgmt_get_host_detail(self): results = self.snippet( "mgmt_get_host_detail", "/mgmt/hosts/fake_host_1", "GET", 200, "OK", lambda client: client.mgmt.hosts.get("fake_host_1")) with Check() as check: for host in results: check.equal(results[0].name, "fake_host_1") # XML entries won't come back as these types. :( check.true(isinstance(results[0].percentUsed, int)), check.true(isinstance(results[0].totalRAM, int)), check.true(isinstance(results[0].usedRAM, int)), with Check() as check: for host in results: check.equal(1, len(host.instances)) for instance in host.instances: check.equal(instance['status'], 'HEALTHY') check.true(isinstance(instance['name'], six.string_types)) check.true(isinstance(instance['id'], six.string_types)) check.true(isinstance(instance['server_id'], six.string_types)) check.true(isinstance(instance['tenant_id'], six.string_types)) @test def mgmt_host_update_all(self): raise SkipTest("This isn't working... 
:(") self.snippet( "mgmt_host_update", "/mgmt/hosts/fake_host_1/instances/action", "POST", 202, "Accepted", lambda client: client.mgmt.hosts.update_all("fake_host_1")) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtStorage(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_storage(self): results = self.snippet( "mgmt_get_storage", "/mgmt/storage", "GET", 200, "OK", lambda client: client.mgmt.storage.index()) for index, devices in enumerate(results): with Check() as check: check.equal(1, len(devices)) device = devices[0] check.equal(int(device.capacity['available']), 90) check.equal(int(device.capacity['total']), 100) check.equal(device.name, "fake_storage") check.equal(int(device.provision['available']), 40) check.equal(int(device.provision['percent']), 10) check.equal(int(device.provision['total']), 50) check.equal(device.type, "test_type") check.equal(int(device.used), 10) if index == JSON_INDEX: check.true(isinstance(device.capacity['available'], int)) check.true(isinstance(device.capacity['total'], int)) check.true(isinstance(device.provision['available'], int)) check.true(isinstance(device.provision['percent'], int)) check.true(isinstance(device.provision['total'], int)) check.true(isinstance(device.used, int)) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtAccount(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_account_details(self): results = self.snippet( "mgmt_get_account_details", "/mgmt/accounts/%s" % conf['normal_user_tenant'], "GET", 200, "OK", lambda client: client.mgmt.accounts.show( conf['normal_user_tenant'], )) with Check() as check: for account_info in results: check.equal(conf['normal_user_tenant'], account_info.id) @test def mgmt_get_account_list(self): results = self.snippet( "mgmt_list_accounts", "/mgmt/accounts", "GET", 200, "OK", lambda client: client.mgmt.accounts.index()) matches = {conf['normal_user_tenant']: 2, conf['admin_user_tenant']: 0} for index, result in enumerate(results): for account in result.accounts: if account['id'] not in matches: fail("Did not expect this account ID: %s" % account['id']) expected_count = matches[account['id']] if index == JSON_INDEX: assert_equal(2, expected_count) else: assert_equal(2, expected_count) def for_both(func): @functools.wraps(func) def both(self): for result in self.results: func(self, result) return both @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtInstance(Example): @before_class def mgmt_get_instance_details(self): self.client = make_client(admin_user) self.results = self.snippet( "mgmt_get_instance_details", ("/mgmt/instances/%s" % json_instance.id), "GET", 200, "OK", lambda client: client.mgmt.instances.show(json_instance.id)) @test @for_both def created(self, result): assert_true(isinstance(result.created, six.string_types)) @test def deleted(self): assert_equal(self.results[JSON_INDEX].deleted, False) @test @for_both def flavor(self, result): # TODO(imsplitbit): remove the coercion when python-troveclient fixes # land in the public. 
assert_true( int(result.flavor['id']) == 1 or int(result.flavor['id']) == 3) assert_equal(len(result.flavor['links']), 2) @test @for_both def guest_status(self, result): assert_equal(result.guest_status['state_description'], 'running') @test(enabled=False) @for_both def host(self, result): assert_equal(result.host, 'fake_host_1') @test def id(self): assert_equal(self.results[JSON_INDEX].id, json_instance.id) @test @for_both def links(self, result): assert_true(isinstance(result.links, list)) for link in result.links: assert_true(isinstance(link, dict)) assert_true(isinstance(link['href'], six.string_types)) assert_true(isinstance(link['rel'], six.string_types)) @test def local_id(self): assert_true(isinstance(self.results[JSON_INDEX].server['local_id'], int)) @test @for_both def name(self, result): assert_true(isinstance(result.name, six.string_types)) @test @for_both def server_id(self, result): assert_true(isinstance(result.server['id'], six.string_types)) @test @for_both def status(self, result): assert_equal("ACTIVE", result.status) @test @for_both def task_description(self, result): assert_equal(result.task_description, "No tasks for the instance.") @test @for_both def tenant_id(self, result): assert_equal(result.tenant_id, conf['normal_user_tenant']) @test @for_both def updated(self, result): assert_true(isinstance(result.updated, six.string_types)) @test @for_both def volume(self, result): assert_true(isinstance(result.volume, dict)) assert_true('id' in result.volume) assert_true('size' in result.volume) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtInstanceIndex(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_instance_index(self, deleted=False): self.snippet( "mgmt_instance_index", "/mgmt/instances?deleted=false", "GET", 200, "OK", lambda client: client.mgmt.instances.index(deleted=False)) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtInstanceDiagnostics(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_instance_diagnostics(self): self.snippet( "mgmt_instance_diagnostics", ("/mgmt/instances/%s/diagnostics" % json_instance.id), "GET", 200, "OK", lambda client: client.diagnostics.get(json_instance.id)) @test(depends_on=[CreateInstance]) class MgmtInstanceRoot(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_root_details(self): self.snippet( "mgmt_get_root_details", ("/mgmt/instances/%s/root" % json_instance.id), "GET", 200, "OK", lambda client: client.mgmt.instances.root_enabled_history( json_instance.id) ) @test(depends_on=[CreateInstance], enabled=False) class MgmtInstanceHWInfo(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_get_hw_info(self): self.snippet( "mgmt_get_hw_info", ("/mgmt/instances/%s/hwinfo" % json_instance.id), "GET", 200, "OK", lambda client, id: client.hw_info.get(id), ([json_instance.id], )) @test(depends_on=[CreateInstance], groups=['uses_instances']) class MgmtInstanceReboot(Example): @before_class def setup(self): self.client = make_client(admin_user) @test def mgmt_instance_reboot(self): self.snippet( "instance_reboot", ("/mgmt/instances/%s/action" % json_instance.id), "POST", 202, "Accepted", lambda client: client.mgmt.instances.reboot(json_instance.id)) @test(depends_on=[CreateInstance], groups=['uses_instances'], enabled=False) class MgmtInstanceGuestUpdate(Example): @before_class def setup(self): self.client = 
make_client(admin_user) @test def mgmt_instance_guest_update(self): self.snippet( "guest_update", ("/mgmt/instances/%s/action" % json_instance.id), "POST", 202, "Accepted", lambda client: client.mgmt.instances.update(json_instance.id)) @test(depends_on=[CreateInstance], runs_after_groups=['uses_instances']) class ZzzDeleteInstance(Example): @before_class def setup(self): self.client = make_client(normal_user) @test def zzz_delete_instance(self): global json_instance self.snippet( "delete_instance", "/instances/%s" % json_instance.id, "DELETE", 202, "Accepted", lambda client: client.instances.delete(json_instance.id)) json_instance = self.client.instances.get(json_instance.id) assert_equal(json_instance.status, "SHUTDOWN") @test(depends_on=[zzz_delete_instance]) def delete_configuration(self): config = STATE["CONFIGURATION"] self.configs = self.snippet( "configuration_delete", ("/configurations/%s" % config.id), "DELETE", 202, "Accepted", lambda client: client.configurations.delete(config.id)) if __name__ == "__main__": CONFIG.load_from_file("etc/tests/localhost.test.conf") TestProgram().run_and_exit() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7801108 trove-12.1.0.dev92/trove/tests/fakes/0000755000175000017500000000000000000000000017541 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/__init__.py0000644000175000017500000000136000000000000021652 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Implements a fake version of the models code so that the server can be stood up and run under test quickly. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/common.py0000644000175000017500000000153000000000000021402 0ustar00coreycorey00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Common code to help in faking the models.""" from novaclient import exceptions as nova_exceptions def authorize(context): if not context.is_admin: raise nova_exceptions.Forbidden(403, "Forbidden") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/conf.py0000644000175000017500000000137000000000000021041 0ustar00coreycorey00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class FakeConf(object): def __init__(self, conf_dict): self._conf = conf_dict def __getattr__(self, name): return self._conf[name] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/dns.py0000644000175000017500000000641200000000000020702 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from proboscis.asserts import assert_equal from proboscis.asserts import assert_true from proboscis.asserts import fail from trove.dns import driver LOG = logging.getLogger(__name__) ENTRIES = {} class FakeDnsDriver(driver.DnsDriver): def create_entry(self, entry, content): """Pretend to create a DNS entry somewhere. Since nothing else tests that this works, there's nothing more to do here. """ entry.content = content assert_true(entry.name not in ENTRIES) LOG.debug("Adding fake DNS entry for hostname %s.", entry.name) ENTRIES[entry.name] = entry def delete_entry(self, name, type, dns_zone=None): LOG.debug("Deleting fake DNS entry for hostname %s", name) ENTRIES.pop(name, None) class FakeDnsInstanceEntryFactory(driver.DnsInstanceEntryFactory): def create_entry(self, instance_id): # Construct hostname using pig-latin. hostname = "%s-lay" % instance_id LOG.debug("Mapping instance_id %(id)s to hostname %(host)s", {'id': instance_id, 'host': hostname}) return driver.DnsEntry(name=hostname, content=None, type="A", ttl=42, dns_zone=None) class FakeDnsChecker(object): """Used by tests to make sure a DNS record was written in fake mode.""" def __call__(self, mgmt_instance): """ Given an instance ID and ip address, confirm that the proper DNS record was stored in Designate or some other DNS system. """ entry = FakeDnsInstanceEntryFactory().create_entry(mgmt_instance.id) # Confirm DNS entry shown to user is what we expect. 
assert_equal(entry.name, mgmt_instance.hostname) hostname = entry.name for i in ENTRIES: print(i) print("\t%s" % ENTRIES[i]) assert_true(hostname in ENTRIES, "Hostname %s not found in DNS entries!" % hostname) entry = ENTRIES[hostname] # See if the ip address assigned to the record is what we expect. # This isn't perfect, but for Fake Mode its good enough. If we # really want to know exactly what it should be then we should restore # the ability to return the IP from the API as well as a hostname, # since that lines up to the DnsEntry's content field. ip_addresses = mgmt_instance.server['addresses'] for network_name, ip_list in ip_addresses.items(): for ip in ip_list: if entry.content == ip['addr']: return fail("Couldn't find IP address %s among these values: %s" % (entry.content, ip_addresses)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/fakes/guestagent.py0000644000175000017500000003274500000000000022274 0ustar00coreycorey00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import time import eventlet from oslo_log import log as logging from trove.common import exception as rd_exception from trove.common import instance as rd_instance from trove.tests.util import unquote_user_host DB = {} LOG = logging.getLogger(__name__) BACKUP_SIZE = 0.14 class FakeGuest(object): def __init__(self, id): self.id = id self.users = {} self.dbs = {} self.root_was_enabled = False self.version = 1 self.grants = {} self.overrides = {} # Our default admin user. self._create_user({ "_name": "os_admin", "_host": "%", "_password": "12345", "_databases": [], }) def get_hwinfo(self): return {'mem_total': 524288, 'num_cpus': 1} def get_diagnostics(self): return { 'version': str(self.version), 'fd_size': 64, 'vm_size': 29096, 'vm_peak': 29160, 'vm_rss': 2872, 'vm_hwm': 2872, 'threads': 2 } def update_guest(self): LOG.debug("Updating guest %s", self.id) self.version += 1 def _check_username(self, username): unsupported_chars = re.compile(r"""^\s|\s$|'|"|;|`|,|/|\\""") if (not username or unsupported_chars.search(username) or ("%r" % username).find("\\") != -1): raise ValueError("'%s' is not a valid user name." % username) if len(username) > 16: raise ValueError("User name '%s' is too long. Max length = 16" % username) def change_passwords(self, users): for user in users: # Use the model to check validity. username = user['name'] self._check_username(username) hostname = user['host'] password = user['password'] if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s@%s cannot be found on the instance." % (username, hostname)) self.users[(username, hostname)]['password'] = password def update_attributes(self, username, hostname, user_attrs): LOG.debug("Updating attributes") self._check_username(username) if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s@%s cannot be found on the instance." 
% (username, hostname)) new_name = user_attrs.get('name') new_host = user_attrs.get('host') new_password = user_attrs.get('password') old_name = username old_host = hostname name = new_name or old_name host = new_host or old_host if new_name or new_host: old_grants = self.grants.get((old_name, old_host), set()) self._create_user({ "_name": name, "_host": host, "_password": self.users[(old_name, host)]['_password'], "_databases": [], }) self.grants[(name, host)] = old_grants del self.users[(old_name, old_host)] if new_password: self.users[(name, host)]['_password'] = new_password def create_database(self, databases): for db in databases: self.dbs[db['_name']] = db def create_user(self, users): for user in users: self._create_user(user) def _create_user(self, user): username = user['_name'] self._check_username(username) hostname = user['_host'] if hostname is None: hostname = '%' self.users[(username, hostname)] = user print("CREATING %s @ %s" % (username, hostname)) databases = [db['_name'] for db in user['_databases']] self.grant_access(username, hostname, databases) return user def delete_database(self, database): if database['_name'] in self.dbs: del self.dbs[database['_name']] def enable_root(self): self.root_was_enabled = True return self._create_user({ "_name": "root", "_host": "%", "_password": "12345", "_databases": [], }) def enable_root_with_password(self, root_password=None): self.root_was_enabled = True return self._create_user({ "_name": "root", "_host": "%", "_password": "12345", "_databases": [], }) def disable_root(self): self.delete_user({ "_name": "root", "_host": "%"}) def delete_user(self, user): username = user['_name'] self._check_username(username) hostname = user['_host'] self.grants[(username, hostname)] = set() if (username, hostname) in self.users: del self.users[(username, hostname)] def is_root_enabled(self): return self.root_was_enabled def _list_resource(self, resource, limit=None, marker=None, include_marker=False): names = sorted([name for name in resource]) if marker in names: if not include_marker: # Cut off everything left of and including the marker item. names = names[names.index(marker) + 1:] else: names = names[names.index(marker):] next_marker = None if limit: if len(names) > limit: next_marker = names[limit - 1] names = names[:limit] return [resource[name] for name in names], next_marker def list_databases(self, limit=None, marker=None, include_marker=False): return self._list_resource(self.dbs, limit, marker, include_marker) def list_users(self, limit=None, marker=None, include_marker=False): # The markers for users are a composite of the username and hostname. names = sorted(["%s@%s" % (name, host) for (name, host) in self.users]) if marker in names: if not include_marker: # Cut off everything left of and including the marker item. names = names[names.index(marker) + 1:] else: names = names[names.index(marker):] next_marker = None if limit: if len(names) > limit: next_marker = names[limit - 1] names = names[:limit] return ([self.users[unquote_user_host(userhost)] for userhost in names], next_marker) def get_user(self, username, hostname): self._check_username(username) for (u, h) in self.users: print("%r @ %r" % (u, h)) if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s@%s cannot be found on the instance." 
% (username, hostname)) return self.users.get((username, hostname), None) def prepare(self, memory_mb, packages, databases, users, device_path=None, mount_point=None, backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None, modules=None): from trove.guestagent.models import AgentHeartBeat from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus LOG.debug("users... %s", users) LOG.debug("databases... %s", databases) instance_name = DBInstance.find_by(id=self.id).name self.create_user(users) self.create_database(databases) self.overrides = overrides or {} def update_db(): status = InstanceServiceStatus.find_by(instance_id=self.id) if instance_name.endswith('GUEST_ERROR'): status.status = rd_instance.ServiceStatuses.FAILED else: status.status = rd_instance.ServiceStatuses.HEALTHY status.save() AgentHeartBeat.create(instance_id=self.id) eventlet.spawn_after(3.5, update_db) def _set_task_status(self, new_status='HEALTHY'): from trove.instance.models import InstanceServiceStatus print("Setting status to %s" % new_status) states = {'HEALTHY': rd_instance.ServiceStatuses.HEALTHY, 'SHUTDOWN': rd_instance.ServiceStatuses.SHUTDOWN, } status = InstanceServiceStatus.find_by(instance_id=self.id) status.status = states[new_status] status.save() def restart(self): # All this does is restart, and shut off the status updates while it # does so. So there's actually nothing to do to fake this out except # take a nap. print("Sleeping for a second.") time.sleep(1) self._set_task_status('HEALTHY') def reset_configuration(self, config): # There's nothing to do here, since there is no config to update. pass def start_db_with_conf_changes(self, config_contents): time.sleep(2) self._set_task_status('HEALTHY') def stop_db(self, do_not_start_on_reboot=False): self._set_task_status('SHUTDOWN') def get_volume_info(self): """Return used and total volume filesystem information in GB.""" return {'used': 0.16, 'total': 4.0} def grant_access(self, username, hostname, databases): """Add a database to a users's grant list.""" if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s cannot be found on the instance." % username) current_grants = self.grants.get((username, hostname), set()) for db in databases: current_grants.add(db) self.grants[(username, hostname)] = current_grants def revoke_access(self, username, hostname, database): """Remove a database from a users's grant list.""" if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s cannot be found on the instance." % username) if database not in self.grants.get((username, hostname), set()): raise rd_exception.DatabaseNotFound( "Database %s cannot be found on the instance." % database) current_grants = self.grants.get((username, hostname), set()) if database in current_grants: current_grants.remove(database) self.grants[(username, hostname)] = current_grants def list_access(self, username, hostname): if (username, hostname) not in self.users: raise rd_exception.UserNotFound( "User %s cannot be found on the instance." 
% username) current_grants = self.grants.get((username, hostname), set()) dbs = [{'_name': db, '_collate': '', '_character_set': '', } for db in current_grants] return dbs def create_backup(self, backup_info): from trove.backup.models import Backup from trove.backup.state import BackupState backup = Backup.get_by_id(context=None, backup_id=backup_info['id']) def finish_create_backup(): backup.state = BackupState.COMPLETED backup.location = 'http://localhost/path/to/backup' backup.checksum = 'fake-md5-sum' backup.size = BACKUP_SIZE backup.save() eventlet.spawn_after(10, finish_create_backup) def mount_volume(self, device_path=None, mount_point=None): pass def unmount_volume(self, device_path=None, mount_point=None): pass def resize_fs(self, device_path=None, mount_point=None): pass def update_overrides(self, overrides, remove=False): self.overrides = overrides def apply_overrides(self, overrides): self.overrides = overrides def get_replication_snapshot(self, snapshot_info, replica_source_config=None): self.create_backup(snapshot_info) return { 'dataset': { 'datastore_manager': 'mysql', 'dataset_size': '0.0', 'volume_size': '10.0', 'snapshot_id': None }, 'replication_strategy': 'replication_strategy', 'master': '1', 'log_position': '100' } def attach_replication_slave(self, snapshot, slave_config): pass def backup_required_for_replication(self): return True def post_processing_required_for_replication(self): return False def module_list(self, context, include_contents=False): return [] def module_apply(self, context, modules=None): return [] def module_remove(self, context, module=None): pass def get_or_create(id): if id not in DB: DB[id] = FakeGuest(id) return DB[id] def fake_create_guest_client(context, id, manager=None): return get_or_create(id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/keystone.py0000644000175000017500000000514000000000000021754 0ustar00coreycorey00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class AuthProtocol(object): def __init__(self, app, conf): self.conf = conf self.app = app def __call__(self, env, start_response): token = self._get_user_token_from_header(env) user_headers = self._get_info_from_token(token) self._add_headers(env, user_headers) return self.app(env, start_response) def _header_to_env_var(self, key): """Convert header to wsgi env variable. :param key: http header name (ex. 'X-Auth-Token') :return: wsgi env variable name (ex. 
'HTTP_X_AUTH_TOKEN') """ return 'HTTP_%s' % key.replace('-', '_').upper() def _add_headers(self, env, headers): """Add http headers to environment.""" for (k, v) in headers.items(): env_key = self._header_to_env_var(k) env[env_key] = v def get_admin_token(self): return "ABCDEF0123456789" def _get_info_from_token(self, token): if token.startswith("admin"): role = "admin,%s" % token else: role = token return { 'X_IDENTITY_STATUS': 'Confirmed', 'X_TENANT_ID': token, 'X_TENANT_NAME': token, 'X_USER_ID': token, 'X_USER_NAME': token, 'X_ROLE': role, } def _get_header(self, env, key, default=None): # Copied from keystone. env_key = self._header_to_env_var(key) return env.get(env_key, default) def _get_user_token_from_header(self, env): token = self._get_header(env, 'X-Auth-Token', self._get_header(env, 'X-Storage-Token')) if token: return token else: raise RuntimeError('Unable to find token in headers') def filter_factory(global_conf, **local_conf): """Fakes a keystone filter.""" conf = global_conf.copy() conf.update(local_conf) def auth_filter(app): return AuthProtocol(app, conf) return auth_filter ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/limits.py0000644000175000017500000000143700000000000021421 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from trove.common import limits ENABLED = False class FakeRateLimitingMiddleware(limits.RateLimitingMiddleware): def enabled(self): return ENABLED ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/neutron.py0000644000175000017500000000377700000000000021623 0ustar00coreycorey00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
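# FakeNeutronClient below answers a small subset of the python-neutronclient
# API with canned payloads. A short sketch of the behavior fake-mode tests
# rely on (the return values are the hardcoded fakes from this module, and
# fake_create_neutron_client is defined at the end of the module):
#
#     client = fake_create_neutron_client(context)
#     client.list_networks(**{'router:external': True})
#     # -> {'networks': [{'id': 'fake-public-net-id'}]}
#     client.create_port({'port': {'description': 'Management port'}})
#     # -> {'port': {'id': 'fake-mgmt-port-id'}}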
class FakeNeutronClient(object): def __init__(self, context): self.context = context def show_network(self, *arg, **kwargs): return {'network': {'name': 'fake-mgmt-net-name'}} def list_networks(self, *arg, **kwargs): if 'router:external' in kwargs: return {'networks': [{'id': 'fake-public-net-id'}]} return {'networks': []} def create_port(self, body): if 'Management' in body['port'].get('description', ''): return {'port': {'id': 'fake-mgmt-port-id'}} return {'port': {'id': 'fake-user-port-id'}} def delete_port(self, *arg, **kwargs): pass def list_ports(self, *arg, **kwargs): return {'ports': []} def create_floatingip(self, *arg, **kwargs): pass def list_floatingips(self, *arg, **kwargs): return {'floatingips': []} def update_floatingip(self, *arg, **kwargs): pass def delete_floatingip(self, *arg, **kwargs): pass def create_security_group(self, *arg, **kwargs): return {'security_group': {'id': 'fake-sg-id'}} def create_security_group_rule(self, *arg, **kwargs): pass def list_security_groups(self, *arg, **kwargs): return {'security_groups': []} def delete_security_group(self, *arg, **kwargs): pass def fake_create_neutron_client(context, region_name=None): return FakeNeutronClient(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/nova.py0000644000175000017500000006263700000000000021074 0ustar00coreycorey00000000000000# Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
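# Like the other fakes in this package, this module exposes factory functions
# (fake_create_nova_client and friends, defined at the bottom) whose
# signatures mirror the real client factories, so fake mode can be switched on
# through configuration alone. A sketch of the expected wiring (the option
# names are assumed from the fake-mode test configuration, not verified here):
#
#     [DEFAULT]
#     remote_nova_client = trove.tests.fakes.nova.fake_create_nova_client
#     remote_cinder_client = trove.tests.fakes.nova.fake_create_cinder_client
#     remote_swift_client = trove.tests.fakes.swift.fake_create_swift_client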
from novaclient import exceptions as nova_exceptions from oslo_log import log as logging from trove.common.exception import PollTimeOut from trove.common import instance as rd_instance from trove.tests.fakes.common import authorize import collections import eventlet import uuid LOG = logging.getLogger(__name__) FAKE_HOSTS = ["fake_host_1", "fake_host_2"] class FakeFlavor(object): def __init__(self, id, disk, name, ram, ephemeral=0, vcpus=10): self.id = id self.disk = disk self.name = name self.ram = ram self.vcpus = vcpus self.ephemeral = ephemeral @property def links(self): url = ("http://localhost:8774/v2/5064d71eb09c47e1956cf579822bae9a/" "flavors/%s") % self.id return [{"href": url, "rel": link_type} for link_type in ['self', 'bookmark']] @property def href_suffix(self): return "flavors/%s" % self.id class FakeFlavors(object): def __init__(self): self.db = {} self._add(1, 0, "m1.tiny", 512) self._add(2, 20, "m1.small", 2048) self._add(3, 40, "m1.medium", 4096) self._add(4, 80, "m1.large", 8192) self._add(5, 160, "m1.xlarge", 16384) self._add(6, 0, "m1.nano", 64) self._add(7, 0, "m1.micro", 128) self._add(8, 2, "m1.rd-smaller", 768) self._add(9, 10, "tinier", 506) self._add(10, 2, "m1.rd-tiny", 512) self._add(11, 0, "eph.rd-tiny", 512, 1) self._add(12, 20, "eph.rd-smaller", 768, 2) self._add("custom", 25, "custom.small", 512, 1) def _add(self, *args, **kwargs): new_flavor = FakeFlavor(*args, **kwargs) self.db[new_flavor.id] = new_flavor def get(self, id): try: id = int(id) except ValueError: pass if id not in self.db: raise nova_exceptions.NotFound(404, "Flavor id not found %s" % id) return self.db[id] def get_by_href(self, href): for id in self.db: value = self.db[id] # Use inexact match since faking the exact endpoints would be # difficult. if href.endswith(value.href_suffix): return value raise nova_exceptions.NotFound(404, "Flavor href not found %s" % href) def list(self): return [self.get(id) for id in self.db] class FakeServer(object): next_local_id = 0 def __init__(self, parent, owner, id, name, image_id, flavor_ref, volumes, key_name): self.owner = owner # This is a context. self.id = id self.parent = parent self.name = name self.image_id = image_id self.flavor_ref = flavor_ref self.old_flavor_ref = None self._current_status = "BUILD" self.volumes = volumes # This is used by "RdServers". Its easier to compute the # fake value in this class's initializer. 
self._local_id = self.next_local_id self.next_local_id += 1 info_vols = [] for volume in self.volumes: info_vols.append({'id': volume.id}) volume.set_attachment(id) volume.schedule_status("in-use", 1) self.host = FAKE_HOSTS[0] self.old_host = None setattr(self, 'OS-EXT-AZ:availability_zone', 'nova') self._info = {'os:volumes': info_vols} self.key_name = key_name @property def addresses(self): return {"private": [{"addr": "123.123.123.123"}]} def confirm_resize(self): if self.status != "VERIFY_RESIZE": raise RuntimeError("Not in resize confirm mode.") self._current_status = "HEALTHY" def revert_resize(self): if self.status != "VERIFY_RESIZE": raise RuntimeError("Not in resize confirm mode.") self.host = self.old_host self.old_host = None self.flavor_ref = self.old_flavor_ref self.old_flavor_ref = None self._current_status = "HEALTHY" def reboot(self): LOG.debug("Rebooting server %s", self.id) def set_to_active(): self._current_status = "HEALTHY" self.parent.schedule_simulate_running_server(self.id, 1.5) self._current_status = "REBOOT" eventlet.spawn_after(1, set_to_active) def delete(self): self.schedule_status = [] # TODO(pdmars): This is less than ideal, but a quick way to force it # into the error state before scheduling the delete. if (self.name.endswith("_ERROR_ON_DELETE") and self._current_status != "SHUTDOWN"): # Fail to delete properly the first time, just set the status # to SHUTDOWN and break. It's important that we only fail to delete # once in fake mode. self._current_status = "SHUTDOWN" return self._current_status = "SHUTDOWN" self.parent.schedule_delete(self.id, 1.5) @property def flavor(self): return FLAVORS.get_by_href(self.flavor_ref).__dict__ @property def links(self): url = "https://localhost:9999/v1.0/1234/instances/%s" % self.id return [{"href": url, "rel": link_type} for link_type in ['self', 'bookmark']] def migrate(self, force_host=None): self.resize(None, force_host) def resize(self, new_flavor_id=None, force_host=None): self._current_status = "RESIZE" if self.name.endswith("_RESIZE_TIMEOUT"): raise PollTimeOut() def set_to_confirm_mode(): self._current_status = "VERIFY_RESIZE" def set_to_active(): self.parent.schedule_simulate_running_server(self.id, 1.5) eventlet.spawn_after(1, set_to_active) def change_host(): self.old_host = self.host if not force_host: self.host = [host for host in FAKE_HOSTS if host != self.host][0] else: self.host = force_host def set_flavor(): if self.name.endswith("_RESIZE_ERROR"): self._current_status = "HEALTHY" return if new_flavor_id is None: # Migrations are flavorless flavor resizes. # A resize MIGHT change the host, but a migrate # deliberately does. LOG.debug("Migrating fake instance.") eventlet.spawn_after(0.75, change_host) else: LOG.debug("Resizing fake instance.") self.old_flavor_ref = self.flavor_ref flavor = self.parent.flavors.get(new_flavor_id) self.flavor_ref = flavor.links[0]['href'] eventlet.spawn_after(1, set_to_confirm_mode) eventlet.spawn_after(0.8, set_flavor) def schedule_status(self, new_status, time_from_now): """Makes a new status take effect at the given time.""" def set_status(): self._current_status = new_status eventlet.spawn_after(time_from_now, set_status) @property def status(self): return self._current_status @property def created(self): return "2012-01-25T21:55:51Z" @property def updated(self): return "2012-01-25T21:55:51Z" @property def tenant(self): # This is on the RdServer extension type. 
return self.owner.tenant @property def tenant_id(self): return self.owner.tenant # The global var contains the servers dictionary in use for the life of these # tests. FAKE_SERVERS_DB = {} class FakeServers(object): def __init__(self, context, flavors): self.context = context self.db = FAKE_SERVERS_DB self.flavors = flavors def can_see(self, id): """Can this FakeServers, with its context, see some resource?""" server = self.db[id] return (self.context.is_admin or server.owner.tenant == self.context.project_id) def create(self, name, image_id, flavor_ref, files=None, userdata=None, block_device_mapping_v2=None, security_groups=None, availability_zone=None, nics=None, config_drive=False, scheduler_hints=None, key_name=None): id = "FAKE_%s" % uuid.uuid4() volumes = self._get_volumes_from_bdm_v2(block_device_mapping_v2) server = FakeServer(self, self.context, id, name, image_id, flavor_ref, volumes, key_name) self.db[id] = server if name.endswith('SERVER_ERROR'): raise nova_exceptions.ClientException("Fake server create error.") if availability_zone == 'BAD_ZONE': raise nova_exceptions.ClientException("The requested availability " "zone is not available.") server.schedule_status("HEALTHY", 1) LOG.info("FAKE_SERVERS_DB : %s", str(FAKE_SERVERS_DB)) return server def _get_volumes_from_bdm_v2(self, block_device_mapping_v2): volumes = [] if block_device_mapping_v2 is not None: # block_device_mapping_v2 is an array of dicts. Every dict is # a volume attachment, which contains "uuid", "source_type", # "destination_type", "device_name", "volume_size" and # "delete_on_termination". for bdm in block_device_mapping_v2: volume = self.volumes.get(bdm['uuid']) volumes.append(volume) return volumes def get(self, id): if id not in self.db: LOG.error("Couldn't find server id %(id)s, collection=%(db)s", {'id': id, 'db': self.db}) raise nova_exceptions.NotFound(404, "Not found") else: if self.can_see(id): return self.db[id] else: raise nova_exceptions.NotFound(404, "Bad permissions") def list(self): return [v for (k, v) in self.db.items() if self.can_see(v.id)] def schedule_delete(self, id, time_from_now): def delete_server(): LOG.info("Simulated event ended, deleting server %s.", id) del self.db[id] eventlet.spawn_after(time_from_now, delete_server) def schedule_simulate_running_server(self, id, time_from_now): from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus def set_server_running(): instance = DBInstance.find_by(compute_instance_id=id) LOG.debug("Setting server %s to running", instance.id) status = InstanceServiceStatus.find_by(instance_id=instance.id) status.status = rd_instance.ServiceStatuses.RUNNING status.save() eventlet.spawn_after(time_from_now, set_server_running) class FakeRdServer(object): def __init__(self, server): self.server = server self.deleted = False self.deleted_at = None # Not sure how to simulate "True" for this. self.local_id = server._local_id def __getattr__(self, name): return getattr(self.server, name) class FakeRdServers(object): def __init__(self, servers): self.servers = servers def get(self, id): return FakeRdServer(self.servers.get(id)) def list(self): # Attach the extra Rd Server stuff to the normal server. return [FakeRdServer(server) for server in self.servers.list()] class FakeVolume(object): def __init__(self, parent, owner, id, size, name, description, volume_type): self.attachments = [] self.parent = parent self.owner = owner # This is a context. 
self.id = id self.size = size self.name = name self.description = description self._current_status = "BUILD" # For some reason we grab this thing from device then call it mount # point. self.device = "vdb" self.volume_type = volume_type def __repr__(self): msg = ("FakeVolume(id=%s, size=%s, name=%s, " "description=%s, _current_status=%s)") params = (self.id, self.size, self.name, self.description, self._current_status) return (msg % params) @property def availability_zone(self): return "fake-availability-zone" @property def created_at(self): return "2001-01-01-12:30:30" def get(self, key): return getattr(self, key) def schedule_status(self, new_status, time_from_now): """Makes a new status take effect at the given time.""" def set_status(): self._current_status = new_status eventlet.spawn_after(time_from_now, set_status) def set_attachment(self, server_id): """Fake method we've added to set attachments. Idempotent.""" for attachment in self.attachments: if attachment['server_id'] == server_id: return # Do nothing self.attachments.append({'server_id': server_id, 'device': self.device}) @property def status(self): return self._current_status class FakeBlockDeviceMappingInfo(object): def __init__(self, id, device, _type, size, delete_on_terminate): self.volumeId = id self.device = device self.type = _type self.size = size self.delete_on_terminate = delete_on_terminate FAKE_VOLUMES_DB = {} class FakeVolumes(object): def __init__(self, context): self.context = context self.db = FAKE_VOLUMES_DB def can_see(self, id): """Can this FakeVolumes, with its context, see some resource?""" server = self.db[id] return (self.context.is_admin or server.owner.tenant == self.context.project_id) def get(self, id): if id not in self.db: LOG.error("Couldn't find volume id %(id)s, collection=%(db)s", {'id': id, 'db': self.db}) raise nova_exceptions.NotFound(404, "Not found") else: if self.can_see(id): return self.db[id] else: raise nova_exceptions.NotFound(404, "Bad permissions") def create(self, size, name=None, description=None, volume_type=None): id = "FAKE_VOL_%s" % uuid.uuid4() volume = FakeVolume(self, self.context, id, size, name, description, volume_type) self.db[id] = volume if size == 9: volume.schedule_status("error", 2) elif size == 13: raise Exception("No volume for you!") else: volume.schedule_status("available", 2) LOG.debug("Fake volume created %(volumeid)s with " "status %(volumestatus)s", {'volumeid': volume.id, 'volumestatus': volume.status}) LOG.info("FAKE_VOLUMES_DB : %s", FAKE_VOLUMES_DB) return volume def list(self, detailed=True): return [self.db[key] for key in self.db] def extend(self, volume_id, new_size): LOG.debug("Resize volume id (%(volumeid)s) to size (%(size)s)", {'volumeid': volume_id, 'size': new_size}) volume = self.get(volume_id) if volume._current_status != 'available': raise Exception("Invalid volume status: " "expected 'in-use' but was '%s'" % volume._current_status) def finish_resize(): volume.size = new_size eventlet.spawn_after(1.0, finish_resize) def delete_server_volume(self, server_id, volume_id): volume = self.get(volume_id) if volume._current_status != 'in-use': raise Exception("Invalid volume status: " "expected 'in-use' but was '%s'" % volume._current_status) def finish_detach(): volume._current_status = "available" eventlet.spawn_after(1.0, finish_detach) def create_server_volume(self, server_id, volume_id, device_path): volume = self.get(volume_id) if volume._current_status != "available": raise Exception("Invalid volume status: " "expected 'available' but was 
'%s'" % volume._current_status) def finish_attach(): volume._current_status = "in-use" eventlet.spawn_after(1.0, finish_attach) class FakeAccount(object): def __init__(self, id, servers): self.id = id self.servers = self._servers_to_dict(servers) def _servers_to_dict(self, servers): ret = [] for server in servers: server_dict = {} server_dict['id'] = server.id server_dict['name'] = server.name server_dict['status'] = server.status server_dict['host'] = server.host ret.append(server_dict) return ret class FakeAccounts(object): def __init__(self, context, servers): self.context = context self.db = FAKE_SERVERS_DB self.servers = servers def _belongs_to_tenant(self, tenant, id): server = self.db[id] return server.tenant == tenant def get_instances(self, id): authorize(self.context) servers = [v for (k, v) in self.db.items() if self._belongs_to_tenant(id, v.id)] return FakeAccount(id, servers) FLAVORS = FakeFlavors() class FakeHost(object): def __init__(self, name, servers): self.name = name self.servers = servers self.instances = [] self.percentUsed = 0 self.totalRAM = 0 self.usedRAM = 0 @property def instanceCount(self): return len(self.instances) def recalc(self): """ This fake-mode exclusive method recalculates the fake data this object passes back. """ self.instances = [] self.percentUsed = 0 self.totalRAM = 32000 # 16384 self.usedRAM = 0 for server in self.servers.list(): print(server) if server.host != self.name: print("\t...not on this host.") continue self.instances.append({ 'uuid': server.id, 'name': server.name, 'status': server.status }) if (str(server.flavor_ref).startswith('http:') or str(server.flavor_ref).startswith('https:')): flavor = FLAVORS.get_by_href(server.flavor_ref) else: flavor = FLAVORS.get(server.flavor_ref) ram = flavor.ram self.usedRAM += ram decimal = float(self.usedRAM) / float(self.totalRAM) self.percentUsed = int(decimal * 100) class FakeHosts(object): def __init__(self, servers): # Use an ordered dict to make the results of the fake api call # return in the same order for the example generator. 
        self.hosts = collections.OrderedDict()
        for host in FAKE_HOSTS:
            self.add_host(FakeHost(host, servers))

    def add_host(self, host):
        self.hosts[host.name] = host
        return host

    def get(self, name):
        try:
            self.hosts[name].recalc()
            return self.hosts[name]
        except KeyError:
            raise nova_exceptions.NotFound(404, "Host not found %s" % name)

    def list(self):
        for name in self.hosts:
            self.hosts[name].recalc()
        return [self.hosts[name] for name in self.hosts]


class FakeRdStorage(object):

    def __init__(self, name):
        self.name = name
        self.type = ""
        self.used = 0
        self.capacity = {}
        self.provision = {}

    def recalc(self):
        self.type = "test_type"
        self.used = 10
        self.capacity['total'] = 100
        self.capacity['available'] = 90
        self.provision['total'] = 50
        self.provision['available'] = 40
        self.provision['percent'] = 10


class FakeRdStorages(object):

    def __init__(self):
        self.storages = {}
        self.add_storage(FakeRdStorage("fake_storage"))

    def add_storage(self, storage):
        self.storages[storage.name] = storage
        return storage

    def list(self):
        for name in self.storages:
            self.storages[name].recalc()
        return [self.storages[name] for name in self.storages]


class FakeSecurityGroup(object):

    def __init__(self, name=None, description=None, context=None):
        self.name = name
        self.description = description
        self.id = "FAKE_SECGRP_%s" % uuid.uuid4()
        # Rules are kept in a list so add_rule() can append to it.
        self.rules = []

    def get_id(self):
        return self.id

    def add_rule(self, fakeSecGroupRule):
        self.rules.append(fakeSecGroupRule)
        return self.rules

    def get_rules(self):
        result = ""
        for rule in self.rules:
            result = result + str(rule.data())
        return result

    def data(self):
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description
        }


class FakeSecurityGroups(object):

    def __init__(self, context=None):
        self.context = context
        self.securityGroups = {}

    def create(self, name=None, description=None):
        secGrp = FakeSecurityGroup(name, description)
        self.securityGroups[secGrp.get_id()] = secGrp
        return secGrp

    def delete(self, group_id):
        pass

    def list(self):
        pass


class FakeSecurityGroupRule(object):

    def __init__(self, ip_protocol=None, from_port=None, to_port=None,
                 cidr=None, parent_group_id=None, context=None):
        self.group_id = parent_group_id
        self.protocol = ip_protocol
        self.from_port = from_port
        self.to_port = to_port
        self.cidr = cidr
        self.context = context
        self.id = "FAKE_SECGRP_RULE_%s" % uuid.uuid4()

    def get_id(self):
        return self.id

    def data(self):
        return {
            'id': self.id,
            'group_id': self.group_id,
            'protocol': self.protocol,
            'from_port': self.from_port,
            'to_port': self.to_port,
            'cidr': self.cidr
        }


class FakeSecurityGroupRules(object):

    def __init__(self, context=None):
        self.context = context
        self.securityGroupRules = {}

    def create(self, parent_group_id, ip_protocol, from_port, to_port, cidr):
        secGrpRule = FakeSecurityGroupRule(ip_protocol, from_port, to_port,
                                           cidr, parent_group_id)
        self.securityGroupRules[secGrpRule.get_id()] = secGrpRule
        return secGrpRule

    def delete(self, id):
        if id in self.securityGroupRules:
            del self.securityGroupRules[id]


class FakeServerGroup(object):

    def __init__(self, name=None, policies=None, context=None):
        self.name = name
        self.description = None
        self.id = "FAKE_SRVGRP_%s" % uuid.uuid4()
        self.policies = policies or {}

    def get_id(self):
        return self.id

    def data(self):
        return {
            'id': self.id,
            'name': self.name,
            'policies': self.policies
        }


class FakeServerGroups(object):

    def __init__(self, context=None):
        self.context = context
        self.server_groups = {}

    def create(self, name=None, policies=None):
        server_group = FakeServerGroup(name, policies, context=self.context)
self.server_groups[server_group.get_id()] = server_group return server_group def delete(self, group_id): pass def list(self): return self.server_groups class FakeClient(object): def __init__(self, context): self.context = context self.flavors = FLAVORS self.servers = FakeServers(context, self.flavors) self.volumes = FakeVolumes(context) self.servers.volumes = self.volumes self.accounts = FakeAccounts(context, self.servers) self.rdhosts = FakeHosts(self.servers) self.rdstorage = FakeRdStorages() self.rdservers = FakeRdServers(self.servers) self.security_groups = FakeSecurityGroups(context) self.security_group_rules = FakeSecurityGroupRules(context) self.server_groups = FakeServerGroups(context) def rescan_server_volume(self, server, volume_id): LOG.info("FAKE rescanning volume.") CLIENT_DATA = {} def get_client_data(context): if context not in CLIENT_DATA: nova_client = FakeClient(context) volume_client = FakeClient(context) volume_client.servers = nova_client CLIENT_DATA[context] = { 'nova': nova_client, 'volume': volume_client } return CLIENT_DATA[context] def fake_create_nova_client(context, region_name=None): return get_client_data(context)['nova'] def fake_create_nova_volume_client(context, region_name=None): return get_client_data(context)['volume'] def fake_create_cinder_client(context, region_name=None): return get_client_data(context)['volume'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/swift.py0000644000175000017500000005222700000000000021257 0ustar00coreycorey00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
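# The FakeSwiftConnection below mimics Swift's large-object semantics: the
# etag reported for a manifest is the md5 of the concatenation of the md5
# hexdigests of its segments. A standalone sketch of that rule as the fake
# computes it (Python 3 form):
#
#     from hashlib import md5
#     segments = [b'segment-one', b'segment-two']
#     manifest_etag = md5()
#     for seg in segments:
#         manifest_etag.update(md5(seg).hexdigest().encode())
#     print(manifest_etag.hexdigest())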
from hashlib import md5 from mock import MagicMock, patch import json import os import socket import swiftclient import swiftclient.client as swift_client import uuid from oslo_log import log as logging import six from six.moves import http_client from swiftclient import client as swift LOG = logging.getLogger(__name__) class FakeSwiftClient(object): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): pass @classmethod def Connection(cls, *args, **kargs): LOG.debug("fake FakeSwiftClient Connection") return FakeSwiftConnection() class FakeSwiftConnection(object): """Logging calls instead of executing.""" MANIFEST_QUERY_STRING_PUT = 'multipart-manifest=put' MANIFEST_QUERY_STRING_DELETE = 'multipart-manifest=delete' COPY_OBJECT_HEADER_KEY = 'X-Copy-From' url = 'http://mockswift/v1' def __init__(self, *args, **kwargs): self.manifest_prefix = None self.manifest_name = None self.container_objects = {} def get_auth(self): return ( u"http://127.0.0.1:8080/v1/AUTH_c7b038976df24d96bf1980f5da17bd89", u'MIINrwYJKoZIhvcNAQcCoIINoDCCDZwCAQExCTAHBgUrDgMCGjCCDIgGCSqGSIb3' u'DQEHAaCCDHkEggx1eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAi' u'MjAxMy0wMy0xOFQxODoxMzoyMC41OTMyNzYiLCAiZXhwaXJlcyI6ICIyMDEzLTAz' u'LTE5VDE4OjEzOjIwWiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7' u'ImVuYWJsZWQiOiB0cnVlLCAiZGVzY3JpcHRpb24iOiBudWxsLCAibmFtZSI6ICJy' u'ZWRkd2FyZiIsICJpZCI6ICJjN2IwMzg5NzZkZjI0ZDk2YmYxOTgwZjVkYTE3YmQ4' u'OSJ9fSwgInNlcnZpY2VDYXRhbG9nIjogW3siZW5kcG9pbnRzIjogW3siYWRtaW5') def get_account(self): return ({'content-length': '2', 'accept-ranges': 'bytes', 'x-timestamp': '1363049003.92304', 'x-trans-id': 'tx9e5da02c49ed496395008309c8032a53', 'date': 'Tue, 10 Mar 2013 00:43:23 GMT', 'x-account-bytes-used': '0', 'x-account-container-count': '0', 'content-type': 'application/json; charset=utf-8', 'x-account-object-count': '0'}, []) def head_container(self, container): LOG.debug("fake head_container(%s)", container) if container == 'missing_container': raise swift.ClientException('fake exception', http_status=http_client.NOT_FOUND) elif container == 'unauthorized_container': raise swift.ClientException('fake exception', http_status=http_client.UNAUTHORIZED) elif container == 'socket_error_on_head': raise socket.error(111, 'ECONNREFUSED') pass def put_container(self, container): LOG.debug("fake put_container(%s)", container) pass def get_container(self, container, **kwargs): LOG.debug("fake get_container(%s)", container) fake_header = None fake_body = [{'name': 'backup_001'}, {'name': 'backup_002'}, {'name': 'backup_003'}] return fake_header, fake_body def head_object(self, container, name): LOG.debug("fake put_container(%(container)s, %(name)s)", {'container': container, 'name': name}) checksum = md5() if self.manifest_name == name: for object_name in sorted(self.container_objects): object_checksum = md5(self.container_objects[object_name]) # The manifest file etag for a HEAD or GET is the checksum of # the concatenated checksums. 
                if six.PY3:
                    checksum.update(object_checksum.hexdigest().encode())
                else:
                    checksum.update(object_checksum.hexdigest())
            # this is included to test bad swift segment etags
            if name.startswith("bad_manifest_etag_"):
                return {'etag': '"this_is_an_intentional_bad_manifest_etag"'}
        else:
            if name in self.container_objects:
                checksum.update(self.container_objects[name])
            else:
                return {'etag': 'fake-md5-sum'}
        # Currently a swift HEAD object returns etag with double quotes
        return {'etag': '"%s"' % checksum.hexdigest()}

    def get_object(self, container, name, resp_chunk_size=None):
        LOG.debug("fake get_object(%(container)s, %(name)s)",
                  {'container': container, 'name': name})
        if container == 'socket_error_on_get':
            raise socket.error(111, 'ECONNREFUSED')
        if 'metadata' in name:
            fake_object_header = None
            metadata = {}
            if container == 'unsupported_version':
                metadata['version'] = '9.9.9'
            else:
                metadata['version'] = '1.0.0'
            metadata['backup_id'] = 123
            metadata['volume_id'] = 123
            metadata['backup_name'] = 'fake backup'
            metadata['backup_description'] = 'fake backup description'
            metadata['created_at'] = '2013-02-19 11:20:54,805'
            metadata['objects'] = [{
                'backup_001': {'compression': 'zlib', 'length': 10},
                'backup_002': {'compression': 'zlib', 'length': 10},
                'backup_003': {'compression': 'zlib', 'length': 10}
            }]
            metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
            fake_object_body = metadata_json
            return (fake_object_header, fake_object_body)
        fake_header = {'etag': '"fake-md5-sum"'}
        if resp_chunk_size:
            def _object_info():
                length = 0
                while length < (1024 * 1024):
                    yield os.urandom(resp_chunk_size)
                    length += resp_chunk_size
            fake_object_body = _object_info()
        else:
            fake_object_body = os.urandom(1024 * 1024)
        return (fake_header, fake_object_body)

    def put_object(self, container, name, contents, **kwargs):
        LOG.debug("fake put_object(%(container)s, %(name)s)",
                  {'container': container, 'name': name})
        if container == 'socket_error_on_put':
            raise socket.error(111, 'ECONNREFUSED')
        headers = kwargs.get('headers', {})
        query_string = kwargs.get('query_string', '')
        object_checksum = md5()
        if query_string == self.MANIFEST_QUERY_STRING_PUT:
            # the manifest prefix format is <container>/<prefix> where
            # container is where the object segments are in and prefix is the
            # common prefix for all segments.
self.manifest_name = name if isinstance(contents, six.text_type): object_checksum.update(contents.encode('utf-8')) else: object_checksum.update(contents) elif self.COPY_OBJECT_HEADER_KEY in headers: # this is a copy object operation source_path = headers.get(self.COPY_OBJECT_HEADER_KEY) source_name = source_path.split('/')[1] self.container_objects[name] = self.container_objects[source_name] else: if hasattr(contents, 'read'): chunk_size = 128 object_content = b"" chunk = contents.read(chunk_size) while chunk: object_content += chunk object_checksum.update(chunk) chunk = contents.read(chunk_size) self.container_objects[name] = object_content else: object_checksum.update(contents) self.container_objects[name] = contents # this is included to test bad swift segment etags if name.startswith("bad_segment_etag_"): return "this_is_an_intentional_bad_segment_etag" return object_checksum.hexdigest() def post_object(self, container, name, headers={}): LOG.debug("fake post_object(%(container)s, %(name)s, %(head)s)", {'container': container, 'name': name, 'head': str(headers)}) def delete_object(self, container, name): LOG.debug("fake delete_object(%(container)s, %(name)s)", {'container': container, 'name': name}) if container == 'socket_error_on_delete': raise socket.error(111, 'ECONNREFUSED') pass class Patcher(object): """Objects that need to mock global symbols throughout their existence should extend this base class. The object acts as a context manager which, when used in conjunction with the 'with' statement, terminates all running patchers when it leaves the scope. """ def __init__(self): self.__patchers = None def __enter__(self): self.__patchers = [] return self def __exit__(self, type, value, traceback): # Stop patchers in the LIFO order. while self.__patchers: self.__patchers.pop().stop() def _start_patcher(self, patcher): """All patchers started by this method will be automatically terminated on __exit__(). """ self.__patchers.append(patcher) return patcher.start() class SwiftClientStub(Patcher): """ Component for controlling behavior of Swift Client Stub. Instantiated before tests are invoked in "fake" mode. 
Invoke methods to control behavior so that systems under test can interact with this as it is a real swift client with a real backend example: if FAKE: swift_stub = SwiftClientStub() swift_stub.with_account('xyz') # returns swift account info and auth token component_using_swift.get_swift_account() if FAKE: swift_stub.with_container('test-container-name') # returns swift container information - mostly faked component_using.swift.create_container('test-container-name') component_using_swift.get_container_info('test-container-name') if FAKE: swift_stub.with_object('test-container-name', 'test-object-name', 'test-object-contents') # returns swift object info and contents component_using_swift.create_object('test-container-name', 'test-object-name', 'test-contents') component_using_swift.get_object('test-container-name', 'test-object-name') if FAKE: swift_stub.without_object('test-container-name', 'test-object-name') # allows object to be removed ONCE component_using_swift.remove_object('test-container-name', 'test-object-name') # throws ClientException - 404 component_using_swift.get_object('test-container-name', 'test-object-name') component_using_swift.remove_object('test-container-name', 'test-object-name') if FAKE: swift_stub.without_object('test-container-name', 'test-object-name') # allows container to be removed ONCE component_using_swift.remove_container('test-container-name') # throws ClientException - 404 component_using_swift.get_container('test-container-name') component_using_swift.remove_container('test-container-name') """ def __init__(self): super(SwiftClientStub, self).__init__() self._connection = swift_client.Connection() self._containers = {} self._containers_list = [] self._objects = {} def _remove_object(self, name, some_list): idx = [i for i, obj in enumerate(some_list) if obj['name'] == name] if len(idx) == 1: del some_list[idx[0]] def _ensure_object_exists(self, container, name): self._connection.get_object(container, name) def with_account(self, account_id): """ setups up account headers example: if FAKE: swift_stub = SwiftClientStub() swift_stub.with_account('xyz') # returns swift account info and auth token component_using_swift.get_swift_account() :param account_id: account id """ def account_resp(): return ({'content-length': '2', 'accept-ranges': 'bytes', 'x-timestamp': '1363049003.92304', 'x-trans-id': 'tx9e5da02c49ed496395008309c8032a53', 'date': 'Tue, 10 Mar 2013 00:43:23 GMT', 'x-account-bytes-used': '0', 'x-account-container-count': '0', 'content-type': 'application/json; charset=utf-8', 'x-account-object-count': '0'}, self._containers_list) get_auth_return_value = ( u"http://127.0.0.1:8080/v1/AUTH_c7b038976df24d96bf1980f5da17bd89", u'MIINrwYJKoZIhvcNAQcCoIINoDCCDZwCAQExCTAHBgUrDgMCGjCCDIgGCSqGSIb3' u'DQEHAaCCDHkEggx1eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAi' u'MjAxMy0wMy0xOFQxODoxMzoyMC41OTMyNzYiLCAiZXhwaXJlcyI6ICIyMDEzLTAz' u'LTE5VDE4OjEzOjIwWiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7' u'ImVuYWJsZWQiOiB0cnVlLCAiZGVzY3JpcHRpb24iOiBudWxsLCAibmFtZSI6ICJy' u'ZWRkd2FyZiIsICJpZCI6ICJjN2IwMzg5NzZkZjI0ZDk2YmYxOTgwZjVkYTE3YmQ4' u'OSJ9fSwgInNlcnZpY2VDYXRhbG9nIjogW3siZW5kcG9pbnRzIjogW3siYWRtaW5') get_auth_patcher = patch.object( swift_client.Connection, 'get_auth', MagicMock(return_value=get_auth_return_value)) self._start_patcher(get_auth_patcher) get_account_patcher = patch.object( swift_client.Connection, 'get_account', MagicMock(return_value=account_resp())) self._start_patcher(get_account_patcher) return self def _create_container(self, 
container_name): container = {'count': 0, 'bytes': 0, 'name': container_name} self._containers[container_name] = container self._containers_list.append(container) self._objects[container_name] = [] def _ensure_container_exists(self, container): self._connection.get_container(container) def _delete_container(self, container): self._remove_object(container, self._containers_list) del self._containers[container] del self._objects[container] def with_container(self, container_name): """ sets expectations for creating a container and subsequently getting its information example: if FAKE: swift_stub.with_container('test-container-name') # returns swift container information - mostly faked component_using.swift.create_container('test-container-name') component_using_swift.get_container_info('test-container-name') :param container_name: container name that is expected to be created """ def container_resp(container): return ({'content-length': '2', 'x-container-object-count': '0', 'accept-ranges': 'bytes', 'x-container-bytes-used': '0', 'x-timestamp': '1363370869.72356', 'x-trans-id': 'tx7731801ac6ec4e5f8f7da61cde46bed7', 'date': 'Fri, 10 Mar 2013 18:07:58 GMT', 'content-type': 'application/json; charset=utf-8'}, self._objects[container]) # if this is called multiple times then nothing happens put_container_patcher = patch.object(swift_client.Connection, 'put_container') self._start_patcher(put_container_patcher) def side_effect_func(*args, **kwargs): if args[0] in self._containers: return container_resp(args[0]) else: raise swiftclient.ClientException('Resource Not Found', http_status=404) self._create_container(container_name) # return container headers get_container_patcher = patch.object( swift_client.Connection, 'get_container', MagicMock(side_effect=side_effect_func)) self._start_patcher(get_container_patcher) return self def without_container(self, container): """ sets expectations for removing a container and subsequently throwing an exception for further interactions example: if FAKE: swift_stub.without_container('test-container-name') # returns swift container information - mostly faked component_using.swift.remove_container('test-container-name') # throws exception "Resource Not Found - 404" component_using_swift.get_container_info('test-container-name') :param container: container name that is expected to be removed """ # first ensure container self._ensure_container_exists(container) self._delete_container(container) return self def with_object(self, container, name, contents): """ sets expectations for creating an object and subsequently getting its contents example: if FAKE: swift_stub.with_object('test-container-name', 'test-object-name', 'test-object-contents') # returns swift object info and contents component_using_swift.create_object('test-container-name', 'test-object-name', 'test-contents') component_using_swift.get_object('test-container-name', 'test-object-name') :param container: container name that is the object belongs :param name: the name of the object expected to be created :param contents: the contents of the object """ put_object_patcher = patch.object( swift_client.Connection, 'put_object', MagicMock(return_value=uuid.uuid1())) self._start_patcher(put_object_patcher) def side_effect_func(*args, **kwargs): if (args[0] in self._containers and args[1] in map(lambda x: x['name'], self._objects[args[0]])): return ( {'content-length': len(contents), 'accept-ranges': 'bytes', 'last-modified': 'Mon, 10 Mar 2013 01:06:34 GMT', 'etag': 'eb15a6874ce265e2c3eb1b4891567bab', 
'x-timestamp': '1363568794.67584', 'x-trans-id': 'txef3aaf26c897420c8e77c9750ce6a501', 'date': 'Mon, 10 Mar 2013 05:35:14 GMT', 'content-type': 'application/octet-stream'}, [obj for obj in self._objects[args[0]] if obj['name'] == args[1]][0]['contents']) else: raise swiftclient.ClientException('Resource Not Found', http_status=404) get_object_patcher = patch.object( swift_client.Connection, 'get_object', MagicMock(side_effect=side_effect_func)) self._start_patcher(get_object_patcher) self._remove_object(name, self._objects[container]) self._objects[container].append( {'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950', 'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': name, 'content_type': 'application/octet-stream', 'contents': contents}) return self def without_object(self, container, name): """ sets expectations for deleting an object example: if FAKE: swift_stub.without_object('test-container-name', 'test-object-name') # allows container to be removed ONCE component_using_swift.remove_container('test-container-name') # throws ClientException - 404 component_using_swift.get_container('test-container-name') component_using_swift.remove_container('test-container-name') :param container: container name that is the object belongs :param name: the name of the object expected to be removed """ self._ensure_container_exists(container) self._ensure_object_exists(container, name) def side_effect_func(*args, **kwargs): if not [obj for obj in self._objects[args[0]] if obj['name'] == [args[1]]]: raise swiftclient.ClientException('Resource Not found', http_status=404) else: return None delete_object_patcher = patch.object( swift_client.Connection, 'delete_object', MagicMock(side_effect=side_effect_func)) self._start_patcher(delete_object_patcher) self._remove_object(name, self._objects[container]) return self def fake_create_swift_client(calculate_etag=False, *args): return FakeSwiftClient.Connection(*args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/fakes/taskmanager.py0000644000175000017500000000406700000000000022417 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
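# The fakes below collapse the RPC boundary so the API and the taskmanager
# run in one process: call() invokes the Manager method directly and cast()
# schedules it on an eventlet greenthread. Tests switch this on with the
# monkey_patch() helper defined at the end of this module:
#
#     from trove.tests.fakes import taskmanager
#     taskmanager.monkey_patch()
#     # From here on, trove.taskmanager.api.API.get_client() returns the
#     # FakeRpcClient and notifications go to the FakeNotifier.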
# from collections import defaultdict import eventlet from oslo_log import log as logging from trove import rpc from trove.taskmanager.api import API from trove.taskmanager.manager import Manager import trove.tests.util.usage as usage LOG = logging.getLogger(__name__) MESSAGE_QUEUE = defaultdict(list) class FakeRpcClient(object): def call(self, context, method_name, *args, **kwargs): manager, method = self._get_tm_method(method_name) return method(manager, context, *args, **kwargs) def cast(self, context, method_name, *args, **kwargs): manager, method = self._get_tm_method(method_name) def func(): try: method(manager, context, *args, **kwargs) except Exception: LOG.exception("Error running %s", method) eventlet.spawn_after(0.1, func) def _get_tm_method(self, method_name): manager = Manager() method = getattr(Manager, method_name) return manager, method def prepare(self, *args, **kwargs): return self class FakeNotifier(object): def info(self, ctxt, event_type, payload): usage.notify(event_type, payload) def monkey_patch(): def fake_get_client(self, *args, **kwargs): return FakeRpcClient() def fake_get_notifier(service=None, host=None, publisher_id=None): return FakeNotifier() API.get_client = fake_get_client rpc.get_notifier = fake_get_notifier ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/int_tests.py0000644000175000017500000002603100000000000021040 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
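# build_group() (defined below) flattens arbitrarily nested lists of group
# names while keeping first-seen order and dropping duplicates, e.g.:
#
#     build_group('a', ['b', ['c', 'a']])  # -> ['a', 'b', 'c']
#
# register() then publishes each name (plus a dash-separated alias) to
# proboscis with the merged dependency groups. A representative call, with a
# datastore group name assumed for illustration:
#
#     register(["mysql_group"], common_groups, backup_groups)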
import proboscis from trove import tests from trove.tests.scenario import groups from trove.tests.scenario.groups import backup_group from trove.tests.scenario.groups import cluster_group from trove.tests.scenario.groups import configuration_group from trove.tests.scenario.groups import database_actions_group from trove.tests.scenario.groups import guest_log_group from trove.tests.scenario.groups import instance_actions_group from trove.tests.scenario.groups import instance_create_group from trove.tests.scenario.groups import instance_delete_group from trove.tests.scenario.groups import instance_error_create_group from trove.tests.scenario.groups import instance_force_delete_group from trove.tests.scenario.groups import instance_upgrade_group from trove.tests.scenario.groups import module_group from trove.tests.scenario.groups import replication_group from trove.tests.scenario.groups import root_actions_group from trove.tests.scenario.groups import user_actions_group def build_group(*groups): def merge(collection, *items): for item in items: if isinstance(item, list): merge(collection, *item) else: if item not in collection: collection.append(item) out = [] merge(out, *groups) return out def register(group_names, *test_groups, **kwargs): if kwargs: register(group_names, kwargs.values()) for suffix, grp_set in kwargs.items(): # Recursively call without the kwargs register([name + '_' + suffix for name in group_names], *grp_set) return # Do the actual registration here proboscis.register(groups=build_group(group_names), depends_on_groups=build_group(*test_groups)) # Now register the same groups with '-' instead of '_' proboscis.register( groups=build_group([name.replace('_', '-') for name in group_names]), depends_on_groups=build_group(*test_groups)) # Base groups for all other groups base_groups = [ tests.DBAAS_API_VERSIONS, ] # Cluster-based groups cluster_create_groups = list(base_groups) cluster_create_groups.extend([groups.CLUSTER_DELETE_WAIT]) cluster_actions_groups = list(cluster_create_groups) cluster_actions_groups.extend([groups.CLUSTER_ACTIONS_SHRINK_WAIT]) cluster_root_groups = list(cluster_create_groups) cluster_root_groups.extend([groups.CLUSTER_ACTIONS_ROOT_ENABLE]) cluster_root_actions_groups = list(cluster_actions_groups) cluster_root_actions_groups.extend([groups.CLUSTER_ACTIONS_ROOT_ACTIONS]) cluster_restart_groups = list(cluster_create_groups) cluster_restart_groups.extend([groups.CLUSTER_ACTIONS_RESTART_WAIT]) cluster_upgrade_groups = list(cluster_create_groups) cluster_upgrade_groups.extend([groups.CLUSTER_UPGRADE_WAIT]) cluster_config_groups = list(cluster_create_groups) cluster_config_groups.extend([groups.CLUSTER_CFGGRP_DELETE]) cluster_config_actions_groups = list(cluster_config_groups) cluster_config_actions_groups.extend([groups.CLUSTER_ACTIONS_CFGGRP_ACTIONS]) cluster_groups = list(cluster_actions_groups) cluster_groups.extend([cluster_group.GROUP]) # Single-instance based groups instance_create_groups = list(base_groups) instance_create_groups.extend([groups.INST_CREATE, groups.INST_DELETE_WAIT]) instance_error_create_groups = list(base_groups) instance_error_create_groups.extend([instance_error_create_group.GROUP]) instance_force_delete_groups = list(base_groups) instance_force_delete_groups.extend([instance_force_delete_group.GROUP]) instance_init_groups = list(base_groups) instance_init_groups.extend([instance_create_group.GROUP, instance_delete_group.GROUP]) instance_upgrade_groups = list(instance_create_groups) 
instance_upgrade_groups.extend([instance_upgrade_group.GROUP]) backup_groups = list(instance_create_groups) backup_groups.extend([groups.BACKUP, groups.BACKUP_INST]) backup_incremental_groups = list(backup_groups) backup_incremental_groups.extend([backup_group.GROUP]) backup_negative_groups = list(backup_groups) backup_negative_groups.extend([groups.BACKUP_CREATE_NEGATIVE]) configuration_groups = list(instance_create_groups) configuration_groups.extend([configuration_group.GROUP]) configuration_create_groups = list(base_groups) configuration_create_groups.extend([groups.CFGGRP_CREATE, groups.CFGGRP_DELETE]) database_actions_groups = list(instance_create_groups) database_actions_groups.extend([database_actions_group.GROUP]) guest_log_groups = list(instance_create_groups) guest_log_groups.extend([guest_log_group.GROUP]) instance_actions_groups = list(instance_create_groups) instance_actions_groups.extend([instance_actions_group.GROUP]) instance_groups = list(instance_actions_groups) instance_groups.extend([instance_error_create_group.GROUP, instance_force_delete_group.GROUP]) module_groups = list(instance_create_groups) module_groups.extend([module_group.GROUP]) module_create_groups = list(base_groups) module_create_groups.extend([groups.MODULE_CREATE, groups.MODULE_DELETE]) replication_groups = list(instance_create_groups) replication_groups.extend([groups.REPL_INST_DELETE_WAIT]) replication_promote_groups = list(replication_groups) replication_promote_groups.extend([replication_group.GROUP]) root_actions_groups = list(instance_create_groups) root_actions_groups.extend([root_actions_group.GROUP]) user_actions_groups = list(instance_create_groups) user_actions_groups.extend([user_actions_group.GROUP]) # Groups common to all datastores common_groups = list(instance_create_groups) # NOTE(lxkong): The module-related tests (module_groups) are removed for now # because there is no use case for them. common_groups.extend([guest_log_groups, instance_init_groups]) integration_groups = [ tests.DBAAS_API_VERSIONS, tests.DBAAS_API_DATASTORES, tests.DBAAS_API_MGMT_DATASTORES, tests.DBAAS_API_INSTANCES, tests.DBAAS_API_USERS_ROOT, tests.DBAAS_API_USERS, tests.DBAAS_API_USERS_ACCESS, tests.DBAAS_API_DATABASES, tests.DBAAS_API_INSTANCE_ACTIONS, tests.DBAAS_API_BACKUPS, tests.DBAAS_API_CONFIGURATIONS, tests.DBAAS_API_REPLICATION, tests.DBAAS_API_INSTANCES_DELETE ] # We intentionally make the functional tests run in series and depend on each # other, so that a single test case failure stops the whole test run.
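# All of the *_groups lists above and the register() calls below go through
# build_group(), which flattens arbitrarily nested lists while preserving
# order and dropping duplicates. A quick standalone illustration of that
# behaviour; flatten_unique mirrors build_group, and the group names here
# are made up.
def flatten_unique(*items_in):
    out = []

    def merge(items):
        for item in items:
            if isinstance(item, list):
                merge(item)
            elif item not in out:
                out.append(item)

    merge(items_in)
    return out


base = ['api_versions']
create = base + ['inst_create', 'inst_delete_wait']
# Nested lists and repeats collapse into one ordered, de-duplicated list:
assert flatten_unique(base, [create, 'inst_create']) == [
    'api_versions', 'inst_create', 'inst_delete_wait']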
proboscis.register(groups=["mysql"], depends_on_groups=integration_groups) register( ["mysql_supported"], single=[instance_create_group.GROUP, backup_group.GROUP, configuration_group.GROUP, database_actions_group.GROUP, guest_log_group.GROUP, instance_actions_group.GROUP, instance_error_create_group.GROUP, instance_force_delete_group.GROUP, root_actions_group.GROUP, user_actions_group.GROUP, instance_delete_group.GROUP], multi=[replication_group.GROUP, instance_delete_group.GROUP] ) register( ["mariadb_supported"], single=[instance_create_group.GROUP, backup_group.GROUP, configuration_group.GROUP, database_actions_group.GROUP, guest_log_group.GROUP, instance_actions_group.GROUP, instance_error_create_group.GROUP, instance_force_delete_group.GROUP, root_actions_group.GROUP, user_actions_group.GROUP, instance_delete_group.GROUP], multi=[replication_group.GROUP, instance_delete_group.GROUP] ) register( ["db2_supported"], single=[common_groups, configuration_groups, database_actions_groups, user_actions_groups, ], multi=[] ) register( ["cassandra_supported"], single=[common_groups, backup_groups, database_actions_groups, configuration_groups, user_actions_groups, ], multi=[cluster_actions_groups, cluster_root_actions_groups, cluster_config_actions_groups, ] ) register( ["couchbase_supported"], single=[common_groups, backup_groups, root_actions_groups, ], multi=[] ) register( ["couchdb_supported"], single=[common_groups, backup_groups, database_actions_groups, root_actions_groups, user_actions_groups, ], multi=[] ) register( ["mongodb_supported"], single=[common_groups, backup_groups, configuration_groups, database_actions_groups, root_actions_groups, user_actions_groups, ], multi=[cluster_actions_groups, ] ) register( ["percona_supported"], single=[common_groups, backup_incremental_groups, configuration_groups, database_actions_groups, instance_upgrade_groups, root_actions_groups, user_actions_groups, ], multi=[replication_promote_groups, ] ) register( ["postgresql_supported"], single=[common_groups, backup_incremental_groups, database_actions_groups, configuration_groups, root_actions_groups, user_actions_groups, ], multi=[replication_groups, ] ) register( ["pxc_supported"], single=[common_groups, backup_incremental_groups, configuration_groups, database_actions_groups, root_actions_groups, user_actions_groups, ], multi=[] # multi=[cluster_actions_groups, # cluster_root_actions_groups, ] ) # Redis instances does not support inherit root state from backups, # so a customized root actions group is created, instance backuping # and restoring tests will not be included. redis_root_actions_groups = list(instance_create_groups) redis_root_actions_groups.extend([groups.ROOT_ACTION_ENABLE, groups.ROOT_ACTION_DISABLE]) register( ["redis_supported"], single=[common_groups, backup_groups, configuration_groups, redis_root_actions_groups, ], multi=[replication_promote_groups, ] # multi=[cluster_actions_groups, # replication_promote_groups, ] ) register( ["vertica_supported"], single=[common_groups, configuration_groups, root_actions_groups, ], multi=[cluster_actions_groups, cluster_root_actions_groups, ] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/root_logger.py0000644000175000017500000000503300000000000021345 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import traceback class DefaultRootHandler(logging.StreamHandler): """A singleton StreamHandler""" __handler = logging.StreamHandler() __singleton = None __info = None __enable_backtrace = False @classmethod def activate(cls, enable_backtrace=False): # leverage the singleton __handler which has an # acquire() method to create a critical section. cls.__handler.acquire() if cls.__singleton is None: cls.__singleton = DefaultRootHandler() cls.__enable_backtrace = enable_backtrace cls.__handler.release() return cls.__singleton @classmethod def set_info(cls, info=None): cls.__info = info def __init__(self): if DefaultRootHandler.__singleton is not None: raise Exception( "Do not directly instantiate DefaultRootHandler(). " "Only use the activate() class method.") super(DefaultRootHandler, self).__init__() def emit(self, record): if DefaultRootHandler.__info: msg = ("*************************\n" + "Unhandled message logged from " + DefaultRootHandler.__info + ", " + record.name + "\n") if DefaultRootHandler.__enable_backtrace: msg += ''.join(traceback.format_stack()) + "\n" msg += "*************************\n" self.stream.write(msg) self.flush() class DefaultRootLogger(object): """A root logger that uses the singleton handler""" def __init__(self, enable_backtrace=False): super(DefaultRootLogger, self).__init__() handler = DefaultRootHandler.activate( enable_backtrace=enable_backtrace ) handler.acquire() if handler not in logging.getLogger('').handlers: logging.getLogger('').addHandler(handler) handler.release() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7801108 trove-12.1.0.dev92/trove/tests/scenario/0000755000175000017500000000000000000000000020253 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/__init__.py0000644000175000017500000000000000000000000022352 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.784111 trove-12.1.0.dev92/trove/tests/scenario/groups/0000755000175000017500000000000000000000000021572 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/scenario/groups/__init__.py0000644000175000017500000001615600000000000023714 0ustar00coreycorey00000000000000# Copyright 2016 Tesora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # Labels for all the sub-groups are listed here, so that they can be # referenced by other groups (thus avoiding circular references when # loading modules). The main GROUP label is still defined in each # respective group file. # Backup Group BACKUP = "scenario.backup_grp" BACKUP_CREATE = "scenario.backup_create_grp" BACKUP_CREATE_NEGATIVE = "scenario.backup_create_negative_grp" BACKUP_CREATE_WAIT = "scenario.backup_create_wait_grp" BACKUP_DELETE = "scenario.backup_delete_grp" BACKUP_INST = "scenario.backup_inst_grp" BACKUP_INST_CREATE = "scenario.backup_inst_create_grp" BACKUP_INST_CREATE_WAIT = "scenario.backup_inst_create_wait_grp" BACKUP_INST_DELETE = "scenario.backup_inst_delete_grp" BACKUP_INST_DELETE_WAIT = "scenario.backup_inst_delete_wait_grp" BACKUP_INC = "scenario.backup_inc_grp" BACKUP_INC_CREATE = "scenario.backup_inc_create_grp" BACKUP_INC_DELETE = "scenario.backup_inc_delete_grp" BACKUP_INC_INST = "scenario.backup_inc_inst_grp" BACKUP_INC_INST_CREATE = "scenario.backup_inc_inst_create_grp" BACKUP_INC_INST_CREATE_WAIT = "scenario.backup_inc_inst_create_wait_grp" BACKUP_INC_INST_DELETE = "scenario.backup_inc_inst_delete_grp" BACKUP_INC_INST_DELETE_WAIT = "scenario.backup_inc_inst_delete_wait_grp" # Configuration Group CFGGRP_CREATE = "scenario.cfggrp_create_grp" CFGGRP_DELETE = "scenario.cfggrp_delete_grp" CFGGRP_INST = "scenario.cfggrp_inst_grp" CFGGRP_INST_CREATE = "scenario.cfggrp_inst_create_grp" CFGGRP_INST_CREATE_WAIT = "scenario.cfggrp_inst_create_wait_grp" CFGGRP_INST_DELETE = "scenario.cfggrp_inst_delete_grp" CFGGRP_INST_DELETE_WAIT = "scenario.cfggrp_inst_delete_wait_grp" # Cluster Actions Group CLUSTER_CFGGRP_CREATE = "scenario.cluster_actions_cfggrp_create_grp" CLUSTER_CFGGRP_DELETE = "scenario.cluster_actions_cfggrp_delete_grp" CLUSTER_ACTIONS = "scenario.cluster_actions_grp" CLUSTER_ACTIONS_CFGGRP_ACTIONS = "scenario.cluster_actions_cfggrp_actions_grp" CLUSTER_ACTIONS_ROOT_ENABLE = "scenario.cluster_actions_root_enable_grp" CLUSTER_ACTIONS_ROOT_ACTIONS = "scenario.cluster_actions_root_actions_grp" CLUSTER_ACTIONS_ROOT_GROW = "scenario.cluster_actions_root_grow_grp" CLUSTER_ACTIONS_ROOT_SHRINK = "scenario.cluster_actions_root_shrink_grp" CLUSTER_ACTIONS_GROW_SHRINK = "scenario.cluster_actions_grow_shrink_grp" CLUSTER_ACTIONS_GROW = "scenario.cluster_actions_grow_grp" CLUSTER_ACTIONS_GROW_WAIT = "scenario.cluster_actions_grow_wait_grp" CLUSTER_ACTIONS_SHRINK = "scenario.cluster_actions_shrink_grp" CLUSTER_ACTIONS_SHRINK_WAIT = "scenario.cluster_actions_shrink_wait_grp" CLUSTER_ACTIONS_RESTART = "scenario.cluster_actions_restart_grp" CLUSTER_ACTIONS_RESTART_WAIT = "scenario.cluster_actions_restart_wait_grp" # Cluster Create Group (in cluster_actions file) CLUSTER_CREATE = "scenario.cluster_create_grp" CLUSTER_CREATE_WAIT = "scenario.cluster_create_wait_grp" # Cluster Delete Group (in cluster_actions file) CLUSTER_DELETE = "scenario.cluster_delete_grp" CLUSTER_DELETE_WAIT = "scenario.cluster_delete_wait_grp" # Cluster Upgrade Group (in cluster_actions file) CLUSTER_UPGRADE = "scenario.cluster_upgrade_grp" CLUSTER_UPGRADE_WAIT = "scenario.cluster_upgrade_wait_grp" # Database Actions Group DB_ACTION_CREATE = "scenario.db_action_create_grp" DB_ACTION_DELETE = "scenario.db_action_delete_grp" DB_ACTION_INST = "scenario.db_action_inst_grp" DB_ACTION_INST_CREATE = "scenario.db_action_inst_create_grp" DB_ACTION_INST_CREATE_WAIT = "scenario.db_action_inst_create_wait_grp" 
DB_ACTION_INST_DELETE = "scenario.db_action_inst_delete_grp" DB_ACTION_INST_DELETE_WAIT = "scenario.db_action_inst_delete_wait_grp" # Instance Actions Group INST_ACTIONS = "scenario.inst_actions_grp" INST_ACTIONS_RESIZE = "scenario.inst_actions_resize_grp" INST_ACTIONS_RESIZE_WAIT = "scenario.inst_actions_resize_wait_grp" # Instance Upgrade Group INST_UPGRADE = "scenario.inst_upgrade_grp" # Instance Create Group INST_CREATE = "scenario.inst_create_grp" INST_CREATE_WAIT = "scenario.inst_create_wait_grp" INST_INIT_CREATE = "scenario.inst_init_create_grp" INST_INIT_CREATE_WAIT = "scenario.inst_init_create_wait_grp" INST_INIT_DELETE = "scenario.inst_init_delete_grp" INST_INIT_DELETE_WAIT = "scenario.inst_init_delete_wait_grp" # Instance Delete Group INST_DELETE = "scenario.inst_delete_grp" INST_DELETE_WAIT = "scenario.inst_delete_wait_grp" # Instance Error Create Group INST_ERROR_CREATE = "scenario.inst_error_create_grp" INST_ERROR_CREATE_WAIT = "scenario.inst_error_create_wait_grp" INST_ERROR_DELETE = "scenario.inst_error_delete_grp" INST_ERROR_DELETE_WAIT = "scenario.inst_error_delete_wait_grp" # Instance Force Delete Group INST_FORCE_DELETE = "scenario.inst_force_delete_grp" INST_FORCE_DELETE_WAIT = "scenario.inst_force_delete_wait_grp" # Module Group MODULE_CREATE = "scenario.module_create_grp" MODULE_DELETE = "scenario.module_delete_grp" MODULE_INST = "scenario.module_inst_grp" MODULE_INST_CREATE = "scenario.module_inst_create_grp" MODULE_INST_CREATE_WAIT = "scenario.module_inst_create_wait_grp" MODULE_INST_DELETE = "scenario.module_inst_delete_grp" MODULE_INST_DELETE_WAIT = "scenario.module_inst_delete_wait_grp" # Replication Group REPL_INST = "scenario.repl_inst_grp" REPL_INST_CREATE = "scenario.repl_inst_create_grp" REPL_INST_CREATE_WAIT = "scenario.repl_inst_create_wait_grp" REPL_INST_MULTI_CREATE = "scenario.repl_inst_multi_create_grp" REPL_INST_DELETE_NON_AFFINITY_WAIT = "scenario.repl_inst_delete_noaff_wait_grp" REPL_INST_MULTI_CREATE_WAIT = "scenario.repl_inst_multi_create_wait_grp" REPL_INST_MULTI_PROMOTE = "scenario.repl_inst_multi_promote_grp" REPL_INST_DELETE = "scenario.repl_inst_delete_grp" REPL_INST_DELETE_WAIT = "scenario.repl_inst_delete_wait_grp" # Root Actions Group ROOT_ACTION_ENABLE = "scenario.root_action_enable_grp" ROOT_ACTION_DISABLE = "scenario.root_action_disable_grp" ROOT_ACTION_INST = "scenario.root_action_inst_grp" ROOT_ACTION_INST_CREATE = "scenario.root_action_inst_create_grp" ROOT_ACTION_INST_CREATE_WAIT = "scenario.root_action_inst_create_wait_grp" ROOT_ACTION_INST_DELETE = "scenario.root_action_inst_delete_grp" ROOT_ACTION_INST_DELETE_WAIT = "scenario.root_action_inst_delete_wait_grp" # User Actions Group USER_ACTION_CREATE = "scenario.user_action_create_grp" USER_ACTION_DELETE = "scenario.user_action_delete_grp" USER_ACTION_INST = "scenario.user_action_inst_grp" USER_ACTION_INST_CREATE = "scenario.user_action_inst_create_grp" USER_ACTION_INST_CREATE_WAIT = "scenario.user_action_inst_create_wait_grp" USER_ACTION_INST_DELETE = "scenario.user_action_inst_delete_grp" USER_ACTION_INST_DELETE_WAIT = "scenario.user_action_inst_delete_wait_grp" # Instance Log Group INST_LOG = "scenario.inst_log_grp" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/scenario/groups/backup_group.py0000644000175000017500000003550400000000000024634 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.backup_restore_group" class BackupRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'backup_runners' _runner_cls = 'BackupRunner' @test(depends_on_groups=[groups.INST_CREATE], groups=[GROUP, groups.BACKUP, groups.BACKUP_CREATE]) class BackupCreateGroup(TestGroup): """Test Backup Create functionality.""" def __init__(self): super(BackupCreateGroup, self).__init__( BackupRunnerFactory.instance()) @test def add_data_for_backup(self): """Add data to instance for restore verification.""" self.test_runner.run_add_data_for_backup() @test(runs_after=[add_data_for_backup]) def verify_data_for_backup(self): """Verify data in instance.""" self.test_runner.run_verify_data_for_backup() @test(runs_after=[verify_data_for_backup]) def save_backup_counts(self): """Store the existing backup counts.""" self.test_runner.run_save_backup_counts() @test(runs_after=[save_backup_counts]) def backup_create(self): """Check that create backup is started successfully.""" self.test_runner.run_backup_create() @test(depends_on_classes=[BackupCreateGroup], groups=[GROUP, groups.BACKUP_CREATE_NEGATIVE]) class BackupCreateNegativeGroup(TestGroup): """Test Backup Create Negative functionality.""" def __init__(self): super(BackupCreateNegativeGroup, self).__init__( BackupRunnerFactory.instance()) @test def backup_delete_while_backup_running(self): """Ensure delete backup fails while it is running.""" self.test_runner.run_backup_delete_while_backup_running() @test(runs_after=[backup_delete_while_backup_running]) def restore_instance_from_not_completed_backup(self): """Ensure a restore fails while the backup is running.""" self.test_runner.run_restore_instance_from_not_completed_backup() @test(runs_after=[restore_instance_from_not_completed_backup]) def backup_create_another_backup_running(self): """Ensure create backup fails when another backup is running.""" self.test_runner.run_backup_create_another_backup_running() @test(runs_after=[backup_create_another_backup_running]) def instance_action_right_after_backup_create(self): """Ensure any instance action fails while backup is running.""" self.test_runner.run_instance_action_right_after_backup_create() @test(runs_after=[instance_action_right_after_backup_create]) def delete_unknown_backup(self): """Ensure deleting an unknown backup fails.""" self.test_runner.run_delete_unknown_backup() @test(runs_after=[instance_action_right_after_backup_create]) def backup_create_instance_invalid(self): """Ensure create backup fails with invalid instance id.""" self.test_runner.run_backup_create_instance_invalid() @test(runs_after=[instance_action_right_after_backup_create]) def backup_create_instance_not_found(self): """Ensure create backup fails with unknown instance id.""" self.test_runner.run_backup_create_instance_not_found() 
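# The scenario classes in this file are sequenced with two proboscis
# mechanisms: depends_on/depends_on_classes (ordering plus a skip when the
# prerequisite fails) and runs_after (ordering only). A minimal,
# self-contained sketch of the same pattern follows; the 'demo.*' group
# names and test bodies are illustrative, not part of trove.
from proboscis import test


@test(groups=['demo.setup'])
class DemoSetupGroup(object):
    @test
    def make_resource(self):
        """Create the shared resource."""
        pass


@test(depends_on_classes=[DemoSetupGroup], groups=['demo.checks'])
class DemoChecksGroup(object):
    """Skipped entirely if DemoSetupGroup failed."""

    @test
    def positive_check(self):
        pass

    # Ordered after positive_check, but still run if positive_check fails:
    @test(runs_after=[positive_check])
    def negative_check(self):
        pass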
@test(depends_on_classes=[BackupCreateNegativeGroup], groups=[GROUP, groups.BACKUP, groups.BACKUP_CREATE_WAIT]) class BackupCreateWaitGroup(TestGroup): """Wait for Backup Create to Complete.""" def __init__(self): super(BackupCreateWaitGroup, self).__init__( BackupRunnerFactory.instance()) @test def backup_create_completed(self): """Check that the backup completes successfully.""" self.test_runner.run_backup_create_completed() @test(depends_on=[backup_create_completed]) def instance_goes_active(self): """Check that the instance goes active after the backup.""" self.test_runner.run_instance_goes_active() @test(depends_on=[backup_create_completed]) def backup_list(self): """Test list backups.""" self.test_runner.run_backup_list() @test(depends_on=[backup_create_completed]) def backup_list_filter_datastore(self): """Test list backups and filter by datastore.""" self.test_runner.run_backup_list_filter_datastore() @test(depends_on=[backup_create_completed]) def backup_list_filter_datastore_not_found(self): """Test list backups and filter by unknown datastore.""" self.test_runner.run_backup_list_filter_datastore_not_found() @test(depends_on=[backup_create_completed]) def backup_list_for_instance(self): """Test backup list for instance.""" self.test_runner.run_backup_list_for_instance() @test(depends_on=[backup_create_completed]) def backup_get(self): """Test backup show.""" self.test_runner.run_backup_get() @test(depends_on=[backup_create_completed]) def backup_get_unauthorized_user(self): """Ensure backup show fails for an unauthorized user.""" self.test_runner.run_backup_get_unauthorized_user() @test(depends_on_classes=[BackupCreateWaitGroup], groups=[GROUP, groups.BACKUP_INC, groups.BACKUP_INC_CREATE]) class BackupIncCreateGroup(TestGroup): """Test Backup Incremental Create functionality.""" def __init__(self): super(BackupIncCreateGroup, self).__init__( BackupRunnerFactory.instance()) @test def add_data_for_inc_backup_1(self): """Add data to instance for inc backup 1.""" self.test_runner.run_add_data_for_inc_backup_1() @test(depends_on=[add_data_for_inc_backup_1]) def verify_data_for_inc_backup_1(self): """Verify data in instance for inc backup 1.""" self.test_runner.run_verify_data_for_inc_backup_1() @test(depends_on=[verify_data_for_inc_backup_1]) def inc_backup_1(self): """Run incremental backup 1.""" self.test_runner.run_inc_backup_1() @test(depends_on=[inc_backup_1]) def wait_for_inc_backup_1(self): """Check that inc backup 1 completes successfully.""" self.test_runner.run_wait_for_inc_backup_1() @test(depends_on=[wait_for_inc_backup_1]) def add_data_for_inc_backup_2(self): """Add data to instance for inc backup 2.""" self.test_runner.run_add_data_for_inc_backup_2() @test(depends_on=[add_data_for_inc_backup_2]) def verify_data_for_inc_backup_2(self): """Verify data in instance for inc backup 2.""" self.test_runner.run_verify_data_for_inc_backup_2() @test(depends_on=[wait_for_inc_backup_1], runs_after=[verify_data_for_inc_backup_2]) def instance_goes_active_inc_1(self): """Check that the instance goes active after the inc 1 backup.""" self.test_runner.run_instance_goes_active() @test(depends_on=[verify_data_for_inc_backup_2], runs_after=[instance_goes_active_inc_1]) def inc_backup_2(self): """Run incremental backup 2.""" self.test_runner.run_inc_backup_2() @test(depends_on=[inc_backup_2]) def wait_for_inc_backup_2(self): """Check that inc backup 2 completes successfully.""" self.test_runner.run_wait_for_inc_backup_2() @test(depends_on=[wait_for_inc_backup_2]) def 
instance_goes_active_inc_2(self): """Check that the instance goes active after the inc 2 backup.""" self.test_runner.run_instance_goes_active() @test(depends_on_classes=[BackupIncCreateGroup], groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_CREATE]) class BackupInstCreateGroup(TestGroup): """Test Backup Instance Create functionality.""" def __init__(self): super(BackupInstCreateGroup, self).__init__( BackupRunnerFactory.instance()) @test def restore_from_backup(self): """Check that restoring an instance from a backup starts.""" self.test_runner.run_restore_from_backup() @test(depends_on_classes=[BackupInstCreateGroup], groups=[GROUP, groups.BACKUP_INC_INST, groups.BACKUP_INC_INST_CREATE]) class BackupIncInstCreateGroup(TestGroup): """Test Backup Incremental Instance Create functionality.""" def __init__(self): super(BackupIncInstCreateGroup, self).__init__( BackupRunnerFactory.instance()) @test def restore_from_inc_1_backup(self): """Check that restoring an instance from inc 1 backup starts.""" self.test_runner.run_restore_from_inc_1_backup() @test(depends_on_classes=[BackupIncInstCreateGroup], groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_CREATE_WAIT]) class BackupInstCreateWaitGroup(TestGroup): """Test Backup Instance Create completes.""" def __init__(self): super(BackupInstCreateWaitGroup, self).__init__( BackupRunnerFactory.instance()) @test def restore_from_backup_completed(self): """Wait until restoring an instance from a backup completes.""" self.test_runner.run_restore_from_backup_completed() @test(depends_on=[restore_from_backup_completed]) def verify_data_in_restored_instance(self): """Verify data in restored instance.""" self.test_runner.run_verify_data_in_restored_instance() @test(depends_on=[restore_from_backup_completed]) def verify_databases_in_restored_instance(self): """Verify databases in restored instance.""" self.test_runner.run_verify_databases_in_restored_instance() @test(depends_on_classes=[BackupInstCreateWaitGroup], groups=[GROUP, groups.BACKUP_INC_INST, groups.BACKUP_INC_INST_CREATE_WAIT]) class BackupIncInstCreateWaitGroup(TestGroup): """Test Backup Incremental Instance Create completes.""" def __init__(self): super(BackupIncInstCreateWaitGroup, self).__init__( BackupRunnerFactory.instance()) @test def restore_from_inc_1_backup_completed(self): """Wait until restoring an inst from inc 1 backup completes.""" self.test_runner.run_restore_from_inc_1_backup_completed() @test(depends_on=[restore_from_inc_1_backup_completed]) def verify_data_in_restored_inc_1_instance(self): """Verify data in restored inc 1 instance.""" self.test_runner.run_verify_data_in_restored_inc_1_instance() @test(depends_on=[restore_from_inc_1_backup_completed]) def verify_databases_in_restored_inc_1_instance(self): """Verify databases in restored inc 1 instance.""" self.test_runner.run_verify_databases_in_restored_inc_1_instance() @test(depends_on_classes=[BackupIncInstCreateWaitGroup], groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE]) class BackupInstDeleteGroup(TestGroup): """Test Backup Instance Delete functionality.""" def __init__(self): super(BackupInstDeleteGroup, self).__init__( BackupRunnerFactory.instance()) @test def delete_restored_instance(self): """Test deleting the restored instance.""" self.test_runner.run_delete_restored_instance() @test(depends_on_classes=[BackupInstDeleteGroup], groups=[GROUP, groups.BACKUP_INC_INST, groups.BACKUP_INC_INST_DELETE]) class BackupIncInstDeleteGroup(TestGroup): """Test Backup Incremental Instance Delete 
functionality.""" def __init__(self): super(BackupIncInstDeleteGroup, self).__init__( BackupRunnerFactory.instance()) @test def delete_restored_inc_1_instance(self): """Test deleting the restored inc 1 instance.""" self.test_runner.run_delete_restored_inc_1_instance() @test(depends_on_classes=[BackupIncInstDeleteGroup], groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE_WAIT]) class BackupInstDeleteWaitGroup(TestGroup): """Test Backup Instance Delete completes.""" def __init__(self): super(BackupInstDeleteWaitGroup, self).__init__( BackupRunnerFactory.instance()) @test def wait_for_restored_instance_delete(self): """Wait until deleting the restored instance completes.""" self.test_runner.run_wait_for_restored_instance_delete() @test(depends_on_classes=[BackupInstDeleteWaitGroup], groups=[GROUP, groups.BACKUP_INC_INST, groups.BACKUP_INC_INST_DELETE_WAIT]) class BackupIncInstDeleteWaitGroup(TestGroup): """Test Backup Incremental Instance Delete completes.""" def __init__(self): super(BackupIncInstDeleteWaitGroup, self).__init__( BackupRunnerFactory.instance()) @test def wait_for_restored_inc_1_instance_delete(self): """Wait until deleting the restored inc 1 instance completes.""" self.test_runner.run_wait_for_restored_inc_1_instance_delete() @test(depends_on_classes=[BackupIncInstDeleteWaitGroup], groups=[GROUP, groups.BACKUP_INC, groups.BACKUP_INC_DELETE]) class BackupIncDeleteGroup(TestGroup): """Test Backup Incremental Delete functionality.""" def __init__(self): super(BackupIncDeleteGroup, self).__init__( BackupRunnerFactory.instance()) @test def delete_inc_2_backup(self): """Test deleting the inc 2 backup.""" # We only delete the inc 2 backup, as the inc 1 should be deleted # by the full backup delete that runs after. self.test_runner.run_delete_inc_2_backup() @test(depends_on_classes=[BackupIncDeleteGroup], groups=[GROUP, groups.BACKUP, groups.BACKUP_DELETE]) class BackupDeleteGroup(TestGroup): """Test Backup Delete functionality.""" def __init__(self): super(BackupDeleteGroup, self).__init__( BackupRunnerFactory.instance()) @test def delete_backup_unauthorized_user(self): """Ensure deleting backup by an unauthorized user fails.""" self.test_runner.run_delete_backup_unauthorized_user() @test(runs_after=[delete_backup_unauthorized_user]) def delete_backup(self): """Test deleting the backup.""" self.test_runner.run_delete_backup() @test(depends_on=[delete_backup]) def check_for_incremental_backup(self): """Test that backup children are deleted.""" self.test_runner.run_check_for_incremental_backup() @test def remove_backup_data_from_instance(self): """Remove the backup data from the original instance.""" self.test_runner.run_remove_backup_data_from_instance() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/groups/cluster_group.py0000644000175000017500000004527200000000000025053 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.cluster_group" class ClusterRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'cluster_runners' _runner_cls = 'ClusterRunner' @test(groups=[GROUP, groups.CLUSTER_CFGGRP_CREATE], runs_after_groups=[groups.MODULE_DELETE, groups.CFGGRP_INST_DELETE, groups.INST_ACTIONS_RESIZE_WAIT, groups.DB_ACTION_INST_DELETE, groups.USER_ACTION_DELETE, groups.USER_ACTION_INST_DELETE, groups.ROOT_ACTION_INST_DELETE, groups.REPL_INST_DELETE_WAIT, groups.INST_DELETE]) class ClusterConfigurationCreateGroup(TestGroup): def __init__(self): super(ClusterConfigurationCreateGroup, self).__init__( ClusterRunnerFactory.instance()) @test def create_initial_configuration(self): """Create a configuration group for a new cluster.""" self.test_runner.run_initial_configuration_create() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_CREATE], runs_after_groups=[groups.CLUSTER_CFGGRP_CREATE]) class ClusterCreateGroup(TestGroup): def __init__(self): super(ClusterCreateGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_create(self): """Create a cluster.""" self.test_runner.run_cluster_create() @test(groups=[GROUP, groups.CLUSTER_CREATE_WAIT], depends_on_groups=[groups.CLUSTER_CREATE], runs_after_groups=[groups.MODULE_INST_DELETE_WAIT, groups.CFGGRP_INST_DELETE_WAIT, groups.DB_ACTION_INST_DELETE_WAIT, groups.USER_ACTION_INST_DELETE_WAIT, groups.ROOT_ACTION_INST_DELETE_WAIT, groups.INST_DELETE_WAIT]) class ClusterCreateWaitGroup(TestGroup): def __init__(self): super(ClusterCreateWaitGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_create_wait(self): """Wait for cluster create to complete.""" self.test_runner.run_cluster_create_wait() @test(depends_on=[cluster_create_wait]) def verify_initial_configuration(self): """Verify initial configuration values on the cluster.""" self.test_runner.run_verify_initial_configuration() @test(depends_on=[cluster_create_wait]) def add_initial_cluster_data(self): """Add data to cluster.""" self.test_runner.run_add_initial_cluster_data() @test(depends_on=[add_initial_cluster_data]) def verify_initial_cluster_data(self): """Verify the initial data exists on cluster.""" self.test_runner.run_verify_initial_cluster_data() @test(depends_on=[cluster_create_wait]) def cluster_list(self): """List the clusters.""" self.test_runner.run_cluster_list() @test(depends_on=[cluster_create_wait]) def cluster_show(self): """Show a cluster.""" self.test_runner.run_cluster_show() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_RESTART], depends_on_groups=[groups.CLUSTER_CREATE_WAIT]) class ClusterRestartGroup(TestGroup): def __init__(self): super(ClusterRestartGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_restart(self): """Restart the cluster.""" self.test_runner.run_cluster_restart() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_RESTART_WAIT], depends_on_groups=[groups.CLUSTER_ACTIONS_RESTART]) class ClusterRestartWaitGroup(TestGroup): def __init__(self): super(ClusterRestartWaitGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_restart_wait(self): """Wait for cluster restart to complete.""" self.test_runner.run_cluster_restart_wait() 
@test(depends_on=[cluster_restart_wait]) def verify_initial_cluster_data(self): """Verify the initial data still exists after cluster restart.""" self.test_runner.run_verify_initial_cluster_data() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_ROOT_ENABLE], depends_on_groups=[groups.CLUSTER_CREATE_WAIT], runs_after_groups=[groups.CLUSTER_ACTIONS_RESTART_WAIT]) class ClusterRootEnableGroup(TestGroup): def __init__(self): super(ClusterRootEnableGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_root_enable(self): """Root Enable.""" self.test_runner.run_cluster_root_enable() @test(depends_on=[cluster_root_enable]) def verify_cluster_root_enable(self): """Verify Root Enable.""" self.test_runner.run_verify_cluster_root_enable() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_GROW_SHRINK, groups.CLUSTER_ACTIONS_GROW], depends_on_groups=[groups.CLUSTER_CREATE_WAIT], runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_ENABLE]) class ClusterGrowGroup(TestGroup): def __init__(self): super(ClusterGrowGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_grow(self): """Grow cluster.""" self.test_runner.run_cluster_grow() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_GROW_SHRINK, groups.CLUSTER_ACTIONS_GROW_WAIT], depends_on_groups=[groups.CLUSTER_ACTIONS_GROW]) class ClusterGrowWaitGroup(TestGroup): def __init__(self): super(ClusterGrowWaitGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_grow_wait(self): """Wait for cluster grow to complete.""" self.test_runner.run_cluster_grow_wait() @test(depends_on=[cluster_grow_wait]) def verify_initial_configuration(self): """Verify initial configuration values on the cluster.""" self.test_runner.run_verify_initial_configuration() @test(depends_on=[cluster_grow_wait]) def verify_initial_cluster_data_after_grow(self): """Verify the initial data still exists after cluster grow.""" self.test_runner.run_verify_initial_cluster_data() @test(depends_on=[cluster_grow_wait], runs_after=[verify_initial_cluster_data_after_grow]) def add_grow_cluster_data(self): """Add more data to cluster after grow.""" self.test_runner.run_add_grow_cluster_data() @test(depends_on=[add_grow_cluster_data]) def verify_grow_cluster_data(self): """Verify the data added after cluster grow.""" self.test_runner.run_verify_grow_cluster_data() @test(depends_on=[add_grow_cluster_data], runs_after=[verify_grow_cluster_data]) def remove_grow_cluster_data(self): """Remove the data added after cluster grow.""" self.test_runner.run_remove_grow_cluster_data() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_ROOT_ACTIONS, groups.CLUSTER_ACTIONS_ROOT_GROW], depends_on_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT]) class ClusterRootEnableGrowGroup(TestGroup): def __init__(self): super(ClusterRootEnableGrowGroup, self).__init__( ClusterRunnerFactory.instance()) @test def verify_cluster_root_enable_after_grow(self): """Verify Root Enabled after grow.""" self.test_runner.run_verify_cluster_root_enable() @test(groups=[GROUP, groups.CLUSTER_UPGRADE], depends_on_groups=[groups.CLUSTER_CREATE_WAIT], runs_after_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT, groups.CLUSTER_ACTIONS_ROOT_GROW]) class ClusterUpgradeGroup(TestGroup): def __init__(self): super(ClusterUpgradeGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_upgrade(self): """Upgrade cluster.""" self.test_runner.run_cluster_upgrade() @test(groups=[GROUP, groups.CLUSTER_UPGRADE_WAIT], 
depends_on_groups=[groups.CLUSTER_UPGRADE]) class ClusterUpgradeWaitGroup(TestGroup): def __init__(self): super(ClusterUpgradeWaitGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_upgrade_wait(self): """Wait for cluster upgrade to complete.""" self.test_runner.run_cluster_upgrade_wait() @test(depends_on=[cluster_upgrade_wait]) def verify_initial_configuration(self): """Verify initial configuration values on the cluster.""" self.test_runner.run_verify_initial_configuration() @test(depends_on=[cluster_upgrade_wait]) def verify_initial_cluster_data_after_upgrade(self): """Verify the initial data still exists after cluster upgrade.""" self.test_runner.run_verify_initial_cluster_data() @test(depends_on=[cluster_upgrade_wait], runs_after=[verify_initial_cluster_data_after_upgrade]) def add_upgrade_cluster_data_after_upgrade(self): """Add more data to cluster after upgrade.""" self.test_runner.run_add_upgrade_cluster_data() @test(depends_on=[add_upgrade_cluster_data_after_upgrade]) def verify_upgrade_cluster_data_after_upgrade(self): """Verify the data added after cluster upgrade.""" self.test_runner.run_verify_upgrade_cluster_data() @test(depends_on=[add_upgrade_cluster_data_after_upgrade], runs_after=[verify_upgrade_cluster_data_after_upgrade]) def remove_upgrade_cluster_data_after_upgrade(self): """Remove the data added after cluster upgrade.""" self.test_runner.run_remove_upgrade_cluster_data() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_GROW_SHRINK, groups.CLUSTER_ACTIONS_SHRINK], depends_on_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT], runs_after_groups=[groups.CLUSTER_UPGRADE_WAIT]) class ClusterShrinkGroup(TestGroup): def __init__(self): super(ClusterShrinkGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_shrink(self): """Shrink cluster.""" self.test_runner.run_cluster_shrink() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_SHRINK_WAIT], depends_on_groups=[groups.CLUSTER_ACTIONS_SHRINK]) class ClusterShrinkWaitGroup(TestGroup): def __init__(self): super(ClusterShrinkWaitGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_shrink_wait(self): """Wait for the cluster shrink to complete.""" self.test_runner.run_cluster_shrink_wait() @test(depends_on=[cluster_shrink_wait]) def verify_initial_configuration(self): """Verify initial configuration values on the cluster.""" self.test_runner.run_verify_initial_configuration() @test(depends_on=[cluster_shrink_wait]) def verify_initial_cluster_data_after_shrink(self): """Verify the initial data still exists after cluster shrink.""" self.test_runner.run_verify_initial_cluster_data() @test(runs_after=[verify_initial_cluster_data_after_shrink]) def add_shrink_cluster_data(self): """Add more data to cluster after shrink.""" self.test_runner.run_add_shrink_cluster_data() @test(depends_on=[add_shrink_cluster_data]) def verify_shrink_cluster_data(self): """Verify the data added after cluster shrink.""" self.test_runner.run_verify_shrink_cluster_data() @test(depends_on=[add_shrink_cluster_data], runs_after=[verify_shrink_cluster_data]) def remove_shrink_cluster_data(self): """Remove the data added after cluster shrink.""" self.test_runner.run_remove_shrink_cluster_data() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_ROOT_ACTIONS, groups.CLUSTER_ACTIONS_ROOT_SHRINK], depends_on_groups=[groups.CLUSTER_ACTIONS_SHRINK_WAIT]) class ClusterRootEnableShrinkGroup(TestGroup): def __init__(self): super(ClusterRootEnableShrinkGroup, 
self).__init__( ClusterRunnerFactory.instance()) @test def verify_cluster_root_enable_after_shrink(self): """Verify Root Enable after shrink.""" self.test_runner.run_verify_cluster_root_enable() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_ACTIONS_CFGGRP_ACTIONS], depends_on_groups=[groups.CLUSTER_CREATE_WAIT], runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_SHRINK]) class ClusterConfigurationActionsGroup(TestGroup): def __init__(self): super(ClusterConfigurationActionsGroup, self).__init__( ClusterRunnerFactory.instance()) @test def detach_initial_configuration(self): """Detach initial configuration group.""" self.test_runner.run_detach_initial_configuration() @test(depends_on=[detach_initial_configuration]) def restart_cluster_after_detach(self): """Restarting cluster after configuration change.""" self.test_runner.restart_after_configuration_change() @test def create_dynamic_configuration(self): """Create a configuration group with only dynamic entries.""" self.test_runner.run_create_dynamic_configuration() @test def create_non_dynamic_configuration(self): """Create a configuration group with only non-dynamic entries.""" self.test_runner.run_create_non_dynamic_configuration() @test(depends_on=[create_dynamic_configuration, restart_cluster_after_detach]) def attach_dynamic_configuration(self): """Test attach dynamic group.""" self.test_runner.run_attach_dynamic_configuration() @test(depends_on=[attach_dynamic_configuration]) def verify_dynamic_configuration(self): """Verify dynamic values on the cluster.""" self.test_runner.run_verify_dynamic_configuration() @test(depends_on=[attach_dynamic_configuration], runs_after=[verify_dynamic_configuration]) def detach_dynamic_configuration(self): """Test detach dynamic group.""" self.test_runner.run_detach_dynamic_configuration() @test(depends_on=[create_non_dynamic_configuration, detach_initial_configuration], runs_after=[detach_dynamic_configuration]) def attach_non_dynamic_configuration(self): """Test attach non-dynamic group.""" self.test_runner.run_attach_non_dynamic_configuration() @test(depends_on=[attach_non_dynamic_configuration]) def restart_cluster_after_attach(self): """Restarting cluster after configuration change.""" self.test_runner.restart_after_configuration_change() @test(depends_on=[restart_cluster_after_attach]) def verify_non_dynamic_configuration(self): """Verify non-dynamic values on the cluster.""" self.test_runner.run_verify_non_dynamic_configuration() @test(depends_on=[attach_non_dynamic_configuration], runs_after=[verify_non_dynamic_configuration]) def detach_non_dynamic_configuration(self): """Test detach non-dynamic group.""" self.test_runner.run_detach_non_dynamic_configuration() @test(runs_after=[detach_dynamic_configuration, detach_non_dynamic_configuration]) def verify_initial_cluster_data(self): """Verify the initial data still exists.""" self.test_runner.run_verify_initial_cluster_data() @test(depends_on=[detach_dynamic_configuration]) def delete_dynamic_configuration(self): """Test delete dynamic configuration group.""" self.test_runner.run_delete_dynamic_configuration() @test(depends_on=[detach_non_dynamic_configuration]) def delete_non_dynamic_configuration(self): """Test delete non-dynamic configuration group.""" self.test_runner.run_delete_non_dynamic_configuration() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_DELETE], depends_on_groups=[groups.CLUSTER_CREATE_WAIT], runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_ENABLE, groups.CLUSTER_ACTIONS_ROOT_GROW, 
groups.CLUSTER_ACTIONS_ROOT_SHRINK, groups.CLUSTER_ACTIONS_GROW_WAIT, groups.CLUSTER_ACTIONS_SHRINK_WAIT, groups.CLUSTER_UPGRADE_WAIT, groups.CLUSTER_ACTIONS_RESTART_WAIT, groups.CLUSTER_CFGGRP_CREATE, groups.CLUSTER_ACTIONS_CFGGRP_ACTIONS]) class ClusterDeleteGroup(TestGroup): def __init__(self): super(ClusterDeleteGroup, self).__init__( ClusterRunnerFactory.instance()) @test def remove_initial_cluster_data(self): """Remove the initial data from cluster.""" self.test_runner.run_remove_initial_cluster_data() @test(runs_after=[remove_initial_cluster_data]) def cluster_delete(self): """Delete an existing cluster.""" self.test_runner.run_cluster_delete() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_DELETE_WAIT], depends_on_groups=[groups.CLUSTER_DELETE]) class ClusterDeleteWaitGroup(TestGroup): def __init__(self): super(ClusterDeleteWaitGroup, self).__init__( ClusterRunnerFactory.instance()) @test def cluster_delete_wait(self): """Wait for the existing cluster to be gone.""" self.test_runner.run_cluster_delete_wait() @test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_CFGGRP_DELETE], depends_on_groups=[groups.CLUSTER_CFGGRP_CREATE], runs_after_groups=[groups.CLUSTER_DELETE_WAIT]) class ClusterConfigurationDeleteGroup(TestGroup): def __init__(self): super(ClusterConfigurationDeleteGroup, self).__init__( ClusterRunnerFactory.instance()) @test def delete_initial_configuration(self): """Delete initial configuration group.""" self.test_runner.run_delete_initial_configuration() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/scenario/groups/configuration_group.py0000644000175000017500000002626300000000000026240 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
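# The configuration scenarios above (cluster) and below (single instance)
# hinge on one distinction: dynamic settings take effect as soon as a group
# is attached, while non-dynamic settings only take effect after a restart
# (hence the restart_after_configuration_change steps). A toy,
# self-contained model of that rule; the option names are MySQL-flavoured
# assumptions, and each real datastore defines its own dynamic set.
DYNAMIC_OPTIONS = {'max_connections'}           # applied live
NON_DYNAMIC_OPTIONS = {'innodb_log_file_size'}  # applied on restart


class ToyInstance(object):
    def __init__(self):
        self.running_values = {}
        self.pending_values = {}

    def attach_configuration(self, overrides):
        for key, value in overrides.items():
            if key in DYNAMIC_OPTIONS:
                self.running_values[key] = value  # no restart needed
            else:
                self.pending_values[key] = value  # waits for restart

    def restart(self):
        self.running_values.update(self.pending_values)
        self.pending_values.clear()


inst = ToyInstance()
inst.attach_configuration({'max_connections': 200,
                           'innodb_log_file_size': '512M'})
assert inst.running_values == {'max_connections': 200}
inst.restart()  # mirrors the restart-after-attach step above
assert inst.running_values['innodb_log_file_size'] == '512M'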
from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.configuration_group" class ConfigurationRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'configuration_runners' _runner_cls = 'ConfigurationRunner' @test(groups=[GROUP, groups.CFGGRP_CREATE], depends_on_groups=[groups.BACKUP_DELETE]) class ConfigurationCreateGroup(TestGroup): """Test Configuration Group functionality.""" def __init__(self): super(ConfigurationCreateGroup, self).__init__( ConfigurationRunnerFactory.instance()) @test def create_bad_group(self): """Ensure a group with bad entries fails create.""" self.test_runner.run_create_bad_group() @test def create_invalid_groups(self): """Ensure a group with invalid entries fails create.""" self.test_runner.run_create_invalid_groups() @test def delete_non_existent_group(self): """Ensure delete non-existent group fails.""" self.test_runner.run_delete_non_existent_group() @test def delete_bad_group_id(self): """Ensure delete bad group fails.""" self.test_runner.run_delete_bad_group_id() @test def create_dynamic_group(self): """Create a group with only dynamic entries.""" self.test_runner.run_create_dynamic_group() @test def create_non_dynamic_group(self): """Create a group with only non-dynamic entries.""" self.test_runner.run_create_non_dynamic_group() @test(depends_on=[create_dynamic_group, create_non_dynamic_group]) def list_configuration_groups(self): """Test list configuration groups.""" self.test_runner.run_list_configuration_groups() @test(depends_on=[create_dynamic_group]) def dynamic_configuration_show(self): """Test show on dynamic group.""" self.test_runner.run_dynamic_configuration_show() @test(depends_on=[create_non_dynamic_group]) def non_dynamic_configuration_show(self): """Test show on non-dynamic group.""" self.test_runner.run_non_dynamic_configuration_show() @test(depends_on=[create_dynamic_group]) def dynamic_conf_get_unauthorized_user(self): """Ensure show dynamic fails with unauthorized user.""" self.test_runner.run_dynamic_conf_get_unauthorized_user() @test(depends_on=[create_non_dynamic_group]) def non_dynamic_conf_get_unauthorized_user(self): """Ensure show non-dynamic fails with unauthorized user.""" self.test_runner.run_non_dynamic_conf_get_unauthorized_user() @test(depends_on_classes=[ConfigurationCreateGroup], groups=[GROUP, groups.CFGGRP_INST, groups.CFGGRP_INST_CREATE]) class ConfigurationInstCreateGroup(TestGroup): """Test Instance Configuration Group Create functionality.""" def __init__(self): super(ConfigurationInstCreateGroup, self).__init__( ConfigurationRunnerFactory.instance()) @test def attach_non_existent_group(self): """Ensure attach non-existent group fails.""" self.test_runner.run_attach_non_existent_group() @test def attach_non_existent_group_to_non_existent_inst(self): """Ensure attach non-existent group to non-existent inst fails.""" self.test_runner.run_attach_non_existent_group_to_non_existent_inst() @test def detach_group_with_none_attached(self): """Test detach with none attached.""" self.test_runner.run_detach_group_with_none_attached() @test def attach_dynamic_group_to_non_existent_inst(self): """Ensure attach dynamic group to non-existent inst fails.""" self.test_runner.run_attach_dynamic_group_to_non_existent_inst() @test def attach_non_dynamic_group_to_non_existent_inst(self): """Ensure attach non-dynamic group to non-existent inst fails.""" 
self.test_runner.run_attach_non_dynamic_group_to_non_existent_inst() @test def list_dynamic_inst_conf_groups_before(self): """Count list instances for dynamic group before attach.""" self.test_runner.run_list_dynamic_inst_conf_groups_before() @test(depends_on=[list_dynamic_inst_conf_groups_before], runs_after=[attach_non_existent_group, detach_group_with_none_attached]) def attach_dynamic_group(self): """Test attach dynamic group.""" self.test_runner.run_attach_dynamic_group() @test(depends_on=[attach_dynamic_group]) def verify_dynamic_values(self): """Verify dynamic values on the instance.""" self.test_runner.run_verify_dynamic_values() @test(depends_on=[attach_dynamic_group], runs_after=[verify_dynamic_values]) def list_dynamic_inst_conf_groups_after(self): """Test list instances for dynamic group after attach.""" self.test_runner.run_list_dynamic_inst_conf_groups_after() @test(depends_on=[attach_dynamic_group], runs_after=[list_dynamic_inst_conf_groups_after]) def attach_dynamic_group_again(self): """Ensure attaching dynamic group again fails.""" self.test_runner.run_attach_dynamic_group_again() @test(depends_on=[attach_dynamic_group], runs_after=[attach_dynamic_group_again]) def delete_attached_dynamic_group(self): """Ensure deleting attached dynamic group fails.""" self.test_runner.run_delete_attached_dynamic_group() @test(depends_on=[attach_dynamic_group], runs_after=[delete_attached_dynamic_group]) def update_dynamic_group(self): """Test update dynamic group.""" self.test_runner.run_update_dynamic_group() @test(depends_on=[attach_dynamic_group], runs_after=[update_dynamic_group]) def detach_dynamic_group(self): """Test detach dynamic group.""" self.test_runner.run_detach_dynamic_group() @test(runs_after=[detach_dynamic_group]) def list_non_dynamic_inst_conf_groups_before(self): """Count list instances for non-dynamic group before attach.""" self.test_runner.run_list_non_dynamic_inst_conf_groups_before() @test(runs_after=[list_non_dynamic_inst_conf_groups_before, attach_non_existent_group]) def attach_non_dynamic_group(self): """Test attach non-dynamic group.""" self.test_runner.run_attach_non_dynamic_group() @test(depends_on=[attach_non_dynamic_group]) def verify_non_dynamic_values(self): """Verify non-dynamic values on the instance.""" self.test_runner.run_verify_non_dynamic_values() @test(depends_on=[attach_non_dynamic_group], runs_after=[verify_non_dynamic_values]) def list_non_dynamic_inst_conf_groups_after(self): """Test list instances for non-dynamic group after attach.""" self.test_runner.run_list_non_dynamic_inst_conf_groups_after() @test(depends_on=[attach_non_dynamic_group], runs_after=[list_non_dynamic_inst_conf_groups_after]) def attach_non_dynamic_group_again(self): """Ensure attaching non-dynamic group again fails.""" self.test_runner.run_attach_non_dynamic_group_again() @test(depends_on=[attach_non_dynamic_group], runs_after=[attach_non_dynamic_group_again]) def delete_attached_non_dynamic_group(self): """Ensure deleting attached non-dynamic group fails.""" self.test_runner.run_delete_attached_non_dynamic_group() @test(depends_on=[attach_non_dynamic_group], runs_after=[delete_attached_non_dynamic_group]) def update_non_dynamic_group(self): """Test update non-dynamic group.""" self.test_runner.run_update_non_dynamic_group() @test(depends_on=[attach_non_dynamic_group], runs_after=[update_non_dynamic_group]) def detach_non_dynamic_group(self): """Test detach non-dynamic group.""" self.test_runner.run_detach_non_dynamic_group() @test(runs_after=[detach_non_dynamic_group]) 
def create_instance_with_conf(self): """Test create instance with conf group.""" self.test_runner.run_create_instance_with_conf() @test(depends_on_classes=[ConfigurationInstCreateGroup], groups=[GROUP, groups.CFGGRP_INST, groups.CFGGRP_INST_CREATE_WAIT]) class ConfigurationInstCreateWaitGroup(TestGroup): """Test that Instance Configuration Group Create Completes.""" def __init__(self): super(ConfigurationInstCreateWaitGroup, self).__init__( ConfigurationRunnerFactory.instance()) @test def wait_for_conf_instance(self): """Test create instance with conf group completes.""" self.test_runner.run_wait_for_conf_instance() @test(depends_on=[wait_for_conf_instance]) def verify_instance_values(self): """Verify configuration values on the instance.""" self.test_runner.run_verify_instance_values() @test(depends_on_classes=[ConfigurationInstCreateWaitGroup], groups=[GROUP, groups.CFGGRP_INST, groups.CFGGRP_INST_DELETE]) class ConfigurationInstDeleteGroup(TestGroup): """Test Instance Configuration Group Delete functionality.""" def __init__(self): super(ConfigurationInstDeleteGroup, self).__init__( ConfigurationRunnerFactory.instance()) @test def delete_conf_instance(self): """Test delete instance with conf group.""" self.test_runner.run_delete_conf_instance() @test(depends_on_classes=[ConfigurationInstDeleteGroup], groups=[GROUP, groups.CFGGRP_INST, groups.CFGGRP_INST_DELETE_WAIT]) class ConfigurationInstDeleteWaitGroup(TestGroup): """Test that Instance Configuration Group Delete Completes.""" def __init__(self): super(ConfigurationInstDeleteWaitGroup, self).__init__( ConfigurationRunnerFactory.instance()) @test def wait_for_delete_conf_instance(self): """Wait for delete instance with conf group to complete.""" self.test_runner.run_wait_for_delete_conf_instance() @test(depends_on_classes=[ConfigurationInstDeleteWaitGroup], groups=[GROUP, groups.CFGGRP_DELETE]) class ConfigurationDeleteGroup(TestGroup): """Test Configuration Group Delete functionality.""" def __init__(self): super(ConfigurationDeleteGroup, self).__init__( ConfigurationRunnerFactory.instance()) @test def delete_dynamic_group(self): """Test delete dynamic group.""" self.test_runner.run_delete_dynamic_group() @test def delete_non_dynamic_group(self): """Test delete non-dynamic group.""" self.test_runner.run_delete_non_dynamic_group() trove-12.1.0.dev92/trove/tests/scenario/groups/database_actions_group.py # Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
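# NOTE: All of the scenario group files below order their tests with
# proboscis decorators: @test(depends_on=[...]) declares a hard
# dependency (the dependent test is skipped when the dependency fails),
# while @test(runs_after=[...]) only constrains execution order and the
# test is still attempted after a predecessor failure. The class-level
# depends_on_groups/runs_after_groups arguments chain whole groups, and
# hence whole files, together in the same way. A minimal illustrative
# sketch, using a hypothetical DemoGroup that is not part of this suite:
#
#     @test(groups=["demo"])
#     class DemoGroup(TestGroup):
#         @test
#         def step_one(self):
#             pass
#
#         @test(depends_on=[step_one])    # skipped if step_one fails
#         def step_two(self):
#             pass
#
#         @test(runs_after=[step_two])    # ordered only; always attempted
#         def step_three(self):
#             pass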
from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.database_actions_group" class DatabaseActionsRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'database_actions_runners' _runner_cls = 'DatabaseActionsRunner' class InstanceCreateRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'instance_create_runners' _runner_cls = 'InstanceCreateRunner' @test(depends_on_groups=[groups.CFGGRP_DELETE], groups=[GROUP, groups.DB_ACTION_CREATE]) class DatabaseActionsCreateGroup(TestGroup): """Test Database Actions Create functionality.""" def __init__(self): super(DatabaseActionsCreateGroup, self).__init__( DatabaseActionsRunnerFactory.instance()) @test def create_databases(self): """Create databases on an existing instance.""" self.test_runner.run_databases_create() @test(depends_on=[create_databases]) def list_databases(self): """List the created databases.""" self.test_runner.run_databases_list() @test(depends_on=[create_databases], runs_after=[list_databases]) def create_database_with_no_attributes(self): """Ensure creating a database with blank specification fails.""" self.test_runner.run_database_create_with_no_attributes() @test(depends_on=[create_databases], runs_after=[create_database_with_no_attributes]) def create_database_with_blank_name(self): """Ensure creating a database with blank name fails.""" self.test_runner.run_database_create_with_blank_name() @test(depends_on=[create_databases], runs_after=[create_database_with_blank_name]) def create_existing_database(self): """Ensure creating an existing database fails.""" self.test_runner.run_existing_database_create() @test(depends_on_classes=[DatabaseActionsCreateGroup], groups=[GROUP, groups.DB_ACTION_DELETE]) class DatabaseActionsDeleteGroup(TestGroup): """Test Database Actions Delete functionality.""" def __init__(self): super(DatabaseActionsDeleteGroup, self).__init__( DatabaseActionsRunnerFactory.instance()) @test def delete_database(self): """Delete the created databases.""" self.test_runner.run_database_delete() @test(runs_after=[delete_database]) def delete_nonexisting_database(self): """Delete non-existing databases.""" self.test_runner.run_nonexisting_database_delete() @test(runs_after=[delete_nonexisting_database]) def create_system_database(self): """Ensure creating a system database fails.""" self.test_runner.run_system_database_create() @test(runs_after=[create_system_database]) def delete_system_database(self): """Ensure deleting a system database fails.""" self.test_runner.run_system_database_delete() @test(depends_on_classes=[DatabaseActionsDeleteGroup], groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_CREATE]) class DatabaseActionsInstCreateGroup(TestGroup): """Test Database Actions Instance Create functionality.""" def __init__(self): super(DatabaseActionsInstCreateGroup, self).__init__( DatabaseActionsRunnerFactory.instance()) self.instance_create_runner = InstanceCreateRunnerFactory.instance() @test def create_initialized_instance(self): """Create an instance with initial databases.""" self.instance_create_runner.run_initialized_instance_create( with_dbs=True, with_users=False, configuration_id=None, name_suffix='_db') @test(depends_on_classes=[DatabaseActionsInstCreateGroup], groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_CREATE_WAIT]) class DatabaseActionsInstCreateWaitGroup(TestGroup): """Wait for Database Actions Instance Create to 
complete.""" def __init__(self): super(DatabaseActionsInstCreateWaitGroup, self).__init__( DatabaseActionsRunnerFactory.instance()) self.instance_create_runner = InstanceCreateRunnerFactory.instance() @test def wait_for_instances(self): """Waiting for database instance to become active.""" self.instance_create_runner.run_wait_for_init_instance() @test(depends_on=[wait_for_instances]) def add_initialized_instance_data(self): """Add data to the database instance.""" self.instance_create_runner.run_add_initialized_instance_data() @test(runs_after=[add_initialized_instance_data]) def validate_initialized_instance(self): """Validate the database instance data and properties.""" self.instance_create_runner.run_validate_initialized_instance() @test(depends_on_classes=[DatabaseActionsInstCreateWaitGroup], groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_DELETE]) class DatabaseActionsInstDeleteGroup(TestGroup): """Test Database Actions Instance Delete functionality.""" def __init__(self): super(DatabaseActionsInstDeleteGroup, self).__init__( DatabaseActionsRunnerFactory.instance()) self.instance_create_runner = InstanceCreateRunnerFactory.instance() @test def delete_initialized_instance(self): """Delete the database instance.""" self.instance_create_runner.run_initialized_instance_delete() @test(depends_on_classes=[DatabaseActionsInstDeleteGroup], groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_DELETE_WAIT]) class DatabaseActionsInstDeleteWaitGroup(TestGroup): """Wait for Database Actions Instance Delete to complete.""" def __init__(self): super(DatabaseActionsInstDeleteWaitGroup, self).__init__( DatabaseActionsRunnerFactory.instance()) self.instance_create_runner = InstanceCreateRunnerFactory.instance() @test def wait_for_delete_initialized_instance(self): """Wait for the database instance to delete.""" self.instance_create_runner.run_wait_for_init_delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/scenario/groups/guest_log_group.py0000644000175000017500000003057500000000000025362 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.guest_log_group" class GuestLogRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'guest_log_runners' _runner_cls = 'GuestLogRunner' @test(depends_on_groups=[groups.DB_ACTION_INST_DELETE_WAIT], groups=[GROUP, groups.INST_LOG]) class GuestLogGroup(TestGroup): """Test Guest Log functionality.""" def __init__(self): super(GuestLogGroup, self).__init__( GuestLogRunnerFactory.instance()) @test def test_log_list(self): """Test that log-list works.""" self.test_runner.run_test_log_list() @test def test_admin_log_list(self): """Test that log-list works for admin user.""" self.test_runner.run_test_admin_log_list() @test def test_log_enable_sys(self): """Ensure log-enable on SYS log fails.""" self.test_runner.run_test_log_enable_sys() @test def test_log_disable_sys(self): """Ensure log-disable on SYS log fails.""" self.test_runner.run_test_log_disable_sys() @test def test_log_show_unauth_user(self): """Ensure log-show by unauth client on USER log fails.""" self.test_runner.run_test_log_show_unauth_user() @test def test_log_list_unauth_user(self): """Ensure log-list by unauth client on USER log fails.""" self.test_runner.run_test_log_list_unauth_user() @test def test_log_generator_unauth_user(self): """Ensure log-generator by unauth client on USER log fails.""" self.test_runner.run_test_log_generator_unauth_user() @test def test_log_generator_publish_unauth_user(self): """Ensure log-generator by unauth client with publish fails.""" self.test_runner.run_test_log_generator_publish_unauth_user() @test def test_log_show_unexposed_user(self): """Ensure log-show on unexposed log fails for auth client.""" self.test_runner.run_test_log_show_unexposed_user() @test def test_log_enable_unexposed_user(self): """Ensure log-enable on unexposed log fails for auth client.""" self.test_runner.run_test_log_enable_unexposed_user() @test def test_log_disable_unexposed_user(self): """Ensure log-disable on unexposed log fails for auth client.""" self.test_runner.run_test_log_disable_unexposed_user() @test def test_log_publish_unexposed_user(self): """Ensure log-publish on unexposed log fails for auth client.""" self.test_runner.run_test_log_publish_unexposed_user() @test def test_log_discard_unexposed_user(self): """Ensure log-discard on unexposed log fails for auth client.""" self.test_runner.run_test_log_discard_unexposed_user() # USER log tests @test(runs_after=[test_log_list, test_admin_log_list]) def test_log_show(self): """Test that log-show works on USER log.""" self.test_runner.run_test_log_show() @test(runs_after=[test_log_show]) def test_log_enable_user(self): """Test log-enable on USER log.""" self.test_runner.run_test_log_enable_user() @test(runs_after=[test_log_enable_user]) def test_log_enable_flip_user(self): """Test that flipping restart-required log-enable works.""" self.test_runner.run_test_log_enable_flip_user() @test(runs_after=[test_log_enable_flip_user]) def test_restart_datastore(self): """Test restart datastore if required.""" self.test_runner.run_test_restart_datastore() @test(runs_after=[test_restart_datastore]) def test_wait_for_restart(self): """Wait for restart to complete.""" self.test_runner.run_test_wait_for_restart() @test(runs_after=[test_wait_for_restart]) def test_log_publish_user(self): """Test log-publish on USER log.""" self.test_runner.run_test_log_publish_user() 
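# The steps that follow load fresh data between the first and second
# log-publish so that the second publish has new USER log records to
# pick up, then exercise log-generator both with and without publish.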
@test(runs_after=[test_log_publish_user]) def test_add_data(self): """Add data for second log-publish on USER log.""" self.test_runner.run_test_add_data() @test(runs_after=[test_add_data]) def test_verify_data(self): """Verify data for second log-publish on USER log.""" self.test_runner.run_test_verify_data() @test(runs_after=[test_verify_data]) def test_log_publish_again_user(self): """Test log-publish again on USER log.""" self.test_runner.run_test_log_publish_again_user() @test(runs_after=[test_log_publish_again_user]) def test_log_generator_user(self): """Test log-generator on USER log.""" self.test_runner.run_test_log_generator_user() @test(runs_after=[test_log_generator_user]) def test_log_generator_publish_user(self): """Test log-generator with publish on USER log.""" self.test_runner.run_test_log_generator_publish_user() @test(runs_after=[test_log_generator_publish_user]) def test_log_generator_swift_client_user(self): """Test log-generator on USER log with passed-in Swift client.""" self.test_runner.run_test_log_generator_swift_client_user() @test(runs_after=[test_log_generator_swift_client_user]) def test_add_data_again(self): """Add more data for log-generator row-by-row test on USER log.""" self.test_runner.run_test_add_data_again() @test(runs_after=[test_add_data_again]) def test_verify_data_again(self): """Verify data for log-generator row-by-row test on USER log.""" self.test_runner.run_test_verify_data_again() @test(runs_after=[test_verify_data_again]) def test_log_generator_user_by_row(self): """Test log-generator on USER log row-by-row.""" self.test_runner.run_test_log_generator_user_by_row() @test(depends_on=[test_log_publish_user], runs_after=[test_log_generator_user_by_row]) def test_log_save_user(self): """Test log-save on USER log.""" self.test_runner.run_test_log_save_user() @test(depends_on=[test_log_publish_user], runs_after=[test_log_save_user]) def test_log_save_publish_user(self): """Test log-save on USER log with publish.""" self.test_runner.run_test_log_save_publish_user() @test(runs_after=[test_log_save_publish_user]) def test_log_discard_user(self): """Test log-discard on USER log.""" self.test_runner.run_test_log_discard_user() @test(runs_after=[test_log_discard_user]) def test_log_disable_user(self): """Test log-disable on USER log.""" self.test_runner.run_test_log_disable_user() @test(runs_after=[test_log_disable_user]) def test_restart_datastore_again(self): """Test restart datastore again if required.""" self.test_runner.run_test_restart_datastore() @test(runs_after=[test_restart_datastore_again]) def test_wait_for_restart_again(self): """Wait for restart to complete again.""" self.test_runner.run_test_wait_for_restart() @test(runs_after=[test_wait_for_restart_again]) def test_log_show_after_stop_details(self): """Get log-show details before adding data.""" self.test_runner.run_test_log_show_after_stop_details() @test(runs_after=[test_log_show_after_stop_details]) def test_add_data_again_after_stop(self): """Add more data to ensure logging has stopped on USER log.""" self.test_runner.run_test_add_data_again_after_stop() @test(runs_after=[test_add_data_again_after_stop]) def test_verify_data_again_after_stop(self): """Verify data for stopped logging on USER log.""" self.test_runner.run_test_verify_data_again_after_stop() @test(runs_after=[test_verify_data_again_after_stop]) def test_log_show_after_stop(self): """Test that log-show has same values on USER log.""" self.test_runner.run_test_log_show_after_stop() @test(runs_after=[test_log_show_after_stop]) 
def test_log_enable_user_after_stop(self): """Test log-enable still works on USER log.""" self.test_runner.run_test_log_enable_user_after_stop() @test(runs_after=[test_log_enable_user_after_stop]) def test_restart_datastore_after_stop_start(self): """Test restart datastore after stop/start if required.""" self.test_runner.run_test_restart_datastore() @test(runs_after=[test_restart_datastore_after_stop_start]) def test_wait_for_restart_after_stop_start(self): """Wait for restart to complete again after stop/start.""" self.test_runner.run_test_wait_for_restart() @test(runs_after=[test_wait_for_restart_after_stop_start]) def test_add_data_again_after_stop_start(self): """Add more data to ensure logging works again on USER log.""" self.test_runner.run_test_add_data_again_after_stop_start() @test(runs_after=[test_add_data_again_after_stop_start]) def test_verify_data_again_after_stop_start(self): """Verify data for re-enabled logging on USER log.""" self.test_runner.run_test_verify_data_again_after_stop_start() @test(runs_after=[test_verify_data_again_after_stop_start]) def test_log_publish_after_stop_start(self): """Test log-publish after stop/start on USER log.""" self.test_runner.run_test_log_publish_after_stop_start() @test(runs_after=[test_log_publish_after_stop_start]) def test_log_disable_user_after_stop_start(self): """Test log-disable on USER log after stop/start.""" self.test_runner.run_test_log_disable_user_after_stop_start() @test(runs_after=[test_log_disable_user_after_stop_start]) def test_restart_datastore_after_final_stop(self): """Test restart datastore again if required after final stop.""" self.test_runner.run_test_restart_datastore() @test(runs_after=[test_restart_datastore_after_final_stop]) def test_wait_for_restart_after_final_stop(self): """Wait for restart to complete again after final stop.""" self.test_runner.run_test_wait_for_restart() # SYS log tests @test def test_log_show_sys(self): """Test that log-show works for SYS log.""" self.test_runner.run_test_log_show_sys() @test(runs_after=[test_log_show_sys]) def test_log_publish_sys(self): """Test log-publish on SYS log.""" self.test_runner.run_test_log_publish_sys() @test(runs_after=[test_log_publish_sys]) def test_log_publish_again_sys(self): """Test log-publish again on SYS log.""" self.test_runner.run_test_log_publish_again_sys() @test(depends_on=[test_log_publish_again_sys]) def test_log_generator_sys(self): """Test log-generator on SYS log.""" self.test_runner.run_test_log_generator_sys() @test(runs_after=[test_log_generator_sys]) def test_log_generator_publish_sys(self): """Test log-generator with publish on SYS log.""" self.test_runner.run_test_log_generator_publish_sys() @test(depends_on=[test_log_publish_sys], runs_after=[test_log_generator_publish_sys]) def test_log_generator_swift_client_sys(self): """Test log-generator on SYS log with passed-in Swift client.""" self.test_runner.run_test_log_generator_swift_client_sys() @test(depends_on=[test_log_publish_sys], runs_after=[test_log_generator_swift_client_sys]) def test_log_save_sys(self): """Test log-save on SYS log.""" self.test_runner.run_test_log_save_sys() @test(runs_after=[test_log_save_sys]) def test_log_save_publish_sys(self): """Test log-save on SYS log with publish.""" self.test_runner.run_test_log_save_publish_sys() @test(runs_after=[test_log_save_publish_sys]) def test_log_discard_sys(self): """Test log-discard on SYS log.""" self.test_runner.run_test_log_discard_sys() trove-12.1.0.dev92/trove/tests/scenario/groups/instance_actions_group.py # Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.instance_actions_group" class InstanceActionsRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'instance_actions_runners' _runner_cls = 'InstanceActionsRunner' @test(depends_on_groups=[groups.INST_LOG], groups=[GROUP, groups.INST_ACTIONS]) class InstanceActionsGroup(TestGroup): """Test Instance Actions functionality.""" def __init__(self): super(InstanceActionsGroup, self).__init__( InstanceActionsRunnerFactory.instance()) @test def add_test_data(self): """Add test data.""" self.test_runner.run_add_test_data() @test(depends_on=[add_test_data]) def verify_test_data(self): """Verify test data.""" self.test_runner.run_verify_test_data() @test(runs_after=[verify_test_data]) def instance_restart(self): """Restart an existing instance.""" self.test_runner.run_instance_restart() @test(depends_on=[verify_test_data, instance_restart]) def verify_test_data_after_restart(self): """Verify test data after restart.""" self.test_runner.run_verify_test_data() @test(depends_on=[instance_restart], runs_after=[verify_test_data_after_restart]) def instance_resize_volume(self): """Resize attached volume.""" self.test_runner.run_instance_resize_volume() @test(depends_on=[verify_test_data, instance_resize_volume]) def verify_test_data_after_volume_resize(self): """Verify test data after volume resize.""" self.test_runner.run_verify_test_data() @test(depends_on=[add_test_data], runs_after=[verify_test_data_after_volume_resize]) def remove_test_data(self): """Remove test data.""" self.test_runner.run_remove_test_data() @test(depends_on_classes=[InstanceActionsGroup], groups=[GROUP, groups.INST_ACTIONS_RESIZE]) class InstanceActionsResizeGroup(TestGroup): """Test Instance Actions Resize functionality.""" def __init__(self): super(InstanceActionsResizeGroup, self).__init__( InstanceActionsRunnerFactory.instance()) @test def add_test_data(self): """Add test data.""" self.test_runner.run_add_test_data() @test(depends_on=[add_test_data]) def verify_test_data(self): """Verify test data.""" self.test_runner.run_verify_test_data() @test(runs_after=[verify_test_data]) def instance_resize_flavor(self): """Resize instance flavor.""" self.test_runner.run_instance_resize_flavor() @test(depends_on_classes=[InstanceActionsResizeGroup], groups=[GROUP, groups.INST_ACTIONS_RESIZE_WAIT]) class InstanceActionsResizeWaitGroup(TestGroup): """Test that Instance Actions Resize Completes.""" def __init__(self): super(InstanceActionsResizeWaitGroup, self).__init__( InstanceActionsRunnerFactory.instance()) @test def wait_for_instance_resize_flavor(self): """Wait
for resize instance flavor to complete.""" self.test_runner.run_wait_for_instance_resize_flavor() @test(depends_on=[wait_for_instance_resize_flavor]) def verify_test_data_after_flavor_resize(self): """Verify test data after flavor resize.""" self.test_runner.run_verify_test_data() @test(runs_after=[verify_test_data_after_flavor_resize]) def remove_test_data(self): """Remove test data.""" self.test_runner.run_remove_test_data() trove-12.1.0.dev92/trove/tests/scenario/groups/instance_create_group.py # Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.instance_create_group" class InstanceCreateRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'instance_create_runners' _runner_cls = 'InstanceCreateRunner' @test(groups=[GROUP, groups.INST_CREATE]) class InstanceCreateGroup(TestGroup): """Test Instance Create functionality.""" def __init__(self): super(InstanceCreateGroup, self).__init__( InstanceCreateRunnerFactory.instance()) @test def create_empty_instance(self): """Create an empty instance.""" self.test_runner.run_empty_instance_create() @test(depends_on_classes=[InstanceCreateGroup], groups=[GROUP, groups.INST_INIT_CREATE]) class InstanceInitCreateGroup(TestGroup): """Test Instance Init Create functionality.""" def __init__(self): super(InstanceInitCreateGroup, self).__init__( InstanceCreateRunnerFactory.instance()) @test def create_initial_configuration(self): """Create a configuration group for a new initialized instance.""" self.test_runner.run_initial_configuration_create() @test(runs_after=[create_initial_configuration]) def create_initialized_instance(self): """Create an instance with initial properties.""" self.test_runner.run_initialized_instance_create() @test(depends_on_classes=[InstanceCreateGroup], groups=[GROUP, groups.INST_CREATE_WAIT]) class InstanceCreateWaitGroup(TestGroup): """Test that Instance Create Completes.""" def __init__(self): super(InstanceCreateWaitGroup, self).__init__( InstanceCreateRunnerFactory.instance()) @test def wait_for_instance(self): """Waiting for main instance to become active.""" self.test_runner.run_wait_for_instance() @test(depends_on_classes=[InstanceCreateWaitGroup], groups=[GROUP, groups.INST_INIT_CREATE_WAIT]) class InstanceInitCreateWaitGroup(TestGroup): """Test that Instance Init Create Completes.""" def __init__(self): super(InstanceInitCreateWaitGroup, self).__init__( InstanceCreateRunnerFactory.instance()) @test def wait_for_init_instance(self): """Waiting for init instance to become active.""" self.test_runner.run_wait_for_init_instance() @test(depends_on=[wait_for_init_instance]) def add_initialized_instance_data(self):
"""Add data to the initialized instance.""" self.test_runner.run_add_initialized_instance_data() @test(runs_after=[add_initialized_instance_data]) def validate_initialized_instance(self): """Validate the initialized instance data and properties.""" self.test_runner.run_validate_initialized_instance() @test(depends_on_classes=[InstanceInitCreateWaitGroup], groups=[GROUP, groups.INST_INIT_DELETE]) class InstanceInitDeleteGroup(TestGroup): """Test Initialized Instance Delete functionality.""" def __init__(self): super(InstanceInitDeleteGroup, self).__init__( InstanceCreateRunnerFactory.instance()) @test def delete_initialized_instance(self): """Delete the initialized instance.""" self.test_runner.run_initialized_instance_delete() @test(depends_on_classes=[InstanceInitDeleteGroup], groups=[GROUP, groups.INST_INIT_DELETE_WAIT]) class InstanceInitDeleteWaitGroup(TestGroup): """Test that Initialized Instance Delete Completes.""" def __init__(self): super(InstanceInitDeleteWaitGroup, self).__init__( InstanceCreateRunnerFactory.instance()) @test def wait_for_init_delete(self): """Wait for the initialized instance to be gone.""" self.test_runner.run_wait_for_init_delete() @test(runs_after=[wait_for_init_delete]) def delete_initial_configuration(self): """Delete the initial configuration group.""" self.test_runner.run_initial_configuration_delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/scenario/groups/instance_delete_group.py0000644000175000017500000000375100000000000026514 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.instance_delete_group" class InstanceDeleteRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'instance_delete_runners' _runner_cls = 'InstanceDeleteRunner' @test(depends_on_groups=[groups.INST_CREATE], groups=[GROUP, groups.INST_DELETE], runs_after_groups=[groups.USER_ACTION_INST_DELETE_WAIT, groups.REPL_INST_DELETE_WAIT]) class InstanceDeleteGroup(TestGroup): """Test Instance Delete functionality.""" def __init__(self): super(InstanceDeleteGroup, self).__init__( InstanceDeleteRunnerFactory.instance()) @test def instance_delete(self): """Delete an existing instance.""" self.test_runner.run_instance_delete() @test(depends_on_classes=[InstanceDeleteGroup], groups=[GROUP, groups.INST_DELETE_WAIT]) class InstanceDeleteWaitGroup(TestGroup): """Test that Instance Delete Completes.""" def __init__(self): super(InstanceDeleteWaitGroup, self).__init__( InstanceDeleteRunnerFactory.instance()) @test def instance_delete_wait(self): """Wait for existing instance to be gone.""" self.test_runner.run_instance_delete_wait() trove-12.1.0.dev92/trove/tests/scenario/groups/instance_error_create_group.py # Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
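# NOTE: The error-create checks build two deliberately failing
# instances so the resulting fault message can be validated both as
# the owning tenant and as admin before the instances are cleaned up.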
from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.instance_error_create_group" class InstanceErrorCreateRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'instance_error_create_runners' _runner_cls = 'InstanceErrorCreateRunner' @test(depends_on_groups=[groups.INST_CREATE], groups=[GROUP, groups.INST_ERROR_CREATE]) class InstanceErrorCreateGroup(TestGroup): """Test Instance Error Create functionality.""" def __init__(self): super(InstanceErrorCreateGroup, self).__init__( InstanceErrorCreateRunnerFactory.instance()) @test def create_error_instance(self): """Create an instance in error state.""" self.test_runner.run_create_error_instance() @test(runs_after=[create_error_instance]) def create_error2_instance(self): """Create another instance in error state.""" self.test_runner.run_create_error2_instance() @test(depends_on_classes=[InstanceErrorCreateGroup], groups=[GROUP, groups.INST_ERROR_CREATE_WAIT]) class InstanceErrorCreateWaitGroup(TestGroup): """Test that Instance Error Create Completes.""" def __init__(self): super(InstanceErrorCreateWaitGroup, self).__init__( InstanceErrorCreateRunnerFactory.instance()) @test def wait_for_error_instances(self): """Wait for the error instances to fail.""" self.test_runner.run_wait_for_error_instances() @test(depends_on=[wait_for_error_instances]) def validate_error_instance(self): """Validate the error instance fault message.""" self.test_runner.run_validate_error_instance() @test(depends_on=[wait_for_error_instances], runs_after=[validate_error_instance]) def validate_error2_instance(self): """Validate the error2 instance fault message as admin.""" self.test_runner.run_validate_error2_instance() @test(depends_on_classes=[InstanceErrorCreateWaitGroup], groups=[GROUP, groups.INST_ERROR_DELETE]) class InstanceErrorDeleteGroup(TestGroup): """Test Instance Error Delete functionality.""" def __init__(self): super(InstanceErrorDeleteGroup, self).__init__( InstanceErrorCreateRunnerFactory.instance()) @test def delete_error_instances(self): """Delete the error instances.""" self.test_runner.run_delete_error_instances() @test(depends_on_classes=[InstanceErrorDeleteGroup], groups=[GROUP, groups.INST_ERROR_DELETE_WAIT]) class InstanceErrorDeleteWaitGroup(TestGroup): """Test that Instance Error Delete Completes.""" def __init__(self): super(InstanceErrorDeleteWaitGroup, self).__init__( InstanceErrorCreateRunnerFactory.instance()) @test def wait_for_error_delete(self): """Wait for the error instances to be gone.""" self.test_runner.run_wait_for_error_delete() trove-12.1.0.dev92/trove/tests/scenario/groups/instance_force_delete_group.py # Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
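# NOTE: The force-delete checks create an instance and delete it while
# it is still in BUILD, i.e. before it ever reaches an active state,
# which is the situation the force-delete API is meant to handle.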
from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.instance_force_delete_group" class InstanceForceDeleteRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'instance_force_delete_runners' _runner_cls = 'InstanceForceDeleteRunner' @test(depends_on_groups=[groups.INST_ERROR_DELETE_WAIT], groups=[GROUP, groups.INST_FORCE_DELETE]) class InstanceForceDeleteGroup(TestGroup): """Test Instance Force Delete functionality.""" def __init__(self): super(InstanceForceDeleteGroup, self).__init__( InstanceForceDeleteRunnerFactory.instance()) @test def create_build_instance(self): """Create an instance in BUILD state.""" self.test_runner.run_create_build_instance() @test(depends_on=[create_build_instance]) def delete_build_instance(self): """Make sure the instance in BUILD state deletes.""" self.test_runner.run_delete_build_instance() @test(depends_on_classes=[InstanceForceDeleteGroup], groups=[GROUP, groups.INST_FORCE_DELETE_WAIT]) class InstanceForceDeleteWaitGroup(TestGroup): """Make sure the Force Delete instance goes away.""" def __init__(self): super(InstanceForceDeleteWaitGroup, self).__init__( InstanceForceDeleteRunnerFactory.instance()) @test def wait_for_force_delete(self): """Wait for the Force Delete instance to be gone.""" self.test_runner.run_wait_for_force_delete() trove-12.1.0.dev92/trove/tests/scenario/groups/instance_upgrade_group.py # Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
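# NOTE: In the upgrade group below, the actual upgrade call is
# currently skipped via SkipTest (flagged as unstable in CI); the
# surrounding steps still run and verify that users, databases and
# test data survive the (skipped) upgrade path.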
from proboscis import SkipTest from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.instance_upgrade_group" class InstanceUpgradeRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'instance_upgrade_runners' _runner_cls = 'InstanceUpgradeRunner' class UserActionsRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'user_actions_runners' _runner_cls = 'UserActionsRunner' class DatabaseActionsRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'database_actions_runners' _runner_cls = 'DatabaseActionsRunner' @test(depends_on_groups=[groups.INST_CREATE_WAIT], groups=[GROUP, groups.INST_UPGRADE], runs_after_groups=[groups.INST_ACTIONS]) class InstanceUpgradeGroup(TestGroup): def __init__(self): super(InstanceUpgradeGroup, self).__init__( InstanceUpgradeRunnerFactory.instance()) self.database_actions_runner = DatabaseActionsRunnerFactory.instance() self.user_actions_runner = UserActionsRunnerFactory.instance() @test def create_user_databases(self): """Create user databases on an existing instance.""" # These databases may be referenced by the users (below) so we need to # create them first. self.database_actions_runner.run_databases_create() @test(runs_after=[create_user_databases]) def create_users(self): """Create users on an existing instance.""" self.user_actions_runner.run_users_create() @test(runs_after=[create_users]) def add_test_data(self): """Add test data.""" self.test_runner.run_add_test_data() @test(depends_on=[add_test_data]) def verify_test_data(self): """Verify test data.""" self.test_runner.run_verify_test_data() @test(depends_on=[verify_test_data]) def list_users_before_upgrade(self): """List the created users before upgrade.""" self.user_actions_runner.run_users_list() @test(depends_on=[list_users_before_upgrade]) def instance_upgrade(self): """Upgrade an existing instance.""" raise SkipTest("Skip the instance upgrade integration test " "temporarily because it is not stable in CI") # self.test_runner.run_instance_upgrade() @test(depends_on=[list_users_before_upgrade]) def show_user(self): """Show created users.""" self.user_actions_runner.run_user_show() @test(depends_on=[create_users], runs_after=[show_user]) def list_users(self): """List the created users.""" self.user_actions_runner.run_users_list() @test(depends_on=[verify_test_data, instance_upgrade]) def verify_test_data_after_upgrade(self): """Verify test data after upgrade.""" self.test_runner.run_verify_test_data() @test(depends_on=[add_test_data], runs_after=[verify_test_data_after_upgrade]) def remove_test_data(self): """Remove test data.""" self.test_runner.run_remove_test_data() @test(depends_on=[create_users], runs_after=[list_users]) def delete_user(self): """Delete the created users.""" self.user_actions_runner.run_user_delete() @test(depends_on=[create_user_databases], runs_after=[delete_user]) def delete_user_databases(self): """Delete the user databases.""" self.database_actions_runner.run_database_delete() trove-12.1.0.dev92/trove/tests/scenario/groups/module_group.py # Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.module_group" class ModuleRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'module_runners' _runner_cls = 'ModuleRunner' @test(groups=[GROUP, groups.MODULE_CREATE]) class ModuleCreateGroup(TestGroup): """Test Module Create functionality.""" def __init__(self): super(ModuleCreateGroup, self).__init__( ModuleRunnerFactory.instance()) @test def module_delete_existing(self): """Delete all previous test modules.""" self.test_runner.run_module_delete_existing() @test def module_create_bad_type(self): """Ensure create module with invalid type fails.""" self.test_runner.run_module_create_bad_type() @test def module_create_non_admin_auto(self): """Ensure create auto_apply module for non-admin fails.""" self.test_runner.run_module_create_non_admin_auto() @test def module_create_non_admin_all_tenant(self): """Ensure create all tenant module for non-admin fails.""" self.test_runner.run_module_create_non_admin_all_tenant() @test def module_create_non_admin_hidden(self): """Ensure create hidden module for non-admin fails.""" self.test_runner.run_module_create_non_admin_hidden() @test def module_create_non_admin_priority(self): """Ensure create priority module for non-admin fails.""" self.test_runner.run_module_create_non_admin_priority() @test def module_create_non_admin_no_full_access(self): """Ensure create no full access module for non-admin fails.""" self.test_runner.run_module_create_non_admin_no_full_access() @test def module_create_full_access_with_admin_opt(self): """Ensure create full access module with admin opts fails.""" self.test_runner.run_module_create_full_access_with_admin_opt() @test def module_create_bad_datastore(self): """Ensure create module with invalid datastore fails.""" self.test_runner.run_module_create_bad_datastore() @test def module_create_bad_datastore_version(self): """Ensure create module with invalid datastore_version fails.""" self.test_runner.run_module_create_bad_datastore_version() @test def module_create_missing_datastore(self): """Ensure create module with missing datastore fails.""" self.test_runner.run_module_create_missing_datastore() @test(runs_after=[module_delete_existing]) def module_create(self): """Check that create module works.""" self.test_runner.run_module_create() @test(runs_after=[module_create]) def module_create_for_update(self): """Check that create module for update works.""" self.test_runner.run_module_create_for_update() @test(depends_on=[module_create]) def module_create_dupe(self): """Ensure create with duplicate info fails.""" self.test_runner.run_module_create_dupe() @test(depends_on=[module_create_for_update]) def module_update_missing_datastore(self): """Ensure update module with missing datastore fails.""" self.test_runner.run_module_update_missing_datastore() @test(runs_after=[module_create_for_update]) def module_create_bin(self): """Check that create module with binary contents works.""" self.test_runner.run_module_create_bin() 
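# module_create_bin and module_create_bin2 exercise modules whose
# contents are binary rather than plain text; the checks that follow
# then cover show/list visibility rules and the admin-only create
# options (all-tenant, hidden, auto_apply, live_update, priority_apply).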
@test(runs_after=[module_create_bin]) def module_create_bin2(self): """Check that create module with other binary contents works.""" self.test_runner.run_module_create_bin2() @test(depends_on=[module_create]) def module_show(self): """Check that show module works.""" self.test_runner.run_module_show() @test(depends_on=[module_create]) def module_show_unauth_user(self): """Ensure that show module for unauth user fails.""" self.test_runner.run_module_show_unauth_user() @test(depends_on=[module_create, module_create_bin, module_create_bin2]) def module_list(self): """Check that list modules works.""" self.test_runner.run_module_list() @test(depends_on=[module_create, module_create_bin, module_create_bin2]) def module_list_unauth_user(self): """Ensure that list module for unauth user fails.""" self.test_runner.run_module_list_unauth_user() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_list]) def module_create_admin_all(self): """Check that create module works with all admin options.""" self.test_runner.run_module_create_admin_all() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_create_admin_all]) def module_create_admin_hidden(self): """Check that create module works with hidden option.""" self.test_runner.run_module_create_admin_hidden() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_create_admin_hidden]) def module_create_admin_auto(self): """Check that create module works with auto option.""" self.test_runner.run_module_create_admin_auto() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_create_admin_auto]) def module_create_admin_live_update(self): """Check that create module works with live-update option.""" self.test_runner.run_module_create_admin_live_update() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_create_admin_live_update]) def module_create_admin_priority_apply(self): """Check that create module works with priority-apply option.""" self.test_runner.run_module_create_admin_priority_apply() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_create_admin_priority_apply]) def module_create_datastore(self): """Check that create module with datastore works.""" self.test_runner.run_module_create_datastore() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_create_datastore]) def module_create_different_datastore(self): """Check that create module with different datastore works.""" self.test_runner.run_module_create_different_datastore() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_create_different_datastore]) def module_create_ds_version(self): """Check that create module with ds version works.""" self.test_runner.run_module_create_ds_version() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_create_ds_version]) def module_create_all_tenant(self): """Check that create 'all' tenants with datastore module works.""" self.test_runner.run_module_create_all_tenant() @test(depends_on=[module_create, module_create_bin, module_create_bin2], runs_after=[module_create_all_tenant, module_list_unauth_user]) def module_create_different_tenant(self): """Check that create with same name on different tenant works.""" self.test_runner.run_module_create_different_tenant() @test(depends_on=[module_create, 
module_create_bin, module_create_bin2], runs_after=[module_create_different_tenant]) def module_create_full_access(self): """Check that create by admin with full access works.""" self.test_runner.run_module_create_full_access() @test(depends_on=[module_create_all_tenant], runs_after=[module_create_full_access]) def module_full_access_toggle(self): """Check that toggling full access works.""" self.test_runner.run_module_full_access_toggle() @test(depends_on=[module_create_all_tenant], runs_after=[module_full_access_toggle]) def module_list_again(self): """Check that list modules skips invisible modules.""" self.test_runner.run_module_list_again() @test(depends_on=[module_create_ds_version], runs_after=[module_list_again]) def module_list_ds(self): """Check that list modules by datastore works.""" self.test_runner.run_module_list_ds() @test(depends_on=[module_create_ds_version], runs_after=[module_list_ds]) def module_list_ds_all(self): """Check that list modules by all datastores works.""" self.test_runner.run_module_list_ds_all() @test(depends_on=[module_create_admin_hidden]) def module_show_invisible(self): """Ensure that show invisible module for non-admin fails.""" self.test_runner.run_module_show_invisible() @test(depends_on=[module_create_all_tenant], runs_after=[module_create_different_tenant]) def module_list_admin(self): """Check that list modules for admin works.""" self.test_runner.run_module_list_admin() @test(depends_on=[module_create], runs_after=[module_show]) def module_update(self): """Check that update module works.""" self.test_runner.run_module_update() @test(depends_on=[module_update]) def module_update_same_contents(self): """Check that update module with same contents works.""" self.test_runner.run_module_update_same_contents() @test(depends_on=[module_update], runs_after=[module_update_same_contents]) def module_update_auto_toggle(self): """Check that update module works for auto apply toggle.""" self.test_runner.run_module_update_auto_toggle() @test(depends_on=[module_update], runs_after=[module_update_auto_toggle]) def module_update_all_tenant_toggle(self): """Check that update module works for all tenant toggle.""" self.test_runner.run_module_update_all_tenant_toggle() @test(depends_on=[module_update], runs_after=[module_update_all_tenant_toggle]) def module_update_invisible_toggle(self): """Check that update module works for invisible toggle.""" self.test_runner.run_module_update_invisible_toggle() @test(depends_on=[module_update], runs_after=[module_update_invisible_toggle]) def module_update_priority_toggle(self): """Check that update module works for priority toggle.""" self.test_runner.run_module_update_priority_toggle() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_unauth(self): """Ensure update module for unauth user fails.""" self.test_runner.run_module_update_unauth() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_non_admin_auto(self): """Ensure update module to auto_apply for non-admin fails.""" self.test_runner.run_module_update_non_admin_auto() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_non_admin_auto_off(self): """Ensure update module to auto_apply off for non-admin fails.""" self.test_runner.run_module_update_non_admin_auto_off() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_non_admin_auto_any(self): """Ensure any update module to auto_apply for 
non-admin fails.""" self.test_runner.run_module_update_non_admin_auto_any() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_non_admin_all_tenant(self): """Ensure update module to all tenant for non-admin fails.""" self.test_runner.run_module_update_non_admin_all_tenant() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_non_admin_all_tenant_off(self): """Ensure update module to all tenant off for non-admin fails.""" self.test_runner.run_module_update_non_admin_all_tenant_off() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_non_admin_all_tenant_any(self): """Ensure any update module to all tenant for non-admin fails.""" self.test_runner.run_module_update_non_admin_all_tenant_any() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_non_admin_invisible(self): """Ensure update module to invisible for non-admin fails.""" self.test_runner.run_module_update_non_admin_invisible() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_non_admin_invisible_off(self): """Ensure update module to invisible off for non-admin fails.""" self.test_runner.run_module_update_non_admin_invisible_off() @test(depends_on=[module_update], runs_after=[module_update_priority_toggle]) def module_update_non_admin_invisible_any(self): """Ensure any update module to invisible for non-admin fails.""" self.test_runner.run_module_update_non_admin_invisible_any() @test(depends_on_groups=[groups.INST_CREATE_WAIT, groups.MODULE_CREATE], runs_after_groups=[groups.INST_ERROR_DELETE, groups.INST_FORCE_DELETE], groups=[GROUP, groups.MODULE_INST, groups.MODULE_INST_CREATE]) class ModuleInstCreateGroup(TestGroup): """Test Module Instance Create functionality.""" def __init__(self): super(ModuleInstCreateGroup, self).__init__( ModuleRunnerFactory.instance()) @test def module_list_instance_empty(self): """Check that the instance has no modules associated.""" self.test_runner.run_module_list_instance_empty() @test(runs_after=[module_list_instance_empty]) def module_instances_empty(self): """Check that the module hasn't been applied to any instances.""" self.test_runner.run_module_instances_empty() @test(runs_after=[module_instances_empty]) def module_instance_count_empty(self): """Check that no instance count exists.""" self.test_runner.run_module_instance_count_empty() @test(runs_after=[module_instance_count_empty]) def module_query_empty(self): """Check that the instance has no modules applied.""" self.test_runner.run_module_query_empty() @test(runs_after=[module_query_empty]) def module_apply(self): """Check that module-apply works.""" self.test_runner.run_module_apply() @test(runs_after=[module_apply]) def module_apply_wrong_module(self): """Ensure that module-apply for wrong module fails.""" self.test_runner.run_module_apply_wrong_module() @test(depends_on=[module_apply_wrong_module]) def module_update_not_live(self): """Ensure updating a non live_update module fails.""" self.test_runner.run_module_update_not_live() @test(depends_on=[module_apply], runs_after=[module_update_not_live]) def module_list_instance_after_apply(self): """Check that the instance has the modules associated.""" self.test_runner.run_module_list_instance_after_apply() @test(runs_after=[module_list_instance_after_apply]) def module_apply_live_update(self): """Check that module-apply works for live_update.""" 
self.test_runner.run_module_apply_live_update() @test(depends_on=[module_apply_live_update]) def module_list_instance_after_apply_live(self): """Check that the instance has the right modules.""" self.test_runner.run_module_list_instance_after_apply_live() @test(runs_after=[module_list_instance_after_apply_live]) def module_instances_after_apply(self): """Check that the instance shows up in the list.""" self.test_runner.run_module_instances_after_apply() @test(runs_after=[module_instances_after_apply]) def module_instance_count_after_apply(self): """Check that the instance count is right after apply.""" self.test_runner.run_module_instance_count_after_apply() @test(runs_after=[module_instance_count_after_apply]) def module_query_after_apply(self): """Check that module-query works.""" self.test_runner.run_module_query_after_apply() @test(runs_after=[module_query_after_apply]) def module_update_live_update(self): """Check that update module works on 'live' applied module.""" self.test_runner.run_module_update_live_update() @test(runs_after=[module_update_live_update]) def module_apply_another(self): """Check that module-apply works for another module.""" self.test_runner.run_module_apply_another() @test(depends_on=[module_apply_another]) def module_list_instance_after_apply_another(self): """Check that the instance has the right modules again.""" self.test_runner.run_module_list_instance_after_apply_another() @test(runs_after=[module_list_instance_after_apply_another]) def module_instances_after_apply_another(self): """Check that the instance shows up in the list still.""" self.test_runner.run_module_instances_after_apply_another() @test(runs_after=[module_instances_after_apply_another]) def module_instance_count_after_apply_another(self): """Check that the instance count is right after another apply.""" self.test_runner.run_module_instance_count_after_apply_another() @test(depends_on=[module_apply_another], runs_after=[module_instance_count_after_apply_another]) def module_query_after_apply_another(self): """Check that module-query works after another apply.""" self.test_runner.run_module_query_after_apply_another() @test(depends_on=[module_apply], runs_after=[module_query_after_apply_another]) def create_inst_with_mods(self): """Check that creating an instance with modules works.""" self.test_runner.run_create_inst_with_mods() @test(runs_after=[create_inst_with_mods]) def create_inst_with_wrong_module(self): """Ensure that creating an inst with wrong ds mod fails.""" self.test_runner.run_create_inst_with_wrong_module() @test(depends_on=[module_apply], runs_after=[create_inst_with_wrong_module]) def module_delete_applied(self): """Ensure that deleting an applied module fails.""" self.test_runner.run_module_delete_applied() @test(depends_on=[module_apply], runs_after=[module_delete_applied]) def module_remove(self): """Check that module-remove works.""" self.test_runner.run_module_remove() @test(depends_on=[module_remove]) def module_query_after_remove(self): """Check that the instance has modules applied after remove.""" self.test_runner.run_module_query_after_remove() @test(depends_on=[module_remove], runs_after=[module_query_after_remove]) def module_update_after_remove(self): """Check that update module after remove works.""" self.test_runner.run_module_update_after_remove() @test(depends_on=[module_remove], runs_after=[module_update_after_remove]) def module_apply_another_again(self): """Check that module-apply another works a second time.""" self.test_runner.run_module_apply_another() 
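# The apply/remove cycle is repeated below to confirm that removing a
# module leaves the instance in a state where the same module can be
# applied, queried and removed again.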
@test(depends_on=[module_apply], runs_after=[module_apply_another_again]) def module_query_after_apply_another2(self): """Check that module-query works still.""" self.test_runner.run_module_query_after_apply_another() @test(depends_on=[module_apply_another_again], runs_after=[module_query_after_apply_another2]) def module_remove_again(self): """Check that module-remove works again.""" self.test_runner.run_module_remove() @test(depends_on=[module_remove_again]) def module_query_empty_after_again(self): """Check that the inst has right mod applied after 2nd remove.""" self.test_runner.run_module_query_after_remove() @test(depends_on=[module_remove_again], runs_after=[module_query_empty_after_again]) def module_update_after_remove_again(self): """Check that update module after remove again works.""" self.test_runner.run_module_update_after_remove_again() @test(depends_on_groups=[groups.MODULE_INST_CREATE], groups=[GROUP, groups.MODULE_INST, groups.MODULE_INST_CREATE_WAIT], runs_after_groups=[groups.INST_ACTIONS, groups.INST_UPGRADE]) class ModuleInstCreateWaitGroup(TestGroup): """Test that Module Instance Create Completes.""" def __init__(self): super(ModuleInstCreateWaitGroup, self).__init__( ModuleRunnerFactory.instance()) @test def wait_for_inst_with_mods(self): """Wait for create instance with modules to finish.""" self.test_runner.run_wait_for_inst_with_mods() @test(depends_on=[wait_for_inst_with_mods]) def module_query_after_inst_create(self): """Check that module-query works on new instance.""" self.test_runner.run_module_query_after_inst_create() @test(depends_on=[wait_for_inst_with_mods], runs_after=[module_query_after_inst_create]) def module_retrieve_after_inst_create(self): """Check that module-retrieve works on new instance.""" self.test_runner.run_module_retrieve_after_inst_create() @test(depends_on=[wait_for_inst_with_mods], runs_after=[module_retrieve_after_inst_create]) def module_query_after_inst_create_admin(self): """Check that module-query works for admin.""" self.test_runner.run_module_query_after_inst_create_admin() @test(depends_on=[wait_for_inst_with_mods], runs_after=[module_query_after_inst_create_admin]) def module_retrieve_after_inst_create_admin(self): """Check that module-retrieve works for admin.""" self.test_runner.run_module_retrieve_after_inst_create_admin() @test(depends_on=[wait_for_inst_with_mods], runs_after=[module_retrieve_after_inst_create_admin]) def module_delete_auto_applied(self): """Ensure that module-delete on auto-applied module fails.""" self.test_runner.run_module_delete_auto_applied() @test(runs_after=[module_delete_auto_applied]) def module_list_instance_after_mod_inst(self): """Check that the new instance has the right modules.""" self.test_runner.run_module_list_instance_after_mod_inst() @test(runs_after=[module_list_instance_after_mod_inst]) def module_instances_after_mod_inst(self): """Check that the new instance shows up in the list.""" self.test_runner.run_module_instances_after_mod_inst() @test(runs_after=[module_instances_after_mod_inst]) def module_instance_count_after_mod_inst(self): """Check that the new instance count is right.""" self.test_runner.run_module_instance_count_after_mod_inst() @test(runs_after=[module_instance_count_after_mod_inst]) def module_reapply_with_md5(self): """Check that module reapply with md5 works.""" self.test_runner.run_module_reapply_with_md5() @test(runs_after=[module_reapply_with_md5]) def module_reapply_with_md5_verify(self): """Verify the dates after md5 reapply (no-op).""" 
self.test_runner.run_module_reapply_with_md5_verify() @test(runs_after=[module_reapply_with_md5_verify]) def module_list_instance_after_reapply_md5(self): """Check that the instance's modules haven't changed.""" self.test_runner.run_module_list_instance_after_reapply_md5() @test(runs_after=[module_list_instance_after_reapply_md5]) def module_instances_after_reapply_md5(self): """Check that the new instance still shows up in the list.""" self.test_runner.run_module_instances_after_reapply_md5() @test(runs_after=[module_instances_after_reapply_md5]) def module_instance_count_after_reapply_md5(self): """Check that the instance count hasn't changed.""" self.test_runner.run_module_instance_count_after_reapply_md5() @test(runs_after=[module_instance_count_after_reapply_md5]) def module_reapply_all(self): """Check that module reapply works.""" self.test_runner.run_module_reapply_all() @test(runs_after=[module_reapply_all]) def module_reapply_all_wait(self): """Wait for module reapply to complete.""" self.test_runner.run_module_reapply_all_wait() @test(runs_after=[module_reapply_all_wait]) def module_instance_count_after_reapply(self): """Check that the reapply instance count is right.""" self.test_runner.run_module_instance_count_after_reapply() @test(runs_after=[module_instance_count_after_reapply]) def module_reapply_with_force(self): """Check that module reapply with force works.""" self.test_runner.run_module_reapply_with_force() @test(runs_after=[module_reapply_with_force]) def module_reapply_with_force_wait(self): """Wait for module reapply with force to complete.""" self.test_runner.run_module_reapply_with_force_wait() @test(runs_after=[module_reapply_with_force_wait]) def module_list_instance_after_reapply_force(self): """Check that the new instance still has the right modules.""" self.test_runner.run_module_list_instance_after_reapply() @test(runs_after=[module_list_instance_after_reapply_force]) def module_instances_after_reapply_force(self): """Check that the new instance still shows up in the list.""" self.test_runner.run_module_instances_after_reapply() @test(runs_after=[module_instances_after_reapply_force]) def module_instance_count_after_reapply_force(self): """Check that the instance count is right after reapply force.""" self.test_runner.run_module_instance_count_after_reapply() @test(depends_on_groups=[groups.MODULE_INST_CREATE_WAIT], groups=[GROUP, groups.MODULE_INST, groups.MODULE_INST_DELETE]) class ModuleInstDeleteGroup(TestGroup): """Test Module Instance Delete functionality.""" def __init__(self): super(ModuleInstDeleteGroup, self).__init__( ModuleRunnerFactory.instance()) @test def delete_inst_with_mods(self): """Check that instance with module can be deleted.""" self.test_runner.run_delete_inst_with_mods() @test(runs_after=[delete_inst_with_mods]) def remove_mods_from_main_inst(self): """Check that modules can be removed from the main instance.""" self.test_runner.run_remove_mods_from_main_inst() @test(depends_on_groups=[groups.MODULE_INST_DELETE], groups=[GROUP, groups.MODULE_INST, groups.MODULE_INST_DELETE_WAIT], runs_after_groups=[groups.INST_DELETE]) class ModuleInstDeleteWaitGroup(TestGroup): """Test that Module Instance Delete Completes.""" def __init__(self): super(ModuleInstDeleteWaitGroup, self).__init__( ModuleRunnerFactory.instance()) @test def wait_for_delete_inst_with_mods(self): """Wait until the instance with module is gone.""" self.test_runner.run_wait_for_delete_inst_with_mods() @test(depends_on_groups=[groups.MODULE_CREATE], 
runs_after_groups=[groups.MODULE_INST_DELETE_WAIT], groups=[GROUP, groups.MODULE_DELETE]) class ModuleDeleteGroup(TestGroup): """Test Module Delete functionality.""" def __init__(self): super(ModuleDeleteGroup, self).__init__( ModuleRunnerFactory.instance()) @test def module_delete_non_existent(self): """Ensure delete non-existent module fails.""" self.test_runner.run_module_delete_non_existent() @test def module_delete_unauth_user(self): """Ensure delete module by unauth user fails.""" self.test_runner.run_module_delete_unauth_user() @test(runs_after=[module_delete_unauth_user, module_delete_non_existent]) def module_delete_hidden_by_non_admin(self): """Ensure delete hidden module by non-admin user fails.""" self.test_runner.run_module_delete_hidden_by_non_admin() @test(runs_after=[module_delete_hidden_by_non_admin]) def module_delete_all_tenant_by_non_admin(self): """Ensure delete all tenant module by non-admin user fails.""" self.test_runner.run_module_delete_all_tenant_by_non_admin() @test(runs_after=[module_delete_all_tenant_by_non_admin]) def module_delete_auto_by_non_admin(self): """Ensure delete auto-apply module by non-admin user fails.""" self.test_runner.run_module_delete_auto_by_non_admin() @test(runs_after=[module_delete_auto_by_non_admin]) def module_delete(self): """Check that delete module works.""" self.test_runner.run_module_delete() @test(runs_after=[module_delete]) def module_delete_admin(self): """Check that delete module works for admin.""" self.test_runner.run_module_delete_admin() @test(runs_after=[module_delete_admin]) def module_delete_remaining(self): """Delete all remaining test modules.""" self.test_runner.run_module_delete_existing()
# trove-12.1.0.dev92/trove/tests/scenario/groups/replication_group.py
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
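# NOTE: the *RunnerFactory classes below hand out a shared runner object, so
# state such as master_backup_count survives across the groups in this file.
# A rough, hypothetical sketch of the pattern (the real lookup via
# _runner_ns/_runner_cls lives in trove.tests.scenario.runners.test_runners):
#
#     class RunnerFactory(object):
#         _instance = None
#
#         @classmethod
#         def instance(cls):
#             # lazily create and cache a single runner per factory class
#             if cls._instance is None:
#                 cls._instance = cls.create()
#             return cls._instance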
from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.replication_group" class ReplicationRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'replication_runners' _runner_cls = 'ReplicationRunner' class BackupRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'backup_runners' _runner_cls = 'BackupRunner' @test(depends_on_groups=[groups.INST_CREATE], groups=[GROUP, groups.REPL_INST_CREATE]) class ReplicationInstCreateGroup(TestGroup): """Test Replication Instance Create functionality.""" def __init__(self): super(ReplicationInstCreateGroup, self).__init__( ReplicationRunnerFactory.instance()) @test def add_data_for_replication(self): """Add data to master for initial replica setup.""" self.test_runner.run_add_data_for_replication() @test(depends_on=[add_data_for_replication]) def verify_data_for_replication(self): """Verify initial data exists on master.""" self.test_runner.run_verify_data_for_replication() @test(runs_after=[verify_data_for_replication]) def create_non_affinity_master(self): """Test creating a non-affinity master.""" self.test_runner.run_create_non_affinity_master() @test(runs_after=[create_non_affinity_master]) def create_single_replica(self): """Test creating a single replica.""" self.test_runner.run_create_single_replica() @test(depends_on_classes=[ReplicationInstCreateGroup], groups=[GROUP, groups.REPL_INST_CREATE_WAIT]) class ReplicationInstCreateWaitGroup(TestGroup): """Wait for Replication Instance Create to complete.""" def __init__(self): super(ReplicationInstCreateWaitGroup, self).__init__( ReplicationRunnerFactory.instance()) @test def wait_for_non_affinity_master(self): """Wait for non-affinity master to complete.""" self.test_runner.run_wait_for_non_affinity_master() @test(depends_on=[wait_for_non_affinity_master]) def create_non_affinity_replica(self): """Test creating a non-affinity replica.""" self.test_runner.run_create_non_affinity_replica() @test(depends_on=[create_non_affinity_replica]) def wait_for_non_affinity_replica_fail(self): """Wait for non-affinity replica to fail.""" self.test_runner.run_wait_for_non_affinity_replica_fail() @test(runs_after=[wait_for_non_affinity_replica_fail]) def delete_non_affinity_repl(self): """Test deleting non-affinity replica.""" self.test_runner.run_delete_non_affinity_repl() @test(runs_after=[delete_non_affinity_repl]) def wait_for_single_replica(self): """Wait for single replica to complete.""" self.test_runner.run_wait_for_single_replica() @test(depends_on=[wait_for_single_replica]) def add_data_after_replica(self): """Add data to master after initial replica is set up.""" self.test_runner.run_add_data_after_replica() @test(depends_on=[add_data_after_replica]) def verify_replica_data_after_single(self): """Verify data exists on single replica.""" self.test_runner.run_verify_replica_data_after_single() @test(depends_on_classes=[ReplicationInstCreateWaitGroup], groups=[GROUP, groups.REPL_INST_MULTI_CREATE]) class ReplicationInstMultiCreateGroup(TestGroup): """Test Replication Instance Multi-Create functionality.""" def __init__(self): super(ReplicationInstMultiCreateGroup, self).__init__( ReplicationRunnerFactory.instance()) self.backup_runner = BackupRunnerFactory.instance() @test def backup_master_instance(self): """Back up the master instance.""" self.backup_runner.run_backup_create() self.backup_runner.run_backup_create_completed()
self.test_runner.master_backup_count += 1 @test(depends_on=[backup_master_instance]) def create_multiple_replicas(self): """Test creating multiple replicas.""" self.test_runner.run_create_multiple_replicas() @test(depends_on=[create_multiple_replicas]) def check_has_incremental_backup(self): """Test that creating multiple replicas uses incr backup.""" self.backup_runner.run_check_has_incremental() @test(depends_on_classes=[ReplicationInstMultiCreateGroup], groups=[GROUP, groups.REPL_INST_DELETE_NON_AFFINITY_WAIT]) class ReplicationInstDeleteNonAffReplWaitGroup(TestGroup): """Wait for Replication Instance Non-Affinity repl to be gone.""" def __init__(self): super(ReplicationInstDeleteNonAffReplWaitGroup, self).__init__( ReplicationRunnerFactory.instance()) @test def wait_for_delete_non_affinity_repl(self): """Wait for the non-affinity replica to delete.""" self.test_runner.run_wait_for_delete_non_affinity_repl() @test(depends_on=[wait_for_delete_non_affinity_repl]) def delete_non_affinity_master(self): """Test deleting non-affinity master.""" self.test_runner.run_delete_non_affinity_master() @test(depends_on_classes=[ReplicationInstDeleteNonAffReplWaitGroup], groups=[GROUP, groups.REPL_INST_MULTI_CREATE_WAIT]) class ReplicationInstMultiCreateWaitGroup(TestGroup): """Wait for Replication Instance Multi-Create to complete.""" def __init__(self): super(ReplicationInstMultiCreateWaitGroup, self).__init__( ReplicationRunnerFactory.instance()) @test def wait_for_delete_non_affinity_master(self): """Wait for the non-affinity master to delete.""" self.test_runner.run_wait_for_delete_non_affinity_master() @test(runs_after=[wait_for_delete_non_affinity_master]) def wait_for_multiple_replicas(self): """Wait for multiple replicas to complete.""" self.test_runner.run_wait_for_multiple_replicas() @test(depends_on=[wait_for_multiple_replicas]) def verify_replica_data_orig(self): """Verify original data was transferred to replicas.""" self.test_runner.run_verify_replica_data_orig() @test(depends_on=[wait_for_multiple_replicas], runs_after=[verify_replica_data_orig]) def add_data_to_replicate(self): """Add new data to master to verify replication.""" self.test_runner.run_add_data_to_replicate() @test(depends_on=[add_data_to_replicate]) def verify_data_to_replicate(self): """Verify new data exists on master.""" self.test_runner.run_verify_data_to_replicate() @test(depends_on=[add_data_to_replicate], runs_after=[verify_data_to_replicate]) def verify_replica_data_orig2(self): """Verify original data was transferred to replicas.""" self.test_runner.run_verify_replica_data_orig() @test(depends_on=[add_data_to_replicate], runs_after=[verify_replica_data_orig2]) def verify_replica_data_new(self): """Verify new data was transferred to replicas.""" self.test_runner.run_verify_replica_data_new() @test(depends_on=[wait_for_multiple_replicas], runs_after=[verify_replica_data_new]) def promote_master(self): """Ensure promoting master fails.""" self.test_runner.run_promote_master() @test(depends_on=[wait_for_multiple_replicas], runs_after=[promote_master]) def eject_replica(self): """Ensure ejecting non master fails.""" self.test_runner.run_eject_replica() @test(depends_on=[wait_for_multiple_replicas], runs_after=[eject_replica]) def eject_valid_master(self): """Ensure ejecting valid master fails.""" self.test_runner.run_eject_valid_master() @test(depends_on=[wait_for_multiple_replicas], runs_after=[eject_valid_master]) def delete_valid_master(self): """Ensure deleting valid master fails.""" 
self.test_runner.run_delete_valid_master() @test(depends_on_classes=[ReplicationInstMultiCreateWaitGroup], groups=[GROUP, groups.REPL_INST_MULTI_PROMOTE]) class ReplicationInstMultiPromoteGroup(TestGroup): """Test Replication Instance Multi-Promote functionality.""" def __init__(self): super(ReplicationInstMultiPromoteGroup, self).__init__( ReplicationRunnerFactory.instance()) @test def promote_to_replica_source(self): """Test promoting a replica to replica source (master).""" self.test_runner.run_promote_to_replica_source() @test(depends_on=[promote_to_replica_source]) def verify_replica_data_new_master(self): """Verify data is still on new master.""" self.test_runner.run_verify_replica_data_new_master() @test(depends_on=[promote_to_replica_source], runs_after=[verify_replica_data_new_master]) def add_data_to_replicate2(self): """Add data to new master to verify replication.""" self.test_runner.run_add_data_to_replicate2() @test(depends_on=[add_data_to_replicate2]) def verify_data_to_replicate2(self): """Verify data exists on new master.""" self.test_runner.run_verify_data_to_replicate2() @test(depends_on=[add_data_to_replicate2], runs_after=[verify_data_to_replicate2]) def verify_replica_data_new2(self): """Verify data was transferred to new replicas.""" self.test_runner.run_verify_replica_data_new2() @test(depends_on=[promote_to_replica_source], runs_after=[verify_replica_data_new2]) def promote_original_source(self): """Test promoting back the original replica source.""" self.test_runner.run_promote_original_source() @test(depends_on=[promote_original_source]) def add_final_data_to_replicate(self): """Add final data to original master to verify switch.""" self.test_runner.run_add_final_data_to_replicate() @test(depends_on=[add_final_data_to_replicate]) def verify_data_to_replicate_final(self): """Verify final data exists on master.""" self.test_runner.run_verify_data_to_replicate_final() @test(depends_on=[verify_data_to_replicate_final]) def verify_final_data_replicated(self): """Verify final data was transferred to all replicas.""" self.test_runner.run_verify_final_data_replicated() @test(depends_on_classes=[ReplicationInstMultiPromoteGroup], groups=[GROUP, groups.REPL_INST_DELETE]) class ReplicationInstDeleteGroup(TestGroup): """Test Replication Instance Delete functionality.""" def __init__(self): super(ReplicationInstDeleteGroup, self).__init__( ReplicationRunnerFactory.instance()) @test def remove_replicated_data(self): """Remove replication data.""" self.test_runner.run_remove_replicated_data() @test(runs_after=[remove_replicated_data]) def detach_replica_from_source(self): """Test detaching a replica from the master.""" self.test_runner.run_detach_replica_from_source() @test(runs_after=[detach_replica_from_source]) def delete_detached_replica(self): """Test deleting the detached replica.""" self.test_runner.run_delete_detached_replica() @test(runs_after=[delete_detached_replica]) def delete_all_replicas(self): """Test deleting all the remaining replicas.""" self.test_runner.run_delete_all_replicas() @test(depends_on_classes=[ReplicationInstDeleteGroup], groups=[GROUP, groups.REPL_INST_DELETE_WAIT]) class ReplicationInstDeleteWaitGroup(TestGroup): """Wait for Replication Instance Delete to complete.""" def __init__(self): super(ReplicationInstDeleteWaitGroup, self).__init__( ReplicationRunnerFactory.instance()) self.backup_runner = BackupRunnerFactory.instance() @test def wait_for_delete_replicas(self): """Wait for all the replicas to delete.""" 
self.test_runner.run_wait_for_delete_replicas() @test(runs_after=[wait_for_delete_replicas]) def test_backup_deleted(self): """Remove the full backup and test that the created backup is now gone.""" self.test_runner.run_test_backup_deleted() self.backup_runner.run_delete_backup() @test(runs_after=[test_backup_deleted]) def cleanup_master_instance(self): """Remove slave users from master instance.""" self.test_runner.run_cleanup_master_instance()
# trove-12.1.0.dev92/trove/tests/scenario/groups/root_actions_group.py
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.root_actions_group" class RootActionsRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'root_actions_runners' _runner_cls = 'RootActionsRunner' class BackupRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'backup_runners' _runner_cls = 'BackupRunner' class BackupRunnerFactory2(test_runners.RunnerFactory): _runner_ns = 'backup_runners' _runner_cls = 'BackupRunner' @test(depends_on_groups=[groups.INST_FORCE_DELETE_WAIT], groups=[GROUP, groups.ROOT_ACTION_ENABLE]) class RootActionsEnableGroup(TestGroup): """Test Root Actions Enable functionality.""" def __init__(self): super(RootActionsEnableGroup, self).__init__( RootActionsRunnerFactory.instance()) self.backup_runner = BackupRunnerFactory.instance() self.backup_runner2 = BackupRunnerFactory2.instance() @test def check_root_never_enabled(self): """Check the root has never been enabled on the instance.""" self.test_runner.run_check_root_never_enabled() @test(depends_on=[check_root_never_enabled]) def disable_root_before_enabled(self): """Ensure disable fails if root was never enabled.""" self.test_runner.check_root_disable_supported() self.test_runner.run_disable_root_before_enabled() @test(depends_on=[check_root_never_enabled], runs_after=[disable_root_before_enabled]) def enable_root_no_password(self): """Enable root (without specifying a password).""" self.test_runner.run_enable_root_no_password() @test(depends_on=[enable_root_no_password]) def check_root_enabled(self): """Check the root is now enabled.""" self.test_runner.run_check_root_enabled() @test(depends_on=[check_root_enabled]) def backup_root_enabled_instance(self): """Back up the root-enabled instance.""" self.test_runner.check_inherit_root_state_supported() self.backup_runner.run_backup_create() self.backup_runner.run_backup_create_completed() @test(depends_on=[check_root_enabled], runs_after=[backup_root_enabled_instance]) def delete_root(self): """Ensure an attempt to delete the root user fails.""" self.test_runner.run_delete_root() @test(depends_on=[check_root_never_enabled],
runs_after=[delete_root]) def enable_root_with_password(self): """Enable root (with a given password).""" self.test_runner.run_enable_root_with_password() @test(depends_on=[enable_root_with_password]) def check_root_still_enabled(self): """Check the root is still enabled.""" self.test_runner.run_check_root_enabled() @test(depends_on_classes=[RootActionsEnableGroup], groups=[GROUP, groups.ROOT_ACTION_DISABLE]) class RootActionsDisableGroup(TestGroup): """Test Root Actions Disable functionality.""" def __init__(self): super(RootActionsDisableGroup, self).__init__( RootActionsRunnerFactory.instance()) self.backup_runner = BackupRunnerFactory.instance() self.backup_runner2 = BackupRunnerFactory2.instance() @test def disable_root(self): """Disable root.""" self.test_runner.check_root_disable_supported() self.test_runner.run_disable_root() @test(depends_on=[disable_root]) def check_root_still_enabled_after_disable(self): """Check the root is still marked as enabled after disable.""" self.test_runner.check_root_disable_supported() self.test_runner.run_check_root_still_enabled_after_disable() @test(depends_on=[check_root_still_enabled_after_disable]) def backup_root_disabled_instance(self): """Backup the root-disabled instance.""" self.test_runner.check_root_disable_supported() self.test_runner.check_inherit_root_state_supported() self.backup_runner2.run_backup_create() self.backup_runner2.run_backup_create_completed() @test(depends_on_classes=[RootActionsDisableGroup], groups=[GROUP, groups.ROOT_ACTION_INST, groups.ROOT_ACTION_INST_CREATE]) class RootActionsInstCreateGroup(TestGroup): """Test Root Actions Instance Create functionality.""" def __init__(self): super(RootActionsInstCreateGroup, self).__init__( RootActionsRunnerFactory.instance()) self.backup_runner = BackupRunnerFactory.instance() self.backup_runner2 = BackupRunnerFactory2.instance() @test def restore_root_enabled_instance(self): """Restore the root-enabled instance.""" self.backup_runner.run_restore_from_backup(suffix='_root_enable') @test def restore_root_disabled_instance(self): """Restore the root-disabled instance.""" self.test_runner.check_root_disable_supported() self.backup_runner2.run_restore_from_backup(suffix='_root_disable') @test(depends_on_classes=[RootActionsInstCreateGroup], groups=[GROUP, groups.ROOT_ACTION_INST, groups.ROOT_ACTION_INST_CREATE_WAIT]) class RootActionsInstCreateWaitGroup(TestGroup): """Wait for Root Actions Instance Create to complete.""" def __init__(self): super(RootActionsInstCreateWaitGroup, self).__init__( RootActionsRunnerFactory.instance()) self.backup_runner = BackupRunnerFactory.instance() self.backup_runner2 = BackupRunnerFactory2.instance() @test def wait_for_restored_instance(self): """Wait until restoring a root-enabled instance completes.""" self.backup_runner.run_restore_from_backup_completed() @test(depends_on=[wait_for_restored_instance]) def check_root_enabled_after_restore(self): """Check the root is also enabled on the restored instance.""" instance_id = self.backup_runner.restore_instance_id root_creds = self.test_runner.restored_root_creds self.test_runner.run_check_root_enabled_after_restore( instance_id, root_creds) @test def wait_for_restored_instance2(self): """Wait until restoring a root-disabled instance completes.""" self.test_runner.check_root_disable_supported() self.backup_runner2.run_restore_from_backup_completed() @test(depends_on=[wait_for_restored_instance2]) def check_root_enabled_after_restore2(self): """Check the root is also enabled on the restored instance.""" 
instance_id = self.backup_runner2.restore_instance_id root_creds = self.test_runner.restored_root_creds2 self.test_runner.run_check_root_enabled_after_restore2( instance_id, root_creds) @test(depends_on_classes=[RootActionsInstCreateWaitGroup], groups=[GROUP, groups.ROOT_ACTION_INST, groups.ROOT_ACTION_INST_DELETE]) class RootActionsInstDeleteGroup(TestGroup): """Test Root Actions Instance Delete functionality.""" def __init__(self): super(RootActionsInstDeleteGroup, self).__init__( RootActionsRunnerFactory.instance()) self.backup_runner = BackupRunnerFactory.instance() self.backup_runner2 = BackupRunnerFactory2.instance() @test def delete_restored_instance(self): """Delete the restored root-enabled instance.""" self.backup_runner.run_delete_restored_instance() @test def delete_instance_backup(self): """Delete the root-enabled instance backup.""" self.backup_runner.run_delete_backup() @test def delete_restored_instance2(self): """Delete the restored root-disabled instance.""" self.test_runner.check_root_disable_supported() self.backup_runner2.run_delete_restored_instance() @test def delete_instance_backup2(self): """Delete the root-disabled instance backup.""" self.test_runner.check_root_disable_supported() self.backup_runner2.run_delete_backup() @test(depends_on_classes=[RootActionsInstDeleteGroup], groups=[GROUP, groups.ROOT_ACTION_INST, groups.ROOT_ACTION_INST_DELETE_WAIT]) class RootActionsInstDeleteWaitGroup(TestGroup): """Wait for Root Actions Instance Delete to complete.""" def __init__(self): super(RootActionsInstDeleteWaitGroup, self).__init__( RootActionsRunnerFactory.instance()) self.backup_runner = BackupRunnerFactory.instance() self.backup_runner2 = BackupRunnerFactory2.instance() @test def wait_for_restored_instance_delete(self): """Wait for the root-enabled instance to be deleted.""" self.backup_runner.run_wait_for_restored_instance_delete() @test def wait_for_restored_instance2_delete(self): """Wait for the root-disabled instance to be deleted.""" self.backup_runner2.run_wait_for_restored_instance_delete()
# trove-12.1.0.dev92/trove/tests/scenario/groups/test_group.py
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class TestGroup(object): def __init__(self, test_runner): self._test_runner = test_runner @property def test_runner(self): return self._test_runner
# trove-12.1.0.dev92/trove/tests/scenario/groups/user_actions_group.py
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import test from trove.tests.scenario import groups from trove.tests.scenario.groups.test_group import TestGroup from trove.tests.scenario.runners import test_runners GROUP = "scenario.user_actions_group" class UserActionsRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'user_actions_runners' _runner_cls = 'UserActionsRunner' class InstanceCreateRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'instance_create_runners' _runner_cls = 'InstanceCreateRunner' class DatabaseActionsRunnerFactory(test_runners.RunnerFactory): _runner_ns = 'database_actions_runners' _runner_cls = 'DatabaseActionsRunner' @test(depends_on_groups=[groups.ROOT_ACTION_INST_DELETE_WAIT], groups=[GROUP, groups.USER_ACTION_CREATE]) class UserActionsCreateGroup(TestGroup): """Test User Actions Create functionality.""" def __init__(self): super(UserActionsCreateGroup, self).__init__( UserActionsRunnerFactory.instance()) self.database_actions_runner = DatabaseActionsRunnerFactory.instance() @test def create_user_databases(self): """Create user databases on an existing instance.""" # These databases may be referenced by the users (below) so we need to # create them first. self.database_actions_runner.run_databases_create() @test(runs_after=[create_user_databases]) def create_users(self): """Create users on an existing instance.""" self.test_runner.run_users_create() @test(depends_on=[create_users]) def show_user(self): """Show created users.""" self.test_runner.run_user_show() @test(depends_on=[create_users], runs_after=[show_user]) def list_users(self): """List the created users.""" self.test_runner.run_users_list() @test(depends_on=[create_users], runs_after=[list_users]) def show_user_access(self): """Show user access list.""" self.test_runner.run_user_access_show() @test(depends_on=[create_users], runs_after=[show_user_access]) def revoke_user_access(self): """Revoke user database access.""" self.test_runner.run_user_access_revoke() @test(depends_on=[create_users], runs_after=[revoke_user_access]) def grant_user_access(self): """Grant user database access.""" self.test_runner.run_user_access_grant() @test(depends_on=[create_users], runs_after=[grant_user_access]) def create_user_with_no_attributes(self): """Ensure creating a user with blank specification fails.""" self.test_runner.run_user_create_with_no_attributes() @test(depends_on=[create_users], runs_after=[create_user_with_no_attributes]) def create_user_with_blank_name(self): """Ensure creating a user with blank name fails.""" self.test_runner.run_user_create_with_blank_name() @test(depends_on=[create_users], runs_after=[create_user_with_blank_name]) def create_user_with_blank_password(self): """Ensure creating a user with blank password fails.""" self.test_runner.run_user_create_with_blank_password() @test(depends_on=[create_users], runs_after=[create_user_with_blank_password]) def create_existing_user(self): """Ensure creating an existing user fails.""" self.test_runner.run_existing_user_create() @test(depends_on=[create_users], runs_after=[create_existing_user]) def update_user_with_blank_name(self): """Ensure updating a user with blank 
name fails.""" self.test_runner.run_user_update_with_blank_name() @test(depends_on=[create_users], runs_after=[update_user_with_blank_name]) def update_user_with_existing_name(self): """Ensure updating a user with an existing name fails.""" self.test_runner.run_user_update_with_existing_name() @test(depends_on=[create_users], runs_after=[update_user_with_existing_name]) def update_user_attributes(self): """Update an existing user.""" self.test_runner.run_user_attribute_update() @test(depends_on=[update_user_attributes]) def recreate_user_with_no_access(self): """Re-create a renamed user with no access rights.""" self.test_runner.run_user_recreate_with_no_access() @test def show_nonexisting_user(self): """Ensure show on non-existing user fails.""" self.test_runner.run_nonexisting_user_show() @test def update_nonexisting_user(self): """Ensure updating a non-existing user fails.""" self.test_runner.run_nonexisting_user_update() @test def delete_nonexisting_user(self): """Ensure deleting a non-existing user fails.""" self.test_runner.run_nonexisting_user_delete() @test def create_system_user(self): """Ensure creating a system user fails.""" self.test_runner.run_system_user_create() @test def show_system_user(self): """Ensure showing a system user fails.""" self.test_runner.run_system_user_show() @test def update_system_user(self): """Ensure updating a system user fails.""" self.test_runner.run_system_user_attribute_update() @test(depends_on_classes=[UserActionsCreateGroup], groups=[GROUP, groups.USER_ACTION_DELETE]) class UserActionsDeleteGroup(TestGroup): """Test User Actions Delete functionality.""" def __init__(self): super(UserActionsDeleteGroup, self).__init__( UserActionsRunnerFactory.instance()) self.database_actions_runner = DatabaseActionsRunnerFactory.instance() @test def delete_user(self): """Delete the created users.""" self.test_runner.run_user_delete() @test def delete_system_user(self): """Ensure deleting a system user fails.""" self.test_runner.run_system_user_delete() @test def delete_user_databases(self): """Delete the user databases.""" self.database_actions_runner.run_database_delete() @test(groups=[GROUP, groups.USER_ACTION_INST, groups.USER_ACTION_INST_CREATE], depends_on_classes=[UserActionsDeleteGroup]) class UserActionsInstCreateGroup(TestGroup): """Test User Actions Instance Create functionality.""" def __init__(self): super(UserActionsInstCreateGroup, self).__init__( UserActionsRunnerFactory.instance()) self.instance_create_runner = InstanceCreateRunnerFactory.instance() @test def create_initialized_instance(self): """Create an instance with initial users.""" self.instance_create_runner.run_initialized_instance_create( with_dbs=False, with_users=True, configuration_id=None, create_helper_user=False, name_suffix='_user') @test(depends_on_classes=[UserActionsInstCreateGroup], groups=[GROUP, groups.USER_ACTION_INST, groups.USER_ACTION_INST_CREATE_WAIT]) class UserActionsInstCreateWaitGroup(TestGroup): """Wait for User Actions Instance Create to complete.""" def __init__(self): super(UserActionsInstCreateWaitGroup, self).__init__( UserActionsRunnerFactory.instance()) self.instance_create_runner = InstanceCreateRunnerFactory.instance() @test def wait_for_instances(self): """Waiting for user instance to become active.""" self.instance_create_runner.run_wait_for_init_instance() @test(depends_on=[wait_for_instances]) def validate_initialized_instance(self): """Validate the user instance data and properties.""" self.instance_create_runner.run_validate_initialized_instance() 
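# NOTE: the Create/CreateWait (and Delete/DeleteWait) split used by these
# groups lets a long-running operation start in one group and be awaited in
# a later one, so unrelated groups can run in between. Schematically, with
# hypothetical names:
#
#     class FooInstCreateGroup(TestGroup):
#         @test
#         def create_instance(self):
#             ...  # kick off the asynchronous create and return
#
#     @test(depends_on_classes=[FooInstCreateGroup])
#     class FooInstCreateWaitGroup(TestGroup):
#         @test
#         def wait_for_instance(self):
#             ...  # poll until the create completes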
@test(depends_on_classes=[UserActionsInstCreateWaitGroup], groups=[GROUP, groups.USER_ACTION_INST, groups.USER_ACTION_INST_DELETE]) class UserActionsInstDeleteGroup(TestGroup): """Test User Actions Instance Delete functionality.""" def __init__(self): super(UserActionsInstDeleteGroup, self).__init__( DatabaseActionsRunnerFactory.instance()) self.instance_create_runner = InstanceCreateRunnerFactory.instance() @test def delete_initialized_instance(self): """Delete the user instance.""" self.instance_create_runner.run_initialized_instance_delete() @test(depends_on_classes=[UserActionsInstDeleteGroup], groups=[GROUP, groups.USER_ACTION_INST, groups.USER_ACTION_INST_DELETE_WAIT]) class UserActionsInstDeleteWaitGroup(TestGroup): """Wait for User Actions Instance Delete to complete.""" def __init__(self): super(UserActionsInstDeleteWaitGroup, self).__init__( DatabaseActionsRunnerFactory.instance()) self.instance_create_runner = InstanceCreateRunnerFactory.instance() @test def wait_for_delete_initialized_instance(self): """Wait for the user instance to delete.""" self.instance_create_runner.run_wait_for_init_delete()
# trove-12.1.0.dev92/trove/tests/scenario/helpers/__init__.py (empty)
# trove-12.1.0.dev92/trove/tests/scenario/helpers/cassandra_helper.py
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cassandra.auth import PlainTextAuthProvider from cassandra.cluster import Cluster from trove.tests.scenario.helpers.test_helper import TestHelper from trove.tests.scenario.runners.test_runners import TestRunner class CassandraClient(object): # Cassandra 2.1 only supports protocol versions 3 and lower.
NATIVE_PROTOCOL_VERSION = 3 def __init__(self, contact_points, user, password, keyspace): super(CassandraClient, self).__init__() self._cluster = None self._session = None self._cluster = Cluster( contact_points=contact_points, auth_provider=PlainTextAuthProvider(user, password), protocol_version=self.NATIVE_PROTOCOL_VERSION) self._session = self._connect(keyspace) def _connect(self, keyspace): if not self._cluster.is_shutdown: return self._cluster.connect(keyspace) else: raise Exception("Cannot perform this operation on a terminated " "cluster.") @property def session(self): return self._session def __del__(self): if self._cluster is not None: self._cluster.shutdown() if self._session is not None: self._session.shutdown() class CassandraHelper(TestHelper): DATA_COLUMN_NAME = 'value' def __init__(self, expected_override_name, report): super(CassandraHelper, self).__init__(expected_override_name, report) self._data_cache = dict() def create_client(self, host, *args, **kwargs): user = self.get_helper_credentials() username = kwargs.get('username', user['name']) password = kwargs.get('password', user['password']) database = kwargs.get('database', user['database']) return CassandraClient([host], username, password, database) def add_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) self._create_data_table(client, data_label) stmt = client.session.prepare("INSERT INTO %s (%s) VALUES (?)" % (data_label, self.DATA_COLUMN_NAME)) count = self._count_data_rows(client, data_label) if count == 0: for value in self._get_dataset(data_size): client.session.execute(stmt, [value]) def _create_data_table(self, client, table_name): client.session.execute('CREATE TABLE IF NOT EXISTS %s ' '(%s INT PRIMARY KEY)' % (table_name, self.DATA_COLUMN_NAME)) def _count_data_rows(self, client, table_name): rows = client.session.execute('SELECT COUNT(*) FROM %s' % table_name) if rows: return rows[0][0] return 0 def _get_dataset(self, data_size): cache_key = str(data_size) if cache_key in self._data_cache: return self._data_cache.get(cache_key) data = self._generate_dataset(data_size) self._data_cache[cache_key] = data return data def _generate_dataset(self, data_size): return range(1, data_size + 1) def remove_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) self._drop_table(client, data_label) def _drop_table(self, client, table_name): client.session.execute('DROP TABLE %s' % table_name) def verify_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): expected_data = self._get_dataset(data_size) client = self.get_client(host, *args, **kwargs) actual_data = self._select_data_rows(client, data_label) TestRunner.assert_equal(len(expected_data), len(actual_data), "Unexpected number of result rows.") for expected_row in expected_data: TestRunner.assert_true(expected_row in actual_data, "Row not found in the result set: %s" % expected_row) def _select_data_rows(self, client, table_name): rows = client.session.execute('SELECT %s FROM %s' % (self.DATA_COLUMN_NAME, table_name)) return [value[0] for value in rows] def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': 'firstdb'} def ping(self, host, *args, **kwargs): try: self.get_client(host, *args, **kwargs) return True except Exception: return False def get_valid_database_definitions(self): return [{"name": 'db1'}, {"name": 'db2'}, {"name": 'db3'}] def 
get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': []}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}]}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def get_non_dynamic_group(self): return {'sstable_preemptive_open_interval_in_mb': 40} def get_invalid_groups(self): return [{'sstable_preemptive_open_interval_in_mb': -1}, {'sstable_preemptive_open_interval_in_mb': 'string_value'}] def get_exposed_user_log_names(self): return ['system']
# trove-12.1.0.dev92/trove/tests/scenario/helpers/couchbase_helper.py
# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from couchbase.bucket import Bucket from couchbase import exceptions as cb_except from trove.tests.scenario.helpers.test_helper import TestHelper from trove.tests.scenario.runners.test_runners import TestRunner from trove.tests.util import utils class CouchbaseHelper(TestHelper): def __init__(self, expected_override_name, report): super(CouchbaseHelper, self).__init__(expected_override_name, report) self._data_cache = dict() def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass'} def create_client(self, host, *args, **kwargs): user = self.get_helper_credentials() return self._create_test_bucket(host, user['name'], user['password']) def _create_test_bucket(self, host, bucket_name, password): return Bucket('couchbase://%s/%s' % (host, bucket_name), password=password) # Add data overrides def add_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) if not self._key_exists(client, data_label, *args, **kwargs): self._set_data_point(client, data_label, self._get_dataset(data_start, data_size)) @utils.retry((cb_except.TemporaryFailError, cb_except.BusyError)) def _key_exists(self, client, key, *args, **kwargs): return client.get(key, quiet=True).success @utils.retry((cb_except.TemporaryFailError, cb_except.BusyError)) def _set_data_point(self, client, key, value, *args, **kwargs): client.insert(key, value) def _get_dataset(self, data_start, data_size): cache_key = str(data_size) if cache_key in self._data_cache: return self._data_cache.get(cache_key) data = range(data_start, data_start + data_size) self._data_cache[cache_key] = data return data # Remove data overrides def remove_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) if self._key_exists(client, data_label, *args, **kwargs): self._remove_data_point(client, data_label, *args, **kwargs) @utils.retry((cb_except.TemporaryFailError, cb_except.BusyError)) def _remove_data_point(self, client, key, *args, **kwargs): client.remove(key) # Verify data overrides def verify_actual_data(self,
data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) expected_value = self._get_dataset(data_start, data_size) self._verify_data_point(client, data_label, expected_value) def _verify_data_point(self, client, key, expected_value, *args, **kwargs): value = self._get_data_point(client, key, *args, **kwargs) TestRunner.assert_equal(expected_value, value, "Unexpected value '%s' returned from " "Couchbase key '%s'" % (value, key)) @utils.retry((cb_except.TemporaryFailError, cb_except.BusyError)) def _get_data_point(self, client, key, *args, **kwargs): return client.get(key).value def ping(self, host, *args, **kwargs): try: self.create_client(host, *args, **kwargs) return True except Exception: return False
# trove-12.1.0.dev92/trove/tests/scenario/helpers/couchdb_helper.py
# Copyright 2016 IBM Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import couchdb from trove.tests.scenario.helpers.test_helper import TestHelper from trove.tests.scenario.runners.test_runners import TestRunner class CouchdbHelper(TestHelper): def __init__(self, expected_override_name, report): super(CouchdbHelper, self).__init__(expected_override_name, report) self._data_cache = dict() self.field_name = 'ff-%s' self.database = 'firstdb' def create_client(self, host, *args, **kwargs): username = self.get_helper_credentials()['name'] password = self.get_helper_credentials()["password"] url = 'http://%(username)s:%(password)s@%(host)s:5984/' % { 'username': username, 'password': password, 'host': host, } server = couchdb.Server(url) return server def add_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) db = client[self.database] doc = {} doc_id, doc_rev = db.save(doc) data = self._get_dataset(data_size) doc = db.get(doc_id) for value in data: key = self.field_name % value doc[key] = value db.save(doc) def _get_dataset(self, data_size): cache_key = str(data_size) if cache_key in self._data_cache: return self._data_cache.get(cache_key) data = self._generate_dataset(data_size) self._data_cache[cache_key] = data return data def _generate_dataset(self, data_size): return range(1, data_size + 1) def remove_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): client = self.get_client(host) db = client[self.database + "_" + data_label] client.delete(db) def verify_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): expected_data = self._get_dataset(data_size) client = self.get_client(host, *args, **kwargs) db = client[self.database] actual_data = [] TestRunner.assert_equal(len(db), 1) for i in db: items = db[i].items() actual_data = ([value for key, value in items if key not in ['_id', '_rev']]) TestRunner.assert_equal(len(expected_data),
len(actual_data), "Unexpected number of result rows.") for expected_row in expected_data: TestRunner.assert_true(expected_row in actual_data, "Row not found in the result set: %s" % expected_row) def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': self.database} def get_helper_credentials_root(self): return {'name': 'root', 'password': 'rootpass'} def get_valid_database_definitions(self): return [{'name': 'db1'}, {'name': 'db2'}, {"name": 'db3'}] def get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': [], 'host': '127.0.0.1'}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}], 'host': '0.0.0.0'}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}]
# trove-12.1.0.dev92/trove/tests/scenario/helpers/db2_helper.py
# Copyright 2016 IBM Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.tests.scenario.helpers.test_helper import TestHelper class Db2Helper(TestHelper): def __init__(self, expected_override_name, report): super(Db2Helper, self).__init__(expected_override_name, report) def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': 'lite'} def get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': []}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}]}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def get_dynamic_group(self): return {'MON_HEAP_SZ': 40} def get_non_dynamic_group(self): return {'NUMDB': 30} def get_invalid_groups(self): return [{'timezone': 997}, {"max_worker_processes": 'string_value'}, {"standard_conforming_strings": 'string_value'}]
# trove-12.1.0.dev92/trove/tests/scenario/helpers/mariadb_helper.py
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
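# NOTE: several of the helpers that follow are empty subclasses. The helper
# for a datastore appears to be located by naming convention (e.g. a
# 'mariadb' datastore resolving to MariadbHelper in mariadb_helper.py), so
# every supported datastore needs at least a trivial subclass like the
# MariadbHelper below.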
from trove.tests.scenario.helpers.mysql_helper import MysqlHelper class MariadbHelper(MysqlHelper): def __init__(self, expected_override_name, report): super(MariadbHelper, self).__init__(expected_override_name, report)
# trove-12.1.0.dev92/trove/tests/scenario/helpers/mongodb_helper.py
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.tests.scenario.helpers.test_helper import TestHelper class MongodbHelper(TestHelper): def __init__(self, expected_override_name, report): super(MongodbHelper, self).__init__(expected_override_name, report) def get_valid_database_definitions(self): return [{"name": 'db1'}, {"name": 'db2'}, {'name': 'db3'}] def get_valid_user_definitions(self): return [{'name': 'db0.user1', 'password': 'password1', 'databases': []}, {'name': 'db0.user2', 'password': 'password1', 'databases': [{'name': 'db1'}]}, {'name': 'db1.user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def get_non_dynamic_group(self): return {'systemLog.verbosity': 4} def get_invalid_groups(self): return [{'net.maxIncomingConnections': -1}, {'storage.mmapv1.nsSize': 4096}, {'storage.journal.enabled': 'string_value'}]
# trove-12.1.0.dev92/trove/tests/scenario/helpers/mysql_helper.py
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
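# NOTE: in the MysqlHelper below, get_dynamic_group() returns settings MySQL
# can apply at runtime (key_buffer_size, join_buffer_size), while
# get_non_dynamic_group() returns settings that only take effect after a
# restart (innodb_buffer_pool_size); the configuration runners presumably
# use this split to decide whether applying a group must restart the
# instance.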
from trove.tests.scenario.helpers.sql_helper import SqlHelper class MysqlHelper(SqlHelper): def __init__(self, expected_override_name, report): super(MysqlHelper, self).__init__(expected_override_name, report, 'mysql+pymysql') def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': 'firstdb'} def get_helper_credentials_root(self): return {'name': 'root', 'password': 'rootpass'} def get_valid_database_definitions(self): return [{'name': 'db1', 'character_set': 'latin2', 'collate': 'latin2_general_ci'}, {'name': 'db2'}, {"name": 'db3'}] def get_valid_user_definitions(self): return [{'name': 'a_user1', 'password': 'password1', 'databases': [], 'host': '127.0.0.1'}, {'name': 'a_user2', 'password': 'password1', 'databases': [{'name': 'db1'}], 'host': '0.0.0.0'}, {'name': 'a_user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def get_dynamic_group(self): return {'key_buffer_size': 10485760, 'join_buffer_size': 10485760} def get_non_dynamic_group(self): return {'innodb_buffer_pool_size': 10485760, 'long_query_time': 59.1} def get_invalid_groups(self): return [{'key_buffer_size': -1}, {"join_buffer_size": 'string_value'}] def get_exposed_user_log_names(self): return ['general', 'slow_query'] def get_unexposed_sys_log_names(self): return ['guest', 'error'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/helpers/percona_helper.py0000644000175000017500000000153600000000000025262 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.tests.scenario.helpers.mysql_helper import MysqlHelper class PerconaHelper(MysqlHelper): def __init__(self, expected_override_name, report): super(PerconaHelper, self).__init__(expected_override_name, report) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/helpers/postgresql_helper.py0000644000175000017500000000505000000000000026031 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
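# NOTE: PostgresqlHelper below uses the 'postgresql' SQLAlchemy protocol on
# port 5432 and reports 'public' as its test schema. Unlike MySQL, the
# helper user needs a database of the same name in order to log in (see
# get_helper_credentials below), and the root user is 'postgres'.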
from trove.tests.scenario.helpers.sql_helper import SqlHelper class PostgresqlHelper(SqlHelper): def __init__(self, expected_override_name, report, port=5432): super(PostgresqlHelper, self).__init__(expected_override_name, report, 'postgresql', port=port) @property def test_schema(self): return 'public' def get_helper_credentials(self): # There must be a database with the same name as the user in order # for the user to be able to login. return {'name': 'lite', 'password': 'litepass', 'database': 'lite'} def get_helper_credentials_root(self): return {'name': 'postgres', 'password': 'rootpass'} def get_valid_database_definitions(self): return [{'name': 'db1'}, {'name': 'db2'}, {'name': 'db3'}] def get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': []}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}]}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def get_dynamic_group(self): return {'effective_cache_size': '528MB'} def get_non_dynamic_group(self): return {'max_connections': 113, 'log_min_duration_statement': '257ms'} def get_invalid_groups(self): return [{'timezone': 997}, {"vacuum_cost_delay": 'string_value'}, {"standard_conforming_strings": 'string_value'}] def get_configuration_value(self, property_name, host, *args, **kwargs): client = self.get_client(host, *args, **kwargs) cmd = "SHOW %s;" % property_name row = client.execute(cmd).fetchone() return row[0] def get_exposed_user_log_names(self): return ['general'] def log_enable_requires_restart(self): return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/helpers/pxc_helper.py0000644000175000017500000000152600000000000024424 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.tests.scenario.helpers.mysql_helper import MysqlHelper class PxcHelper(MysqlHelper): def __init__(self, expected_override_name, report): super(PxcHelper, self).__init__(expected_override_name, report) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/helpers/redis_helper.py0000644000175000017500000002237300000000000024743 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
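# NOTE: Redis is a key/value store, so the helper below does not build on
# SqlHelper. Data is written directly through the redis client using the
# key/value patterns defined in __init__, and cluster MOVED redirections
# are followed manually (see '_execute_with_redirection').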
import random

import redis

from trove.tests.scenario.helpers.test_helper import TestHelper
from trove.tests.scenario.runners.test_runners import TestRunner


class RedisHelper(TestHelper):

    def __init__(self, expected_override_name, report):
        super(RedisHelper, self).__init__(expected_override_name, report)

        self.key_patterns = ['user_a:%s', 'user_b:%s']
        self.value_pattern = 'id:%s'
        self.label_value = 'value_set'

        self._ds_client_cache = dict()

    def get_helper_credentials_root(self):
        return {'name': '-', 'password': 'rootpass'}

    def get_client(self, host, *args, **kwargs):
        # We need to cache the Redis client in order to prevent Error 99
        # (Cannot assign requested address) when working with large data
        # sets. A new client may be created frequently due to how the
        # redirection works (see '_execute_with_redirection').
        # The old (now closed) connections however have to wait for about
        # 60s (TIME_WAIT) before the port can be released. This is a feature
        # of the operating system that helps it deal with packets that
        # arrive after the connection is closed.
        #
        # NOTE(zhaochao): when connecting to a Redis server with a password,
        # the currently cached client may not have been updated to use the
        # same password; the connection_kwargs of its ConnectionPool object
        # should be checked, and if the new password is different, a new
        # client instance has to be created.
        recreate_client = True

        # NOTE(zhaochao): another problem with caching clients is that when
        # the 'requirepass' parameter of the Redis server is changed, an
        # already connected client can still issue commands. If we want to
        # make sure old passwords cannot be used to connect to the server,
        # cached clients shouldn't be used; a new one should be created
        # instead. We cannot easily tell whether the 'requirepass' parameter
        # has changed, so we always recreate a client when a password is
        # explicitly specified. The cached client is only used when no
        # password is specified (i.e. we're going to use the default
        # password) and the cached password is the same as the default one.
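        # In short, the rule implemented below: reuse the cached client only
        # when the caller did not pass an explicit password AND the cached
        # connection was opened with the current default helper password;
        # in every other case a fresh client is created and cached.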
if (host in self._ds_client_cache and 'password' not in kwargs): default_password = self.get_helper_credentials()['password'] cached_password = (self._ds_client_cache[host] .connection_pool .connection_kwargs.get('password')) if cached_password == default_password: recreate_client = False if recreate_client: self._ds_client_cache[host] = ( self.create_client(host, *args, **kwargs)) return self._ds_client_cache[host] def create_client(self, host, *args, **kwargs): user = self.get_helper_credentials() password = kwargs.get('password', user['password']) client = redis.StrictRedis(password=password, host=host) return client # Add data overrides # We use multiple keys to make the Redis backup take longer def add_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): test_set = self._get_data_point(host, data_label, *args, **kwargs) if not test_set: for num in range(data_start, data_start + data_size): for key_pattern in self.key_patterns: self._set_data_point( host, key_pattern % str(num), self.value_pattern % str(num), *args, **kwargs) # now that the data is there, add the label self._set_data_point( host, data_label, self.label_value, *args, **kwargs) def _set_data_point(self, host, key, value, *args, **kwargs): def set_point(client, key, value): return client.set(key, value) self._execute_with_redirection( host, set_point, [key, value], *args, **kwargs) def _get_data_point(self, host, key, *args, **kwargs): def get_point(client, key): return client.get(key) return self._execute_with_redirection( host, get_point, [key], *args, **kwargs) def _execute_with_redirection(self, host, callback, callback_args, *args, **kwargs): """Redis clustering is a relatively new feature still not supported in a fully transparent way by all clients. The application itself is responsible for connecting to the right node when accessing a key in a Redis cluster instead. Clients may be redirected to other nodes by redirection errors: redis.exceptions.ResponseError: MOVED 10778 10.64.0.2:6379 This method tries to execute a given callback on a given host. If it gets a redirection error it parses the new host from the response and issues the same callback on this new host. 
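        Note that the redirection is followed recursively, so a chain of
        MOVED responses (node A redirects to B, which redirects to C) is
        handled as well; any ResponseError other than MOVED is re-raised
        unchanged.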
""" client = self.get_client(host, *args, **kwargs) try: return callback(client, *callback_args) except redis.exceptions.ResponseError as ex: response = str(ex) if response: tokens = response.split() if tokens[0] == 'MOVED': redirected_host = tokens[2].split(':')[0] if redirected_host: return self._execute_with_redirection( redirected_host, callback, callback_args, *args, **kwargs) raise ex # Remove data overrides # We use multiple keys to make the Redis backup take longer def remove_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): test_set = self._get_data_point(host, data_label, *args, **kwargs) if test_set: for num in range(data_start, data_start + data_size): for key_pattern in self.key_patterns: self._expire_data_point(host, key_pattern % str(num), *args, **kwargs) # now that the data is gone, remove the label self._expire_data_point(host, data_label, *args, **kwargs) def _expire_data_point(self, host, key, *args, **kwargs): def expire_point(client, key): return client.expire(key, 0) self._execute_with_redirection( host, expire_point, [key], *args, **kwargs) # Verify data overrides # We use multiple keys to make the Redis backup take longer def verify_actual_data(self, data_label, data_start, data_size, host, *args, **kwargs): # make sure the data is there - tests edge cases and a random one self._verify_data_point(host, data_label, self.label_value, *args, **kwargs) midway_num = data_start + int(data_size / 2) random_num = random.randint(data_start + 2, data_start + data_size - 3) for num in [data_start, data_start + 1, midway_num, random_num, data_start + data_size - 2, data_start + data_size - 1]: for key_pattern in self.key_patterns: self._verify_data_point(host, key_pattern % num, self.value_pattern % num, *args, **kwargs) # negative tests for num in [data_start - 1, data_start + data_size]: for key_pattern in self.key_patterns: self._verify_data_point(host, key_pattern % num, None, *args, **kwargs) def _verify_data_point(self, host, key, expected_value, *args, **kwargs): value = self._get_data_point(host, key, *args, **kwargs) TestRunner.assert_equal(expected_value, value, "Unexpected value '%s' returned from Redis " "key '%s'" % (value, key)) def get_dynamic_group(self): return {'hz': 15} def get_non_dynamic_group(self): return {'databases': 24} def get_invalid_groups(self): return [{'hz': 600}, {'databases': -1}, {'databases': 'string_value'}] def ping(self, host, *args, **kwargs): try: client = self.get_client(host, *args, **kwargs) return client.ping() except Exception: return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/helpers/sql_helper.py0000644000175000017500000001412600000000000024431 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy
from sqlalchemy import MetaData, Table, Column, Integer

from trove.tests.scenario.helpers.test_helper import TestHelper
from trove.tests.scenario.runners.test_runners import TestRunner


class SqlHelper(TestHelper):
    """This mixin provides data handling helper functions for SQL
    datastores.
    """

    DATA_COLUMN_NAME = 'value'

    def __init__(self, expected_override_name, report,
                 protocol="mysql+pymysql", port=None):
        super(SqlHelper, self).__init__(expected_override_name, report)

        self.protocol = protocol
        self.port = port
        self.credentials = self.get_helper_credentials()
        self.credentials_root = self.get_helper_credentials_root()

        self._schema_metadata = MetaData()
        self._data_cache = dict()

    @property
    def test_schema(self):
        return self.credentials['database']

    def create_client(self, host, *args, **kwargs):
        username = kwargs.get('username', self.credentials['name'])
        password = kwargs.get('password', self.credentials['password'])
        database = kwargs.get('database', self.credentials['database'])
        creds = {"name": username, "password": password, "database": database}
        return sqlalchemy.create_engine(
            self._build_connection_string(host, creds))

    def _build_connection_string(self, host, creds):
        if self.port:
            host = "%s:%d" % (host, self.port)

        credentials = {'protocol': self.protocol,
                       'host': host,
                       'user': creds.get('name', ''),
                       'password': creds.get('password', ''),
                       'database': creds.get('database', '')}
        return ('%(protocol)s://%(user)s:%(password)s@%(host)s/%(database)s'
                % credentials)

    # Add data overrides
    def add_actual_data(self, data_label, data_start, data_size, host,
                        *args, **kwargs):
        client = self.get_client(host, *args, **kwargs)
        self._create_data_table(client, self.test_schema, data_label)
        count = self._count_data_rows(client, self.test_schema, data_label)
        if count == 0:
            self._insert_data_rows(client, self.test_schema, data_label,
                                   data_size)

    def _create_data_table(self, client, schema_name, table_name):
        Table(
            table_name, self._schema_metadata,
            Column(self.DATA_COLUMN_NAME, Integer(),
                   nullable=False, default=0),
            keep_existing=True, schema=schema_name
        ).create(client, checkfirst=True)

    def _count_data_rows(self, client, schema_name, table_name):
        data_table = self._get_schema_table(schema_name, table_name)
        return client.execute(data_table.count()).scalar()

    def _insert_data_rows(self, client, schema_name, table_name, data_size):
        data_table = self._get_schema_table(schema_name, table_name)
        client.execute(data_table.insert(), self._get_dataset(data_size))

    def _get_schema_table(self, schema_name, table_name):
        qualified_table_name = '%s.%s' % (schema_name, table_name)
        return self._schema_metadata.tables.get(qualified_table_name)

    def _get_dataset(self, data_size):
        cache_key = str(data_size)
        if cache_key in self._data_cache:
            return self._data_cache.get(cache_key)

        data = self._generate_dataset(data_size)
        self._data_cache[cache_key] = data
        return data

    def _generate_dataset(self, data_size):
        return [{self.DATA_COLUMN_NAME: value}
                for value in range(1, data_size + 1)]

    # Remove data overrides
    def remove_actual_data(self, data_label, data_start, data_size, host,
                           *args, **kwargs):
        client = self.get_client(host)
        self._drop_table(client, self.test_schema, data_label)

    def _drop_table(self, client, schema_name, table_name):
        data_table = self._get_schema_table(schema_name, table_name)
        data_table.drop(client, checkfirst=True)

    # Verify data overrides
    def verify_actual_data(self, data_label, data_start, data_size, host,
                           *args, **kwargs):
        expected_data = [(item[self.DATA_COLUMN_NAME],) for item in
                         self._get_dataset(data_size)]
        client = self.get_client(host, *args, **kwargs)
        actual_data = self._select_data_rows(client, self.test_schema,
                                             data_label)

        TestRunner.assert_equal(len(expected_data), len(actual_data),
                                "Unexpected number of result rows.")
        TestRunner.assert_list_elements_equal(
            expected_data, actual_data, "Unexpected rows in the result set.")

    def _select_data_rows(self, client, schema_name, table_name):
        data_table = self._get_schema_table(schema_name, table_name)
        return client.execute(data_table.select()).fetchall()

    def ping(self, host, *args, **kwargs):
        try:
            root_client = self.get_client(host, *args, **kwargs)
            root_client.execute("SELECT 1;")
            return True
        except Exception as e:
            print("Failed to execute sql command, error: %s" % str(e))
            return False

    def get_configuration_value(self, property_name, host, *args, **kwargs):
        client = self.get_client(host, *args, **kwargs)
        cmd = "SHOW GLOBAL VARIABLES LIKE '%s';" % property_name
        row = client.execute(cmd).fetchone()
        return row['Value']

trove-12.1.0.dev92/trove/tests/scenario/helpers/test_helper.py

# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from enum import Enum
import inspect

from proboscis import SkipTest
from time import sleep


class DataType(Enum):
    """
    Represent the type of data to add to a datastore. This allows for
    multiple 'states' of data that can be verified after actions are
    performed by Trove.
    If new entries are added here, sane values should be added to the
    _fn_data dictionary defined in TestHelper.
    """

    # micro amount of data, useful for testing datastore logging, etc.
    micro = 1
    # another micro dataset (also for datastore logging)
    micro2 = 2
    # another micro dataset (also for datastore logging)
    micro3 = 3
    # another micro dataset (also for datastore logging)
    micro4 = 4
    # very tiny amount of data, useful for testing replication
    # propagation, etc.
    tiny = 5
    # another tiny dataset (also for replication propagation)
    tiny2 = 6
    # a third tiny dataset (also for replication propagation)
    tiny3 = 7
    # a fourth tiny dataset (for cluster propagation)
    tiny4 = 8
    # small amount of data (this can be added to each instance
    # after creation, for example).
    small = 9
    # large data, enough to make creating a backup take 20s or more.
    large = 10


class TestHelper(object):
    """
    Base class for all 'Helper' classes.

    The Helper classes are designed to do datastore specific work that
    can be used by multiple runner classes. Things like adding data to
    datastores and verifying data or internal database states, etc.
    should be handled by these classes.
    """

    # Define the actions that can be done on each DataType.
    # When adding a new action, remember to modify _data_fns.
    FN_ADD = 'add'
    FN_REMOVE = 'remove'
    FN_VERIFY = 'verify'
    FN_TYPES = [FN_ADD, FN_REMOVE, FN_VERIFY]

    # Artificial 'DataType' name to use for the methods that do the
    # actual data manipulation work.
    DT_ACTUAL = 'actual'

    def __init__(self, expected_override_name, report):
        """Initialize the helper class by creating a number of stub
        functions that each datastore specific class can choose to
        override.  Basically, the functions are of the form:
            {FN_TYPE}_{DataType.name}_data
        For example:
            add_tiny_data
            add_small_data
            remove_small_data
            verify_large_data
        and so on.  Add and remove actions throw a SkipTest if not
        implemented, and verify actions by default do nothing.
        These methods, by default, call the corresponding *_actual_data()
        passing in 'data_label', 'data_start' and 'data_size' as defined
        for each DataType in the dictionary below.
        """
        super(TestHelper, self).__init__()

        self._expected_override_name = expected_override_name
        self.report = report

        # For building data access functions
        # name/fn pairs for each action
        self._data_fns = {self.FN_ADD: {},
                          self.FN_REMOVE: {},
                          self.FN_VERIFY: {}}
        # Pattern used to create the data functions.  The first parameter
        # is the function type (FN_TYPE), the second is the DataType
        # or DT_ACTUAL.
        self.data_fn_pattern = '%s_%s_data'
        # Values to distinguish between the different DataTypes.  If these
        # values don't work for a datastore, it will need to override
        # the auto-generated {FN_TYPE}_{DataType.name}_data method.
        self.DATA_START = 'start'
        self.DATA_SIZE = 'size'
        self._fn_data = {
            DataType.micro.name: {
                self.DATA_START: 100, self.DATA_SIZE: 10},
            DataType.micro2.name: {
                self.DATA_START: 200, self.DATA_SIZE: 10},
            DataType.micro3.name: {
                self.DATA_START: 300, self.DATA_SIZE: 10},
            DataType.micro4.name: {
                self.DATA_START: 400, self.DATA_SIZE: 10},
            DataType.tiny.name: {
                self.DATA_START: 1000, self.DATA_SIZE: 100},
            DataType.tiny2.name: {
                self.DATA_START: 2000, self.DATA_SIZE: 100},
            DataType.tiny3.name: {
                self.DATA_START: 3000, self.DATA_SIZE: 100},
            DataType.tiny4.name: {
                self.DATA_START: 4000, self.DATA_SIZE: 100},
            DataType.small.name: {
                self.DATA_START: 10000, self.DATA_SIZE: 1000},
            DataType.large.name: {
                self.DATA_START: 100000, self.DATA_SIZE: 100000},
        }

        self._build_data_fns()

    #################
    # Utility methods
    #################
    def get_class_name(self):
        """Builds a string of the expected class name, plus the actual one
        being used if it's not the same.
        """
        class_name_str = "'%s'" % self._expected_override_name
        if self._expected_override_name != self.__class__.__name__:
            class_name_str += ' (using %s)' % self.__class__.__name__
        return class_name_str

    ################
    # Client related
    ################
    def get_client(self, host, *args, **kwargs):
        """Gets the datastore client. This isn't cached as the database may
        be restarted in between calls, causing lost connection errors.
        """
        return self.create_client(host, *args, **kwargs)

    def create_client(self, host, *args, **kwargs):
        """Create a datastore client. This is datastore specific, so this
        method should be overridden if datastore access is desired.
        """
        raise SkipTest('No client defined')

    def get_helper_credentials(self):
        """Return the credentials that the client will be using to
        access the database.
        """
        return {'name': None, 'password': None, 'database': None}

    def ping(self, host, *args, **kwargs):
        """Try to connect to a given host and perform a simple read-only
        action.  Return True on success or False otherwise.
""" pass ############## # Root related ############## def get_helper_credentials_root(self): """Return the credentials that the client will be using to access the database as root. """ return {'name': None, 'password': None} ############## # Data related ############## def add_data(self, data_type, host, *args, **kwargs): """Adds data of type 'data_type' to the database. Descendant classes should implement a function 'add_actual_data' that has the following signature: def add_actual_data( self, # standard self reference data_label, # label used to identify the 'type' to add data_start, # a start count data_size, # a size to use host, # the host to add the data to *args, # for possible future expansion **kwargs # for possible future expansion ): The data_label could be used to create a database or a table if the datastore supports that. The data_start and data_size values are designed not to overlap, such that all the data could be stored in a single namespace (for example, creating ids from data_start to data_start + data_size). Since this method may be called multiple times, the 'add_actual_data' function should be idempotent. """ self._perform_data_action(self.FN_ADD, data_type.name, host, *args, **kwargs) def remove_data(self, data_type, host, *args, **kwargs): """Removes all data associated with 'data_type'. See instructions for 'add_data' for implementation guidance. """ self._perform_data_action(self.FN_REMOVE, data_type.name, host, *args, **kwargs) def verify_data(self, data_type, host, *args, **kwargs): """Verify that the data of type 'data_type' exists in the datastore. This can be done by testing edge cases, and possibly some random elements within the set. See instructions for 'add_data' for implementation guidance. """ self._perform_data_action(self.FN_VERIFY, data_type.name, host, *args, **kwargs) def _perform_data_action(self, fn_type, fn_name, host, *args, **kwargs): """By default, the action is attempted 10 times, sleeping for 3 seconds between each attempt. This can be controlled by the retry_count and retry_sleep kwarg values. """ retry_count = kwargs.pop('retry_count', 10) or 0 retry_sleep = kwargs.pop('retry_sleep', 3) or 0 fns = self._data_fns[fn_type] data_fn_name = self.data_fn_pattern % (fn_type, fn_name) attempts = -1 while True: attempts += 1 try: fns[data_fn_name](self, host, *args, **kwargs) break except SkipTest: raise except Exception as ex: self.report.log("Attempt %d to %s data type %s failed\n%s" % (attempts, fn_type, fn_name, ex)) if attempts > retry_count: raise RuntimeError("Error calling %s from class %s - %s" % (data_fn_name, self.__class__.__name__, ex)) self.report.log("Trying again (after %d second sleep)" % retry_sleep) sleep(retry_sleep) def _build_data_fns(self): """Build the base data functions specified by FN_TYPE_* for each of the types defined in the DataType class. For example, 'add_small_data' and 'verify_large_data'. These functions are set to call '*_actual_data' and will pass in sane values for label, start and size. The '*_actual_data' methods should be overwritten by a descendant class, and are the ones that do the actual work. The original 'add_small_data', etc. methods can also be overridden if needed, and those overwritten functions will be bound before calling any data functions such as 'add_data' or 'remove_data'. 
""" for fn_type in self.FN_TYPES: fn_dict = self._data_fns[fn_type] for data_type in DataType: self._data_fn_builder(fn_type, data_type.name, fn_dict) self._data_fn_builder(fn_type, self.DT_ACTUAL, fn_dict) self._override_data_fns() def _data_fn_builder(self, fn_type, fn_name, fn_dict): """Builds the actual function with a SkipTest exception, and changes the name to reflect the pattern. """ data_fn_name = self.data_fn_pattern % (fn_type, fn_name) # Build the overridable 'actual' Data Manipulation methods if fn_name == self.DT_ACTUAL: def data_fn(self, data_label, data_start, data_size, host, *args, **kwargs): # default action is to skip the test cls_str = '' if self._expected_override_name != self.__class__.__name__: cls_str = (' (%s not loaded)' % self._expected_override_name) raise SkipTest("Data function '%s' not found in '%s'%s" % ( data_fn_name, self.__class__.__name__, cls_str)) else: def data_fn(self, host, *args, **kwargs): # call the corresponding 'actual' method fns = self._data_fns[fn_type] var_dict = self._fn_data[fn_name] data_start = var_dict[self.DATA_START] data_size = var_dict[self.DATA_SIZE] actual_fn_name = self.data_fn_pattern % ( fn_type, self.DT_ACTUAL) try: fns[actual_fn_name](self, fn_name, data_start, data_size, host, *args, **kwargs) except SkipTest: raise except Exception as ex: raise RuntimeError("Error calling %s from class %s: %s" % ( data_fn_name, self.__class__.__name__, ex)) data_fn.__name__ = data_fn.func_name = data_fn_name fn_dict[data_fn_name] = data_fn def _override_data_fns(self): """Bind the override methods to the dict.""" members = inspect.getmembers(self.__class__, predicate=inspect.ismethod) for fn_type in self.FN_TYPES: fns = self._data_fns[fn_type] for name, fn in members: if name in fns: fns[name] = fn ####################### # Database/User related ####################### def get_valid_database_definitions(self): """Return a list of valid database JSON definitions. These definitions will be used by tests that create databases. Return an empty list if the datastore does not support databases. """ return list() def get_valid_user_definitions(self): """Return a list of valid user JSON definitions. These definitions will be used by tests that create users. Return an empty list if the datastore does not support users. """ return list() def get_non_existing_database_definition(self): """Return a valid JSON definition for a non-existing database. This definition will be used by negative database tests. The database will not be created by any of the tests. Return None if the datastore does not support databases. """ valid_defs = self.get_valid_database_definitions() return self._get_non_existing_definition(valid_defs) def get_non_existing_user_definition(self): """Return a valid JSON definition for a non-existing user. This definition will be used by negative user tests. The user will not be created by any of the tests. Return None if the datastore does not support users. """ valid_defs = self.get_valid_user_definitions() return self._get_non_existing_definition(valid_defs) def _get_non_existing_definition(self, existing_defs): """This will create a unique definition for a non-existing object by randomizing one of an existing object. 
""" if existing_defs: non_existing_def = dict(existing_defs[0]) while non_existing_def in existing_defs: non_existing_def = self._randomize_on_name(non_existing_def) return non_existing_def return None def _randomize_on_name(self, definition): def_copy = dict(definition) def_copy['name'] = ''.join([def_copy['name'], 'rnd']) return def_copy ############################# # Configuration Group related ############################# def get_dynamic_group(self): """Return a definition of a dynamic configuration group. A dynamic group should contain only properties that do not require database restart. Return an empty dict if the datastore does not have any. """ return dict() def get_non_dynamic_group(self): """Return a definition of a non-dynamic configuration group. A non-dynamic group has to include at least one property that requires database restart. Return an empty dict if the datastore does not have any. """ return dict() def get_invalid_groups(self): """Return a list of configuration groups with invalid values. An empty list indicates that no 'invalid' tests should be run. """ return [] def get_configuration_value(self, property_name, host, *args, **kwargs): """Use the client to retrieve the value of a given configuration property. """ raise SkipTest("Runtime configuration retrieval not implemented in %s" % self.get_class_name()) ################### # Guest Log related ################### def get_exposed_log_list(self): """Return the list of exposed logs for the datastore. This method shouldn't need to be overridden. """ logs = [] try: logs.extend(self.get_exposed_user_log_names()) except SkipTest: pass try: logs.extend(self.get_exposed_sys_log_names()) except SkipTest: pass return logs def get_full_log_list(self): """Return the full list of all logs for the datastore. This method shouldn't need to be overridden. """ logs = self.get_exposed_log_list() try: logs.extend(self.get_unexposed_user_log_names()) except SkipTest: pass try: logs.extend(self.get_unexposed_sys_log_names()) except SkipTest: pass return logs # Override these guest log methods if needed def get_exposed_user_log_names(self): """Return the names of the user logs that are visible to all users. The first log name will be used for tests. """ raise SkipTest("No exposed user log names defined.") def get_unexposed_user_log_names(self): """Return the names of the user logs that not visible to all users. The first log name will be used for tests. """ raise SkipTest("No unexposed user log names defined.") def get_exposed_sys_log_names(self): """Return the names of SYS logs that are visible to all users. The first log name will be used for tests. """ raise SkipTest("No exposed sys log names defined.") def get_unexposed_sys_log_names(self): """Return the names of the sys logs that not visible to all users. The first log name will be used for tests. """ return ['guest'] def log_enable_requires_restart(self): """Returns whether enabling or disabling a USER log requires a restart of the datastore. """ return False ################ # Module related ################ def get_valid_module_type(self): """Return a valid module type.""" return "Ping" ################# # Cluster related ################# def get_cluster_types(self): """Returns a list of cluster type lists to use when creating instances. The list should be the same size as the number of cluster instances that will be created. If not specified, no types are sent to cluster-create. 
Cluster grow uses the first type in the list for the first instance, and doesn't use anything for the second instance (i.e. doesn't pass in anything for 'type'). An example for this method would be: return [['data', 'other_type'], ['third_type']] """ return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/helpers/vertica_helper.py0000644000175000017500000000411500000000000025264 0ustar00coreycorey00000000000000# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import SkipTest from trove.tests.scenario.helpers.sql_helper import SqlHelper class VerticaHelper(SqlHelper): def __init__(self, expected_override_name, report): super(VerticaHelper, self).__init__(expected_override_name, report, 'vertica') def get_helper_credentials(self): return {'name': 'lite', 'password': 'litepass', 'database': 'lite'} def get_valid_user_definitions(self): return [{'name': 'user1', 'password': 'password1', 'databases': []}, {'name': 'user2', 'password': 'password1', 'databases': [{'name': 'db1'}]}, {'name': 'user3', 'password': 'password1', 'databases': [{'name': 'db1'}, {'name': 'db2'}]}] def add_actual_data(self, *args, **kwargs): raise SkipTest("Adding data to Vertica is not implemented") def verify_actual_data(self, *args, **kwargs): raise SkipTest("Verifying data in Vertica is not implemented") def remove_actual_data(self, *args, **kwargs): raise SkipTest("Removing data from Vertica is not implemented") def get_dynamic_group(self): return {'ActivePartitionCount': 3} def get_non_dynamic_group(self): return {'BlockCacheSize': 1024} def get_invalid_groups(self): return [{'timezone': 997}, {"max_worker_processes": 'string_value'}, {"standard_conforming_strings": 'string_value'}] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.788111 trove-12.1.0.dev92/trove/tests/scenario/runners/0000755000175000017500000000000000000000000021747 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/__init__.py0000644000175000017500000000024600000000000024062 0ustar00coreycorey00000000000000BUG_EJECT_VALID_MASTER = 1622014 BUG_WRONG_API_VALIDATION = 1498573 BUG_STOP_DB_IN_CLUSTER = 1645096 BUG_UNAUTH_TEST_WRONG = 1653614 BUG_FORCE_DELETE_FAILS = 1656422 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/scenario/runners/backup_runners.py0000644000175000017500000004746700000000000025364 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import SkipTest from troveclient.compat import exceptions from trove.common.utils import generate_uuid from trove.common.utils import poll_until from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario.runners.test_runners import TestRunner class BackupRunner(TestRunner): def __init__(self): self.TIMEOUT_BACKUP_CREATE = 60 * 30 self.TIMEOUT_BACKUP_DELETE = 120 super(BackupRunner, self).__init__(timeout=self.TIMEOUT_BACKUP_CREATE) self.BACKUP_NAME = 'backup_test' self.BACKUP_DESC = 'test description' self.backup_host = None self.backup_info = None self.backup_count_prior_to_create = 0 self.backup_count_for_ds_prior_to_create = 0 self.backup_count_for_instance_prior_to_create = 0 self.databases_before_backup = None self.backup_inc_1_info = None self.backup_inc_2_info = None self.data_types_added = [] self.restore_instance_id = None self.restore_host = None self.restore_inc_1_instance_id = None self.restore_inc_1_host = None def run_backup_create_instance_invalid( self, expected_exception=exceptions.BadRequest, expected_http_code=400): invalid_inst_id = 'invalid-inst-id' client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.backups.create, self.BACKUP_NAME, invalid_inst_id, self.BACKUP_DESC) def run_backup_create_instance_not_found( self, expected_exception=exceptions.NotFound, expected_http_code=404): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.backups.create, self.BACKUP_NAME, generate_uuid(), self.BACKUP_DESC) def run_add_data_for_backup(self): self.backup_host = self.get_instance_host() self.assert_add_data_for_backup(self.backup_host, DataType.large) def assert_add_data_for_backup(self, host, data_type): """In order for this to work, the corresponding datastore 'helper' class should implement the 'add_actual_data' method. """ self.test_helper.add_data(data_type, host) self.data_types_added.append(data_type) def run_verify_data_for_backup(self): self.assert_verify_backup_data(self.backup_host, DataType.large) def assert_verify_backup_data(self, host, data_type): """In order for this to work, the corresponding datastore 'helper' class should implement the 'verify_actual_data' method. """ self.test_helper.verify_data(data_type, host) def run_save_backup_counts(self): # Necessary to test that the count increases. 
self.backup_count_prior_to_create = len( self.auth_client.backups.list()) self.backup_count_for_ds_prior_to_create = len( self.auth_client.backups.list( datastore=self.instance_info.dbaas_datastore)) self.backup_count_for_instance_prior_to_create = len( self.auth_client.instances.backups(self.instance_info.id)) def run_backup_create(self): if self.test_helper.get_valid_database_definitions(): self.databases_before_backup = self._get_databases( self.instance_info.id) self.backup_info = self.assert_backup_create( self.BACKUP_NAME, self.BACKUP_DESC, self.instance_info.id) def _get_databases(self, instance_id): return [database.name for database in self.auth_client.databases.list(instance_id)] def assert_backup_create(self, name, desc, instance_id, parent_id=None, incremental=False): client = self.auth_client datastore_version = client.datastore_versions.get( self.instance_info.dbaas_datastore, self.instance_info.dbaas_datastore_version) if incremental: result = client.backups.create( name, instance_id, desc, incremental=incremental) else: result = client.backups.create( name, instance_id, desc, parent_id=parent_id) self.assert_equal(name, result.name, 'Unexpected backup name') self.assert_equal(desc, result.description, 'Unexpected backup description') self.assert_equal(instance_id, result.instance_id, 'Unexpected instance ID for backup') self.assert_equal('NEW', result.status, 'Unexpected status for backup') if parent_id: self.assert_equal(parent_id, result.parent_id, 'Unexpected status for backup') instance = client.instances.get(instance_id) self.assert_equal('BACKUP', instance.status, 'Unexpected instance status') self.assert_equal(self.instance_info.dbaas_datastore, result.datastore['type'], 'Unexpected datastore') self.assert_equal(self.instance_info.dbaas_datastore_version, result.datastore['version'], 'Unexpected datastore version') self.assert_equal(datastore_version.id, result.datastore['version_id'], 'Unexpected datastore version id') return result def run_restore_instance_from_not_completed_backup( self, expected_exception=exceptions.Conflict, expected_http_code=409): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, None, self._restore_from_backup, client, self.backup_info.id) self.assert_client_code(client, expected_http_code) def run_instance_action_right_after_backup_create( self, expected_exception=exceptions.UnprocessableEntity, expected_http_code=422): client = self.auth_client self.assert_raises(expected_exception, expected_http_code, client, client.instances.resize_instance, self.instance_info.id, 1) def run_backup_create_another_backup_running( self, expected_exception=exceptions.UnprocessableEntity, expected_http_code=422): client = self.auth_client self.assert_raises(expected_exception, expected_http_code, client, client.backups.create, 'backup_test2', self.instance_info.id, 'test description2') def run_backup_delete_while_backup_running( self, expected_exception=exceptions.UnprocessableEntity, expected_http_code=422): client = self.auth_client result = client.backups.list() backup = result[0] self.assert_raises(expected_exception, expected_http_code, client, client.backups.delete, backup.id) def run_backup_create_completed(self): self._verify_backup(self.backup_info.id) def _verify_backup(self, backup_id): def _result_is_active(): backup = self.auth_client.backups.get(backup_id) if backup.status == 'COMPLETED': return True else: self.assert_not_equal('FAILED', backup.status, 'Backup status should not be') return False 
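        # Block (for up to TIMEOUT_BACKUP_CREATE seconds) until the backup
        # reaches COMPLETED; the assertion above makes the wait fail fast
        # if the backup ends up FAILED instead.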
poll_until(_result_is_active, time_out=self.TIMEOUT_BACKUP_CREATE) def run_instance_goes_active(self, expected_states=['BACKUP', 'HEALTHY']): self._assert_instance_states(self.instance_info.id, expected_states) def run_backup_list(self): backup_list = self.auth_client.backups.list() self.assert_backup_list( backup_list, self.backup_count_prior_to_create + 1) def assert_backup_list(self, backup_list, expected_count): self.assert_equal(expected_count, len(backup_list), 'Unexpected number of backups found') if expected_count: backup = backup_list[0] self.assert_equal(self.BACKUP_NAME, backup.name, 'Unexpected backup name') self.assert_equal(self.BACKUP_DESC, backup.description, 'Unexpected backup description') self.assert_not_equal(0.0, backup.size, 'Unexpected backup size') self.assert_equal(self.instance_info.id, backup.instance_id, 'Unexpected instance id') self.assert_equal('COMPLETED', backup.status, 'Unexpected backup status') def run_backup_list_filter_datastore(self): backup_list = self.auth_client.backups.list( datastore=self.instance_info.dbaas_datastore) self.assert_backup_list( backup_list, self.backup_count_for_ds_prior_to_create + 1) def run_backup_list_filter_datastore_not_found( self, expected_exception=exceptions.NotFound, expected_http_code=404): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.backups.list, datastore='NOT_FOUND') def run_backup_list_for_instance(self): backup_list = self.auth_client.instances.backups( self.instance_info.id) self.assert_backup_list( backup_list, self.backup_count_for_instance_prior_to_create + 1) def run_backup_get(self): backup = self.auth_client.backups.get(self.backup_info.id) self.assert_backup_list([backup], 1) self.assert_equal(self.instance_info.dbaas_datastore, backup.datastore['type'], 'Unexpected datastore type') self.assert_equal(self.instance_info.dbaas_datastore_version, backup.datastore['version'], 'Unexpected datastore version') datastore_version = self.auth_client.datastore_versions.get( self.instance_info.dbaas_datastore, self.instance_info.dbaas_datastore_version) self.assert_equal(datastore_version.id, backup.datastore['version_id']) def run_backup_get_unauthorized_user( self, expected_exception=exceptions.NotFound, expected_http_code=404): client = self.unauth_client self.assert_raises( expected_exception, expected_http_code, client, client.backups.get, self.backup_info.id) def run_add_data_for_inc_backup_1(self): self.backup_host = self.get_instance_host() self.assert_add_data_for_backup(self.backup_host, DataType.tiny) def run_verify_data_for_inc_backup_1(self): self.assert_verify_backup_data(self.backup_host, DataType.tiny) def run_inc_backup_1(self): suffix = '_inc_1' self.backup_inc_1_info = self.assert_backup_create( self.BACKUP_NAME + suffix, self.BACKUP_DESC + suffix, self.instance_info.id, parent_id=self.backup_info.id) def run_wait_for_inc_backup_1(self): self._verify_backup(self.backup_inc_1_info.id) def run_add_data_for_inc_backup_2(self): self.backup_host = self.get_instance_host() self.assert_add_data_for_backup(self.backup_host, DataType.tiny2) def run_verify_data_for_inc_backup_2(self): self.assert_verify_backup_data(self.backup_host, DataType.tiny2) def run_inc_backup_2(self): suffix = '_inc_2' self.backup_inc_2_info = self.assert_backup_create( self.BACKUP_NAME + suffix, self.BACKUP_DESC + suffix, self.instance_info.id, parent_id=self.backup_inc_1_info.id, incremental=True) def run_wait_for_inc_backup_2(self): 
self._verify_backup(self.backup_inc_2_info.id) def run_restore_from_backup(self, expected_http_code=200, suffix=''): self.restore_instance_id = self.assert_restore_from_backup( self.backup_info.id, suffix=suffix, expected_http_code=expected_http_code) def assert_restore_from_backup(self, backup_ref, suffix='', expected_http_code=200): client = self.auth_client result = self._restore_from_backup(client, backup_ref, suffix=suffix) self.assert_client_code(client, expected_http_code) self.assert_equal('BUILD', result.status, 'Unexpected instance status') self.register_debug_inst_ids(result.id) return result.id def _restore_from_backup(self, client, backup_ref, suffix=''): restore_point = {'backupRef': backup_ref} result = client.instances.create( self.instance_info.name + '_restore' + suffix, self.instance_info.dbaas_flavor_href, self.instance_info.volume, nics=self.instance_info.nics, restorePoint=restore_point, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version) return result def run_restore_from_inc_1_backup(self, expected_http_code=200): self.restore_inc_1_instance_id = self.assert_restore_from_backup( self.backup_inc_1_info.id, suffix='_inc_1', expected_http_code=expected_http_code) def run_restore_from_backup_completed( self, expected_states=['BUILD', 'HEALTHY']): self.assert_restore_from_backup_completed( self.restore_instance_id, expected_states) self.restore_host = self.get_instance_host(self.restore_instance_id) def assert_restore_from_backup_completed( self, instance_id, expected_states): self._assert_instance_states(instance_id, expected_states) def run_restore_from_inc_1_backup_completed( self, expected_states=['BUILD', 'HEALTHY']): self.assert_restore_from_backup_completed( self.restore_inc_1_instance_id, expected_states) self.restore_inc_1_host = self.get_instance_host( self.restore_inc_1_instance_id) def run_verify_data_in_restored_instance(self): self.assert_verify_backup_data(self.restore_host, DataType.large) def run_verify_databases_in_restored_instance(self): self.assert_verify_backup_databases(self.restore_instance_id, self.databases_before_backup) def run_verify_data_in_restored_inc_1_instance(self): self.assert_verify_backup_data(self.restore_inc_1_host, DataType.large) self.assert_verify_backup_data(self.restore_inc_1_host, DataType.tiny) def run_verify_databases_in_restored_inc_1_instance(self): self.assert_verify_backup_databases(self.restore_instance_id, self.databases_before_backup) def assert_verify_backup_databases(self, instance_id, expected_databases): if expected_databases is not None: actual = self._get_databases(instance_id) self.assert_list_elements_equal( expected_databases, actual, "Unexpected databases on the restored instance.") else: raise SkipTest("Datastore does not support databases.") def run_delete_restored_instance(self, expected_http_code=202): self.assert_delete_restored_instance( self.restore_instance_id, expected_http_code) def assert_delete_restored_instance( self, instance_id, expected_http_code): client = self.auth_client client.instances.delete(instance_id) self.assert_client_code(client, expected_http_code) def run_delete_restored_inc_1_instance(self, expected_http_code=202): self.assert_delete_restored_instance( self.restore_inc_1_instance_id, expected_http_code) def run_wait_for_restored_instance_delete(self, expected_state='SHUTDOWN'): self.assert_restored_instance_deleted( self.restore_instance_id, expected_state) self.restore_instance_id = None self.restore_host = None def 
assert_restored_instance_deleted(self, instance_id, expected_state): self.assert_all_gone(instance_id, expected_state) def run_wait_for_restored_inc_1_instance_delete( self, expected_state='SHUTDOWN'): self.assert_restored_instance_deleted( self.restore_inc_1_instance_id, expected_state) self.restore_inc_1_instance_id = None self.restore_inc_1_host = None def run_delete_unknown_backup( self, expected_exception=exceptions.NotFound, expected_http_code=404): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.backups.delete, 'unknown_backup') def run_delete_backup_unauthorized_user( self, expected_exception=exceptions.NotFound, expected_http_code=404): client = self.unauth_client self.assert_raises( expected_exception, expected_http_code, client, client.backups.delete, self.backup_info.id) def run_delete_inc_2_backup(self, expected_http_code=202): self.assert_delete_backup( self.backup_inc_2_info.id, expected_http_code) self.backup_inc_2_info = None def assert_delete_backup( self, backup_id, expected_http_code): client = self.auth_client client.backups.delete(backup_id) self.assert_client_code(client, expected_http_code) self._wait_until_backup_is_gone(client, backup_id) def _wait_until_backup_is_gone(self, client, backup_id): def _backup_is_gone(): try: client.backups.get(backup_id) return False except exceptions.NotFound: return True poll_until(_backup_is_gone, time_out=self.TIMEOUT_BACKUP_DELETE) def run_delete_backup(self, expected_http_code=202): self.assert_delete_backup(self.backup_info.id, expected_http_code) def run_check_for_incremental_backup( self, expected_exception=exceptions.NotFound, expected_http_code=404): if self.backup_inc_1_info is None: raise SkipTest("Incremental Backup not created") client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.backups.get, self.backup_inc_1_info.id) self.backup_inc_1_info = None def run_remove_backup_data_from_instance(self): for data_type in self.data_types_added: self.test_helper.remove_data(data_type, self.backup_host) self.data_types_added = [] def run_check_has_incremental(self): self.assert_incremental_exists(self.backup_info.id) def assert_incremental_exists(self, parent_id): def _backup_with_parent_found(): backup_list = self.auth_client.backups.list() for bkup in backup_list: if bkup.parent_id == parent_id: return True return False poll_until(_backup_with_parent_found, time_out=30) class RedisBackupRunner(BackupRunner): def run_check_has_incremental(self): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/cluster_runners.py0000644000175000017500000010443600000000000025566 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
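# NOTE: The cluster runner below honours two environment flags (see the
# class constants): TESTS_USE_CLUSTER_ID makes the tests run against an
# existing cluster whose id is read from that variable instead of creating
# a new one, and TESTS_DO_NOT_DELETE_CLUSTER keeps the cluster around after
# the run (useful for debugging).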
import json import os from proboscis import SkipTest import six import time as timer from trove.common import exception from trove.common.utils import poll_until from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario import runners from trove.tests.scenario.runners.test_runners import SkipKnownBug from trove.tests.scenario.runners.test_runners import TestRunner from trove.tests.util.check import TypeCheck from troveclient.compat import exceptions class ClusterRunner(TestRunner): USE_CLUSTER_ID_FLAG = 'TESTS_USE_CLUSTER_ID' DO_NOT_DELETE_CLUSTER_FLAG = 'TESTS_DO_NOT_DELETE_CLUSTER' EXTRA_INSTANCE_NAME = "named_instance" def __init__(self): super(ClusterRunner, self).__init__() self.cluster_name = 'test_cluster' self.cluster_id = 0 self.cluster_inst_ids = None self.cluster_count_before_create = None self.srv_grp_id = None self.current_root_creds = None self.locality = 'affinity' self.initial_instance_count = None self.cluster_instances = None self.cluster_removed_instances = None self.active_config_group_id = None self.config_requires_restart = False self.initial_group_id = None self.dynamic_group_id = None self.non_dynamic_group_id = None @property def is_using_existing_cluster(self): return self.has_env_flag(self.USE_CLUSTER_ID_FLAG) @property def has_do_not_delete_cluster(self): return self.has_env_flag(self.DO_NOT_DELETE_CLUSTER_FLAG) @property def min_cluster_node_count(self): return 2 def run_initial_configuration_create(self, expected_http_code=200): group_id, requires_restart = self.create_initial_configuration( expected_http_code) if group_id: self.initial_group_id = group_id self.config_requires_restart = requires_restart else: raise SkipTest("No groups defined.") def run_cluster_create(self, num_nodes=None, expected_task_name='BUILDING', expected_http_code=200): self.cluster_count_before_create = len( self.auth_client.clusters.list()) if not num_nodes: num_nodes = self.min_cluster_node_count instance_flavor = self.get_instance_flavor() instance_defs = [ self.build_flavor( flavor_id=self.get_flavor_href(instance_flavor), volume_size=self.instance_info.volume['size']) for count in range(0, num_nodes)] types = self.test_helper.get_cluster_types() for index, instance_def in enumerate(instance_defs): instance_def['nics'] = self.instance_info.nics if types and index < len(types): instance_def['type'] = types[index] self.cluster_id = self.assert_cluster_create( self.cluster_name, instance_defs, self.locality, self.initial_group_id, expected_task_name, expected_http_code) def assert_cluster_create( self, cluster_name, instances_def, locality, configuration, expected_task_name, expected_http_code): self.report.log("Testing cluster create: %s" % cluster_name) client = self.auth_client cluster = self.get_existing_cluster() if cluster: self.report.log("Using an existing cluster: %s" % cluster.id) else: cluster = client.clusters.create( cluster_name, self.instance_info.dbaas_datastore, self.instance_info.dbaas_datastore_version, instances=instances_def, locality=locality, configuration=configuration) self.assert_client_code(client, expected_http_code) self.active_config_group_id = configuration self._assert_cluster_values(cluster, expected_task_name) for instance in cluster.instances: self.register_debug_inst_ids(instance['id']) return cluster.id def run_cluster_create_wait(self, expected_instance_states=['BUILD', 'HEALTHY']): self.assert_cluster_create_wait( self.cluster_id, expected_instance_states=expected_instance_states) def assert_cluster_create_wait( 
self, cluster_id, expected_instance_states): client = self.auth_client cluster_instances = self._get_cluster_instances(client, cluster_id) self.assert_all_instance_states( cluster_instances, expected_instance_states) # Create the helper user/database on the first node. # The cluster should handle the replication itself. if not self.get_existing_cluster(): self.create_test_helper_on_instance(cluster_instances[0]) # Although all instances have already acquired the expected state, # we still need to poll for the final cluster task, because # it may take up to the periodic task interval until the task name # gets updated in the Trove database. self._assert_cluster_states(client, cluster_id, ['NONE']) # make sure the server_group was created self.cluster_inst_ids = [inst.id for inst in cluster_instances] for id in self.cluster_inst_ids: srv_grp_id = self.assert_server_group_exists(id) if self.srv_grp_id and self.srv_grp_id != srv_grp_id: self.fail("Found multiple server groups for cluster") self.srv_grp_id = srv_grp_id def get_existing_cluster(self): if self.is_using_existing_cluster: cluster_id = os.environ.get(self.USE_CLUSTER_ID_FLAG) return self.auth_client.clusters.get(cluster_id) def run_cluster_list(self, expected_http_code=200): self.assert_cluster_list( self.cluster_count_before_create + 1, expected_http_code) def assert_cluster_list(self, expected_count, expected_http_code): client = self.auth_client count = len(client.clusters.list()) self.assert_client_code(client, expected_http_code) self.assert_equal(expected_count, count, "Unexpected cluster count") def run_cluster_show(self, expected_http_code=200, expected_task_name='NONE'): self.assert_cluster_show( self.cluster_id, expected_task_name, expected_http_code) def run_cluster_restart(self, expected_http_code=202, expected_task_name='RESTARTING_CLUSTER'): self.assert_cluster_restart( self.cluster_id, expected_task_name, expected_http_code) def assert_cluster_restart( self, cluster_id, expected_task_name, expected_http_code): client = self.auth_client client.clusters.restart(cluster_id) self.assert_client_code(client, expected_http_code) self._assert_cluster_response( client, cluster_id, expected_task_name) def run_cluster_restart_wait(self): self.assert_cluster_restart_wait(self.cluster_id) def assert_cluster_restart_wait(self, cluster_id): client = self.auth_client cluster_instances = self._get_cluster_instances( client, cluster_id) self.assert_all_instance_states( cluster_instances, ['REBOOT', 'HEALTHY']) self._assert_cluster_states( client, cluster_id, ['NONE']) self._assert_cluster_response( client, cluster_id, 'NONE') def assert_cluster_show(self, cluster_id, expected_task_name, expected_http_code): self._assert_cluster_response(self.auth_client, cluster_id, expected_task_name) def run_cluster_root_enable(self, expected_task_name=None, expected_http_code=200): root_credentials = self.test_helper.get_helper_credentials_root() if not root_credentials or not root_credentials.get('name'): raise SkipTest("No root credentials provided.") client = self.auth_client self.current_root_creds = client.root.create_cluster_root( self.cluster_id, root_credentials['password']) self.assert_client_code(client, expected_http_code) self._assert_cluster_response( client, self.cluster_id, expected_task_name) self.assert_equal(root_credentials['name'], self.current_root_creds[0]) self.assert_equal(root_credentials['password'], self.current_root_creds[1]) def run_verify_cluster_root_enable(self): if not self.current_root_creds: raise SkipTest("Root 
not enabled.") cluster = self.auth_client.clusters.get(self.cluster_id) for instance in cluster.instances: root_enabled_test = self.auth_client.root.is_instance_root_enabled( instance['id']) self.assert_true(root_enabled_test.rootEnabled) for ipv4 in self.extract_ipv4s(cluster.ip): self.report.log("Pinging cluster as superuser via node: %s" % ipv4) ping_response = self.test_helper.ping( ipv4, username=self.current_root_creds[0], password=self.current_root_creds[1]) self.assert_true(ping_response) def run_add_initial_cluster_data(self, data_type=DataType.tiny): self.assert_add_cluster_data(data_type, self.cluster_id) def assert_add_cluster_data(self, data_type, cluster_id): cluster = self.auth_client.clusters.get(cluster_id) self.test_helper.add_data(data_type, self.extract_ipv4s(cluster.ip)[0]) def run_verify_initial_cluster_data(self, data_type=DataType.tiny): self.assert_verify_cluster_data(data_type, self.cluster_id) def assert_verify_cluster_data(self, data_type, cluster_id): cluster = self.auth_client.clusters.get(cluster_id) for ipv4 in self.extract_ipv4s(cluster.ip): self.report.log("Verifying cluster data via node: %s" % ipv4) self.test_helper.verify_data(data_type, ipv4) def run_remove_initial_cluster_data(self, data_type=DataType.tiny): self.assert_remove_cluster_data(data_type, self.cluster_id) def assert_remove_cluster_data(self, data_type, cluster_id): cluster = self.auth_client.clusters.get(cluster_id) self.test_helper.remove_data( data_type, self.extract_ipv4s(cluster.ip)[0]) def run_cluster_grow(self, expected_task_name='GROWING_CLUSTER', expected_http_code=202): # Add two instances. One with an explicit name. flavor_href = self.get_flavor_href(self.get_instance_flavor()) added_instance_defs = [ self._build_instance_def(flavor_href, self.instance_info.volume['size']), self._build_instance_def(flavor_href, self.instance_info.volume['size'], self.EXTRA_INSTANCE_NAME)] types = self.test_helper.get_cluster_types() if types and types[0]: added_instance_defs[0]['type'] = types[0] self.assert_cluster_grow( self.cluster_id, added_instance_defs, expected_task_name, expected_http_code) def _build_instance_def(self, flavor_id, volume_size, name=None): instance_def = self.build_flavor( flavor_id=flavor_id, volume_size=volume_size) if name: instance_def.update({'name': name}) instance_def.update({'nics': self.instance_info.nics}) return instance_def def assert_cluster_grow(self, cluster_id, added_instance_defs, expected_task_name, expected_http_code): client = self.auth_client cluster = client.clusters.get(cluster_id) initial_instance_count = len(cluster.instances) cluster = client.clusters.grow(cluster_id, added_instance_defs) self.assert_client_code(client, expected_http_code) self._assert_cluster_response(client, cluster_id, expected_task_name) self.assert_equal(len(added_instance_defs), len(cluster.instances) - initial_instance_count, "Unexpected number of added nodes.") def run_cluster_grow_wait(self): self.assert_cluster_grow_wait(self.cluster_id) def assert_cluster_grow_wait(self, cluster_id): client = self.auth_client cluster_instances = self._get_cluster_instances(client, cluster_id) self.assert_all_instance_states(cluster_instances, ['HEALTHY']) self._assert_cluster_states(client, cluster_id, ['NONE']) self._assert_cluster_response(client, cluster_id, 'NONE') def run_add_grow_cluster_data(self, data_type=DataType.tiny2): self.assert_add_cluster_data(data_type, self.cluster_id) def run_verify_grow_cluster_data(self, data_type=DataType.tiny2): 
self.assert_verify_cluster_data(data_type, self.cluster_id) def run_remove_grow_cluster_data(self, data_type=DataType.tiny2): self.assert_remove_cluster_data(data_type, self.cluster_id) def run_cluster_upgrade(self, expected_task_name='UPGRADING_CLUSTER', expected_http_code=202): self.assert_cluster_upgrade(self.cluster_id, expected_task_name, expected_http_code) def assert_cluster_upgrade(self, cluster_id, expected_task_name, expected_http_code): client = self.auth_client cluster = client.clusters.get(cluster_id) self.initial_instance_count = len(cluster.instances) client.clusters.upgrade( cluster_id, self.instance_info.dbaas_datastore_version) self.assert_client_code(client, expected_http_code) self._assert_cluster_response(client, cluster_id, expected_task_name) def run_cluster_upgrade_wait(self): self.assert_cluster_upgrade_wait( self.cluster_id, expected_last_instance_states=['HEALTHY'] ) def assert_cluster_upgrade_wait(self, cluster_id, expected_last_instance_states): client = self.auth_client self._assert_cluster_states(client, cluster_id, ['NONE']) cluster_instances = self._get_cluster_instances(client, cluster_id) self.assert_equal( self.initial_instance_count, len(cluster_instances), "Unexpected number of instances after upgrade.") self.assert_all_instance_states(cluster_instances, expected_last_instance_states) self._assert_cluster_response(client, cluster_id, 'NONE') def run_add_upgrade_cluster_data(self, data_type=DataType.tiny3): self.assert_add_cluster_data(data_type, self.cluster_id) def run_verify_upgrade_cluster_data(self, data_type=DataType.tiny3): self.assert_verify_cluster_data(data_type, self.cluster_id) def run_remove_upgrade_cluster_data(self, data_type=DataType.tiny3): self.assert_remove_cluster_data(data_type, self.cluster_id) def run_cluster_shrink(self, expected_task_name='SHRINKING_CLUSTER', expected_http_code=202): self.assert_cluster_shrink(self.auth_client, self.cluster_id, [self.EXTRA_INSTANCE_NAME], expected_task_name, expected_http_code) def assert_cluster_shrink(self, client, cluster_id, removed_instance_names, expected_task_name, expected_http_code): cluster = client.clusters.get(cluster_id) self.initial_instance_count = len(cluster.instances) self.cluster_removed_instances = ( self._find_cluster_instances_by_name( cluster, removed_instance_names)) client.clusters.shrink( cluster_id, [{'id': instance.id} for instance in self.cluster_removed_instances]) self.assert_client_code(client, expected_http_code) self._assert_cluster_response(client, cluster_id, expected_task_name) def _find_cluster_instances_by_name(self, cluster, instance_names): return [self.auth_client.instances.get(instance['id']) for instance in cluster.instances if instance['name'] in instance_names] def run_cluster_shrink_wait(self): self.assert_cluster_shrink_wait( self.cluster_id, expected_last_instance_state='SHUTDOWN') def assert_cluster_shrink_wait(self, cluster_id, expected_last_instance_state): client = self.auth_client self._assert_cluster_states(client, cluster_id, ['NONE']) cluster = client.clusters.get(cluster_id) self.assert_equal( len(self.cluster_removed_instances), self.initial_instance_count - len(cluster.instances), "Unexpected number of removed nodes.") cluster_instances = self._get_cluster_instances(client, cluster_id) self.assert_all_instance_states(cluster_instances, ['HEALTHY']) self.assert_all_gone(self.cluster_removed_instances, expected_last_instance_state) self._assert_cluster_response(client, cluster_id, 'NONE') def run_add_shrink_cluster_data(self, 
data_type=DataType.tiny4): self.assert_add_cluster_data(data_type, self.cluster_id) def run_verify_shrink_cluster_data(self, data_type=DataType.tiny4): self.assert_verify_cluster_data(data_type, self.cluster_id) def run_remove_shrink_cluster_data(self, data_type=DataType.tiny4): self.assert_remove_cluster_data(data_type, self.cluster_id) def run_cluster_delete( self, expected_task_name='DELETING', expected_http_code=202): if self.has_do_not_delete_cluster: self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was " "specified, skipping delete...") raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.") self.assert_cluster_delete( self.cluster_id, expected_http_code) def assert_cluster_delete(self, cluster_id, expected_http_code): self.report.log("Testing cluster delete: %s" % cluster_id) client = self.auth_client self.cluster_instances = self._get_cluster_instances(client, cluster_id) client.clusters.delete(cluster_id) self.assert_client_code(client, expected_http_code) def _get_cluster_instances(self, client, cluster_id): cluster = client.clusters.get(cluster_id) return [client.instances.get(instance['id']) for instance in cluster.instances] def run_cluster_delete_wait( self, expected_task_name='DELETING', expected_last_instance_state='SHUTDOWN'): if self.has_do_not_delete_cluster: self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was " "specified, skipping delete wait...") raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.") self.assert_cluster_delete_wait( self.cluster_id, expected_task_name, expected_last_instance_state) def assert_cluster_delete_wait( self, cluster_id, expected_task_name, expected_last_instance_state): client = self.auth_client # Since the server_group is removed right at the beginning of the # cluster delete process we can't check for locality anymore. self._assert_cluster_response(client, cluster_id, expected_task_name, check_locality=False) self.assert_all_gone(self.cluster_instances, expected_last_instance_state) self._assert_cluster_gone(client, cluster_id) # make sure the server group is gone too self.assert_server_group_gone(self.srv_grp_id) def _assert_cluster_states(self, client, cluster_id, expected_states, fast_fail_status=None): for status in expected_states: start_time = timer.time() try: poll_until( lambda: self._has_task( client, cluster_id, status, fast_fail_status=fast_fail_status), sleep_time=self.def_sleep_time, time_out=self.def_timeout) self.report.log("Cluster has gone '%s' in %s." % (status, self._time_since(start_time))) except exception.PollTimeOut: self.report.log( "Status of cluster '%s' did not change to '%s' after %s." 
                    % (cluster_id, status, self._time_since(start_time)))
                return False

        return True

    def _has_task(self, client, cluster_id, task, fast_fail_status=None):
        cluster = client.clusters.get(cluster_id)
        task_name = cluster.task['name']
        self.report.log("Waiting for cluster '%s' to become '%s': %s"
                        % (cluster_id, task, task_name))
        if fast_fail_status and task_name == fast_fail_status:
            raise RuntimeError("Cluster '%s' acquired a fast-fail task: %s"
                               % (cluster_id, task))
        return task_name == task

    def _assert_cluster_response(self, client, cluster_id,
                                 expected_task_name, check_locality=True):
        cluster = client.clusters.get(cluster_id)
        self._assert_cluster_values(cluster, expected_task_name,
                                    check_locality=check_locality)

    def _assert_cluster_values(self, cluster, expected_task_name,
                               check_locality=True):
        with TypeCheck('Cluster', cluster) as check:
            check.has_field("id", six.string_types)
            check.has_field("name", six.string_types)
            check.has_field("datastore", dict)
            check.has_field("instances", list)
            check.has_field("links", list)
            check.has_field("created", six.text_type)
            check.has_field("updated", six.text_type)
            if check_locality:
                check.has_field("locality", six.text_type)
            if self.active_config_group_id:
                check.has_field("configuration", six.text_type)
        for instance in cluster.instances:
            self.assert_true(isinstance(instance, dict),
                             "Unexpected type of cluster instance record.")
            self.assert_is_not_none(instance['id'])
            self.assert_is_not_none(instance['links'])
            self.assert_is_not_none(instance['name'])
        self.assert_equal(expected_task_name, cluster.task['name'],
                          'Unexpected cluster task name')
        if check_locality:
            self.assert_equal(self.locality, cluster.locality,
                              "Unexpected cluster locality")

    def _assert_cluster_gone(self, client, cluster_id):
        t0 = timer.time()
        try:
            # This will poll until the cluster goes away.
            self._assert_cluster_states(client, cluster_id, ['NONE'])
            self.fail(
                "Cluster '%s' still existed after %s seconds."
                % (cluster_id, self._time_since(t0)))
        except exceptions.NotFound:
            self.assert_client_code(client, 404)

    def restart_after_configuration_change(self):
        if self.config_requires_restart:
            self.run_cluster_restart()
            self.run_cluster_restart_wait()
            self.config_requires_restart = False
        else:
            raise SkipTest("Not required.")

    def run_create_dynamic_configuration(self, expected_http_code=200):
        values = self.test_helper.get_dynamic_group()
        if values:
            self.dynamic_group_id = self.assert_create_group(
                'dynamic_cluster_test_group',
                'a fully dynamic group should not require restart',
                values, expected_http_code)
        elif values is None:
            raise SkipTest("No dynamic group defined in %s."
                           % self.test_helper.get_class_name())
        else:
            raise SkipTest("Datastore has no dynamic configuration values.")

    def assert_create_group(self, name, description, values,
                            expected_http_code):
        json_def = json.dumps(values)
        client = self.auth_client
        result = client.configurations.create(
            name,
            json_def,
            description,
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version)
        self.assert_client_code(client, expected_http_code)
        return result.id

    def run_create_non_dynamic_configuration(self, expected_http_code=200):
        values = self.test_helper.get_non_dynamic_group()
        if values:
            self.non_dynamic_group_id = self.assert_create_group(
                'non_dynamic_cluster_test_group',
                'a group containing non-dynamic properties should always '
                'require restart',
                values, expected_http_code)
        elif values is None:
            raise SkipTest("No non-dynamic group defined in %s."
                           % self.test_helper.get_class_name())
        else:
            raise SkipTest("Datastore has no non-dynamic configuration "
                           "values.")

    def run_attach_dynamic_configuration(
            self, expected_states=['NONE'],
            expected_http_code=202):
        if self.dynamic_group_id:
            self.assert_attach_configuration(
                self.cluster_id, self.dynamic_group_id, expected_states,
                expected_http_code)

    def assert_attach_configuration(
            self, cluster_id, group_id, expected_states, expected_http_code,
            restart_inst=False):
        client = self.auth_client
        client.clusters.configuration_attach(cluster_id, group_id)
        self.assert_client_code(client, expected_http_code)
        self.active_config_group_id = group_id
        self._assert_cluster_states(client, cluster_id, expected_states)
        self.assert_configuration_group(client, cluster_id, group_id)

        if restart_inst:
            self.config_requires_restart = True
            cluster_instances = self._get_cluster_instances(client,
                                                            cluster_id)
            for node in cluster_instances:
                self.assert_equal(
                    'RESTART_REQUIRED', node.status,
                    "Node '%s' should be in 'RESTART_REQUIRED' state."
                    % node.id)

    def assert_configuration_group(
            self, client, cluster_id, expected_group_id):
        cluster = client.clusters.get(cluster_id)
        self.assert_equal(
            expected_group_id, cluster.configuration,
            "Attached group does not have the expected ID.")

        cluster_instances = self._get_cluster_instances(client, cluster_id)
        for node in cluster_instances:
            self.assert_equal(
                expected_group_id, cluster.configuration,
                "Attached group does not have the expected ID on "
                "cluster node: %s" % node.id)

    def run_attach_non_dynamic_configuration(
            self, expected_states=['NONE'],
            expected_http_code=202):
        if self.non_dynamic_group_id:
            self.assert_attach_configuration(
                self.cluster_id, self.non_dynamic_group_id,
                expected_states, expected_http_code, restart_inst=True)

    def run_verify_initial_configuration(self):
        if self.initial_group_id:
            self.verify_configuration(self.cluster_id, self.initial_group_id)

    def verify_configuration(self, cluster_id, expected_group_id):
        self.assert_configuration_group(self.auth_client, cluster_id,
                                        expected_group_id)
        self.assert_configuration_values(cluster_id, expected_group_id)

    def assert_configuration_values(self, cluster_id, group_id):
        if group_id == self.initial_group_id:
            if not self.config_requires_restart:
                expected_configs = self.test_helper.get_dynamic_group()
            else:
                expected_configs = self.test_helper.get_non_dynamic_group()
        if group_id == self.dynamic_group_id:
            expected_configs = self.test_helper.get_dynamic_group()
        elif group_id == self.non_dynamic_group_id:
            expected_configs = self.test_helper.get_non_dynamic_group()

        self._assert_configuration_values(cluster_id, expected_configs)

    def _assert_configuration_values(self, cluster_id, expected_configs):
        cluster_instances = self._get_cluster_instances(self.auth_client,
                                                        cluster_id)
        for node in cluster_instances:
            host = self.get_instance_host(node)
            self.report.log(
                "Verifying cluster configuration via node: %s" % host)
            for name, value in expected_configs.items():
                actual = self.test_helper.get_configuration_value(name, host)
                self.assert_equal(str(value), str(actual),
                                  "Unexpected value of property '%s'" % name)

    def run_verify_dynamic_configuration(self):
        if self.dynamic_group_id:
            self.verify_configuration(self.cluster_id, self.dynamic_group_id)

    def run_verify_non_dynamic_configuration(self):
        if self.non_dynamic_group_id:
            self.verify_configuration(
                self.cluster_id, self.non_dynamic_group_id)

    def run_detach_initial_configuration(self, expected_states=['NONE'],
                                         expected_http_code=202):
        if self.initial_group_id:
            self.assert_detach_configuration(
                self.cluster_id, expected_states,
expected_http_code, restart_inst=self.config_requires_restart) def run_detach_dynamic_configuration(self, expected_states=['NONE'], expected_http_code=202): if self.dynamic_group_id: self.assert_detach_configuration( self.cluster_id, expected_states, expected_http_code) def assert_detach_configuration( self, cluster_id, expected_states, expected_http_code, restart_inst=False): client = self.auth_client client.clusters.configuration_detach(cluster_id) self.assert_client_code(client, expected_http_code) self.active_config_group_id = None self._assert_cluster_states(client, cluster_id, expected_states) cluster = client.clusters.get(cluster_id) self.assert_false( hasattr(cluster, 'configuration'), "Configuration group was not detached from the cluster.") cluster_instances = self._get_cluster_instances(client, cluster_id) for node in cluster_instances: self.assert_false( hasattr(node, 'configuration'), "Configuration group was not detached from cluster node: %s" % node.id) if restart_inst: self.config_requires_restart = True cluster_instances = self._get_cluster_instances(client, cluster_id) for node in cluster_instances: self.assert_equal( 'RESTART_REQUIRED', node.status, "Node '%s' should be in 'RESTART_REQUIRED' state." % node.id) def run_detach_non_dynamic_configuration( self, expected_states=['NONE'], expected_http_code=202): if self.non_dynamic_group_id: self.assert_detach_configuration( self.cluster_id, expected_states, expected_http_code, restart_inst=True) def run_delete_initial_configuration(self, expected_http_code=202): if self.initial_group_id: self.assert_group_delete(self.initial_group_id, expected_http_code) def assert_group_delete(self, group_id, expected_http_code): client = self.auth_client client.configurations.delete(group_id) self.assert_client_code(client, expected_http_code) def run_delete_dynamic_configuration(self, expected_http_code=202): if self.dynamic_group_id: self.assert_group_delete(self.dynamic_group_id, expected_http_code) def run_delete_non_dynamic_configuration(self, expected_http_code=202): if self.non_dynamic_group_id: self.assert_group_delete(self.non_dynamic_group_id, expected_http_code) class CassandraClusterRunner(ClusterRunner): def run_cluster_root_enable(self): raise SkipTest("Operation is currently not supported.") class MariadbClusterRunner(ClusterRunner): @property def min_cluster_node_count(self): return self.get_datastore_config_property('min_cluster_member_count') class MongodbClusterRunner(ClusterRunner): @property def min_cluster_node_count(self): return 3 def run_cluster_delete(self, expected_task_name='NONE', expected_http_code=202): raise SkipKnownBug(runners.BUG_STOP_DB_IN_CLUSTER) class PxcClusterRunner(ClusterRunner): @property def min_cluster_node_count(self): return self.get_datastore_config_property('min_cluster_member_count') class RedisClusterRunner(ClusterRunner): # Since Redis runs all the shrink code in the API server, the call # will not return until the task name has been set back to 'NONE' so # we can't check it. 
    def run_cluster_shrink(self, expected_task_name='NONE',
                           expected_http_code=202):
        return super(RedisClusterRunner, self).run_cluster_shrink(
            expected_task_name=expected_task_name,
            expected_http_code=expected_http_code)


class VerticaClusterRunner(ClusterRunner):

    @property
    def min_cluster_node_count(self):
        return self.get_datastore_config_property('cluster_member_count')


# trove/tests/scenario/runners/configuration_runners.py
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime
import json

from proboscis import SkipTest
import six

from trove.common.utils import generate_uuid
from trove.tests.scenario.runners.test_runners import TestRunner
from trove.tests.util.check import CollectionCheck
from trove.tests.util.check import TypeCheck
from troveclient.compat import exceptions


class ConfigurationRunner(TestRunner):

    def __init__(self):
        super(ConfigurationRunner, self).__init__(sleep_time=10)
        self.dynamic_group_name = 'dynamic_test_group'
        self.dynamic_group_id = None
        self.dynamic_inst_count = 0
        self.non_dynamic_group_name = 'non_dynamic_test_group'
        self.non_dynamic_group_id = None
        self.non_dynamic_inst_count = 0
        self.initial_group_count = 0
        self.additional_group_count = 0
        self.config_id_for_inst = None
        self.config_inst_id = None

    def run_create_bad_group(
            self, expected_exception=exceptions.UnprocessableEntity,
            expected_http_code=422):
        bad_group = {'unknown_datastore_key': 'bad_value'}
        self.assert_action_on_conf_group_failure(
            bad_group, expected_exception, expected_http_code)

    def assert_action_on_conf_group_failure(
            self, group_values, expected_exception, expected_http_code):
        json_def = json.dumps(group_values)
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.configurations.create,
            'conf_group',
            json_def,
            'Group with Bad or Invalid entries',
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version)

    def run_create_invalid_groups(
            self, expected_exception=exceptions.UnprocessableEntity,
            expected_http_code=422):
        invalid_groups = self.test_helper.get_invalid_groups()
        if invalid_groups:
            for invalid_group in invalid_groups:
                self.assert_action_on_conf_group_failure(
                    invalid_group, expected_exception, expected_http_code)
        elif invalid_groups is None:
            raise SkipTest("No invalid configuration values defined in %s."
% self.test_helper.get_class_name()) else: raise SkipTest("Datastore has no invalid configuration values.") def run_delete_non_existent_group( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_group_delete_failure( None, expected_exception, expected_http_code) def run_delete_bad_group_id( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_group_delete_failure( generate_uuid(), expected_exception, expected_http_code) def run_attach_non_existent_group( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_instance_modify_failure( self.instance_info.id, generate_uuid(), expected_exception, expected_http_code) def run_attach_non_existent_group_to_non_existent_inst( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_instance_modify_failure( generate_uuid(), generate_uuid(), expected_exception, expected_http_code) def run_detach_group_with_none_attached(self, expected_states=['HEALTHY'], expected_http_code=202): self.assert_instance_modify( self.instance_info.id, None, expected_states, expected_http_code) # run again, just to make sure self.assert_instance_modify( self.instance_info.id, None, expected_states, expected_http_code) def run_create_dynamic_group(self, expected_http_code=200): self.initial_group_count = len(self.auth_client.configurations.list()) values = self.test_helper.get_dynamic_group() if values: self.dynamic_group_id = self.assert_create_group( self.dynamic_group_name, 'a fully dynamic group should not require restart', values, expected_http_code) self.additional_group_count += 1 elif values is None: raise SkipTest("No dynamic group defined in %s." % self.test_helper.get_class_name()) else: raise SkipTest("Datastore has no dynamic configuration values.") def assert_create_group(self, name, description, values, expected_http_code): json_def = json.dumps(values) client = self.auth_client result = client.configurations.create( name, json_def, description, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version) self.assert_client_code(client, expected_http_code) with TypeCheck('Configuration', result) as configuration: configuration.has_field('name', six.string_types) configuration.has_field('description', six.string_types) configuration.has_field('values', dict) configuration.has_field('datastore_name', six.string_types) configuration.has_field('datastore_version_id', six.text_type) configuration.has_field('datastore_version_name', six.string_types) self.assert_equal(name, result.name) self.assert_equal(description, result.description) self.assert_equal(values, result.values) return result.id def run_create_non_dynamic_group(self, expected_http_code=200): values = self.test_helper.get_non_dynamic_group() if values: self.non_dynamic_group_id = self.assert_create_group( self.non_dynamic_group_name, 'a group containing non-dynamic properties should always ' 'require restart', values, expected_http_code) self.additional_group_count += 1 elif values is None: raise SkipTest("No non-dynamic group defined in %s." 
% self.test_helper.get_class_name()) else: raise SkipTest("Datastore has no non-dynamic configuration " "values.") def run_attach_dynamic_group_to_non_existent_inst( self, expected_exception=exceptions.NotFound, expected_http_code=404): if self.dynamic_group_id: self.assert_instance_modify_failure( generate_uuid(), self.dynamic_group_id, expected_exception, expected_http_code) def run_attach_non_dynamic_group_to_non_existent_inst( self, expected_exception=exceptions.NotFound, expected_http_code=404): if self.non_dynamic_group_id: self.assert_instance_modify_failure( generate_uuid(), self.non_dynamic_group_id, expected_exception, expected_http_code) def run_list_configuration_groups(self): configuration_list = self.auth_client.configurations.list() self.assert_configuration_list( configuration_list, self.initial_group_count + self.additional_group_count) def assert_configuration_list(self, configuration_list, expected_count): self.assert_equal(expected_count, len(configuration_list), 'Unexpected number of configurations found') if expected_count: configuration_names = [conf.name for conf in configuration_list] if self.dynamic_group_id: self.assert_true( self.dynamic_group_name in configuration_names) if self.non_dynamic_group_id: self.assert_true( self.non_dynamic_group_name in configuration_names) def run_dynamic_configuration_show(self): if self.dynamic_group_id: self.assert_configuration_show(self.dynamic_group_id, self.dynamic_group_name) else: raise SkipTest("No dynamic group created.") def assert_configuration_show(self, config_id, config_name): result = self.auth_client.configurations.get(config_id) self.assert_equal(config_id, result.id, "Unexpected config id") self.assert_equal(config_name, result.name, "Unexpected config name") # check the result field types with TypeCheck("configuration", result) as check: check.has_field("id", six.string_types) check.has_field("name", six.string_types) check.has_field("description", six.string_types) check.has_field("values", dict) check.has_field("created", six.string_types) check.has_field("updated", six.string_types) check.has_field("instance_count", int) # check for valid timestamps self.assert_true(self._is_valid_timestamp(result.created), 'Created timestamp %s is invalid' % result.created) self.assert_true(self._is_valid_timestamp(result.updated), 'Updated timestamp %s is invalid' % result.updated) with CollectionCheck("configuration_values", result.values) as check: # check each item has the correct type according to the rules for (item_key, item_val) in result.values.items(): print("item_key: %s" % item_key) print("item_val: %s" % item_val) param = ( self.auth_client.configuration_parameters.get_parameter( self.instance_info.dbaas_datastore, self.instance_info.dbaas_datastore_version, item_key)) if param.type == 'integer': check.has_element(item_key, int) if param.type == 'string': check.has_element(item_key, six.string_types) if param.type == 'boolean': check.has_element(item_key, bool) def _is_valid_timestamp(self, time_string): try: datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S") except ValueError: return False return True def run_non_dynamic_configuration_show(self): if self.non_dynamic_group_id: self.assert_configuration_show(self.non_dynamic_group_id, self.non_dynamic_group_name) else: raise SkipTest("No non-dynamic group created.") def run_dynamic_conf_get_unauthorized_user( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_conf_get_unauthorized_user(self.dynamic_group_id, expected_exception, 
                                               expected_http_code)

    def assert_conf_get_unauthorized_user(
            self, config_id, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        client = self.unauth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.configurations.get, config_id)

    def run_non_dynamic_conf_get_unauthorized_user(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_conf_get_unauthorized_user(self.non_dynamic_group_id,
                                               expected_exception,
                                               expected_http_code)

    def run_list_dynamic_inst_conf_groups_before(self):
        if self.dynamic_group_id:
            self.dynamic_inst_count = len(
                self.auth_client.configurations.instances(
                    self.dynamic_group_id))

    def assert_conf_instance_list(self, group_id, expected_count):
        conf_instance_list = self.auth_client.configurations.instances(
            group_id)
        self.assert_equal(expected_count, len(conf_instance_list),
                          'Unexpected number of configurations found')
        if expected_count:
            conf_instance_ids = [inst.id for inst in conf_instance_list]
            self.assert_true(
                self.instance_info.id in conf_instance_ids)

    def run_attach_dynamic_group(
            self, expected_states=['HEALTHY'], expected_http_code=202):
        if self.dynamic_group_id:
            self.assert_instance_modify(
                self.instance_info.id, self.dynamic_group_id,
                expected_states, expected_http_code)

    def run_verify_dynamic_values(self):
        if self.dynamic_group_id:
            self.assert_configuration_values(self.instance_info.id,
                                             self.dynamic_group_id)

    def assert_configuration_values(self, instance_id, group_id):
        if group_id == self.dynamic_group_id:
            expected_configs = self.test_helper.get_dynamic_group()
        elif group_id == self.non_dynamic_group_id:
            expected_configs = self.test_helper.get_non_dynamic_group()

        self._assert_configuration_values(instance_id, expected_configs)

    def _assert_configuration_values(self, instance_id, expected_configs):
        host = self.get_instance_host(instance_id)
        for name, value in expected_configs.items():
            actual = self.test_helper.get_configuration_value(name, host)
            # Compare floating point numbers as floats to avoid rounding
            # and precision issues.
            try:
                expected_value = float(value)
                actual_value = float(actual)
            except ValueError:
                expected_value = str(value)
                actual_value = str(actual)
            self.assert_equal(expected_value, actual_value,
                              "Unexpected value of property '%s'" % name)

    def run_list_dynamic_inst_conf_groups_after(self):
        if self.dynamic_group_id:
            self.assert_conf_instance_list(self.dynamic_group_id,
                                           self.dynamic_inst_count + 1)

    def run_attach_dynamic_group_again(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        # The exception here should probably be UnprocessableEntity or
        # something else other than BadRequest as the request really is
        # valid.
        if self.dynamic_group_id:
            self.assert_instance_modify_failure(
                self.instance_info.id, self.dynamic_group_id,
                expected_exception, expected_http_code)

    def run_delete_attached_dynamic_group(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        # The exception here should probably be UnprocessableEntity or
        # something else other than BadRequest as the request really is
        # valid.
if self.dynamic_group_id: self.assert_group_delete_failure( self.dynamic_group_id, expected_exception, expected_http_code) def run_update_dynamic_group(self, expected_states=['HEALTHY'], expected_http_code=202): if self.dynamic_group_id: values = json.dumps(self.test_helper.get_dynamic_group()) self.assert_update_group( self.instance_info.id, self.dynamic_group_id, values, expected_states, expected_http_code) def assert_update_group( self, instance_id, group_id, values, expected_states, expected_http_code, restart_inst=False): client = self.auth_client client.configurations.update(group_id, values) self.assert_client_code(client, expected_http_code) self.assert_instance_action(instance_id, expected_states) if restart_inst: self._restart_instance(instance_id) def run_detach_dynamic_group( self, expected_states=['HEALTHY'], expected_http_code=202): if self.dynamic_group_id: self.assert_instance_modify( self.instance_info.id, None, expected_states, expected_http_code) def run_list_non_dynamic_inst_conf_groups_before(self): if self.non_dynamic_group_id: self.non_dynamic_inst_count = len( self.auth_client.configurations.instances( self.non_dynamic_group_id)) def run_attach_non_dynamic_group( self, expected_states=['RESTART_REQUIRED'], expected_http_code=202): if self.non_dynamic_group_id: self.assert_instance_modify( self.instance_info.id, self.non_dynamic_group_id, expected_states, expected_http_code, restart_inst=True) def run_verify_non_dynamic_values(self): if self.non_dynamic_group_id: self.assert_configuration_values(self.instance_info.id, self.non_dynamic_group_id) def run_list_non_dynamic_inst_conf_groups_after(self): if self.non_dynamic_group_id: self.assert_conf_instance_list(self.non_dynamic_group_id, self.non_dynamic_inst_count + 1) def run_attach_non_dynamic_group_again( self, expected_exception=exceptions.BadRequest, expected_http_code=400): if self.non_dynamic_group_id: self.assert_instance_modify_failure( self.instance_info.id, self.non_dynamic_group_id, expected_exception, expected_http_code) def run_delete_attached_non_dynamic_group( self, expected_exception=exceptions.BadRequest, expected_http_code=400): if self.non_dynamic_group_id: self.assert_group_delete_failure( self.non_dynamic_group_id, expected_exception, expected_http_code) def run_update_non_dynamic_group( self, expected_states=['RESTART_REQUIRED'], expected_http_code=202): if self.non_dynamic_group_id: values = json.dumps(self.test_helper.get_non_dynamic_group()) self.assert_update_group( self.instance_info.id, self.non_dynamic_group_id, values, expected_states, expected_http_code, restart_inst=True) def run_detach_non_dynamic_group( self, expected_states=['RESTART_REQUIRED'], expected_http_code=202): if self.non_dynamic_group_id: self.assert_instance_modify( self.instance_info.id, None, expected_states, expected_http_code, restart_inst=True) def assert_instance_modify( self, instance_id, group_id, expected_states, expected_http_code, restart_inst=False): client = self.auth_client client.instances.modify(instance_id, configuration=group_id) self.assert_client_code(client, expected_http_code) self.assert_instance_action(instance_id, expected_states) # Verify the group has been attached. 
instance = self.get_instance(instance_id) if group_id: group = self.auth_client.configurations.get(group_id) self.assert_equal( group.id, instance.configuration['id'], "Attached group does not have the expected ID") self.assert_equal( group.name, instance.configuration['name'], "Attached group does not have the expected name") else: self.assert_false( hasattr(instance, 'configuration'), "The configuration group was not detached from the instance.") if restart_inst: self._restart_instance(instance_id) def assert_instance_modify_failure( self, instance_id, group_id, expected_exception, expected_http_code): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.instances.modify, instance_id, configuration=group_id) def run_delete_dynamic_group(self, expected_http_code=202): if self.dynamic_group_id: self.assert_group_delete(self.dynamic_group_id, expected_http_code) def assert_group_delete(self, group_id, expected_http_code): client = self.auth_client client.configurations.delete(group_id) self.assert_client_code(client, expected_http_code) def run_delete_non_dynamic_group(self, expected_http_code=202): if self.non_dynamic_group_id: self.assert_group_delete(self.non_dynamic_group_id, expected_http_code) def assert_group_delete_failure(self, group_id, expected_exception, expected_http_code): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.configurations.delete, group_id) def _restart_instance( self, instance_id, expected_states=['REBOOT', 'HEALTHY'], expected_http_code=202): client = self.auth_client client.instances.restart(instance_id) self.assert_client_code(client, expected_http_code) self.assert_instance_action(instance_id, expected_states) def run_create_instance_with_conf(self): self.config_id_for_inst = ( self.dynamic_group_id or self.non_dynamic_group_id) if self.config_id_for_inst: self.config_inst_id = self.assert_create_instance_with_conf( self.config_id_for_inst) else: raise SkipTest("No groups (dynamic or non-dynamic) defined in %s." 
                           % self.test_helper.get_class_name())

    def assert_create_instance_with_conf(self, config_id):
        # test that a new instance will apply the configuration on create
        client = self.auth_client
        result = client.instances.create(
            self.instance_info.name + "_config",
            self.instance_info.dbaas_flavor_href,
            self.instance_info.volume,
            [], [],
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version,
            nics=self.instance_info.nics,
            availability_zone="nova",
            configuration=config_id)
        self.assert_client_code(client, 200)
        self.assert_equal("BUILD", result.status, 'Unexpected inst status')
        self.register_debug_inst_ids(result.id)
        return result.id

    def run_wait_for_conf_instance(
            self, expected_states=['BUILD', 'HEALTHY']):
        if self.config_inst_id:
            self.assert_instance_action(self.config_inst_id, expected_states)
            self.create_test_helper_on_instance(self.config_inst_id)
            inst = self.auth_client.instances.get(self.config_inst_id)
            self.assert_equal(self.config_id_for_inst,
                              inst.configuration['id'])
        else:
            raise SkipTest("No instance created with a configuration group.")

    def run_verify_instance_values(self):
        if self.config_id_for_inst:
            self.assert_configuration_values(self.config_inst_id,
                                             self.config_id_for_inst)
        else:
            raise SkipTest("No instance created with a configuration group.")

    def run_delete_conf_instance(self, expected_http_code=202):
        if self.config_inst_id:
            self.assert_delete_conf_instance(
                self.config_inst_id, expected_http_code)
        else:
            raise SkipTest("No instance created with a configuration group.")

    def assert_delete_conf_instance(self, instance_id, expected_http_code):
        client = self.auth_client
        client.instances.delete(instance_id)
        self.assert_client_code(client, expected_http_code)

    def run_wait_for_delete_conf_instance(
            self, expected_last_state=['SHUTDOWN']):
        if self.config_inst_id:
            self.assert_all_gone(self.config_inst_id, expected_last_state)
        else:
            raise SkipTest("No instance created with a configuration group.")


# trove/tests/scenario/runners/database_actions_runners.py
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
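# NOTE: illustrative sketch only; not part of the original module. It walks
# the pagination contract that assert_databases_list verifies below: each
# page holds at most 'limit' entries, and page.next carries the marker (the
# name of the page's last element) for the next request. The function name
# is hypothetical; 'client' is assumed to be a troveclient instance.
def _example_page_through_databases(client, instance_id, limit=2):
    """Collect all database names from an instance one page at a time."""
    names = []
    page = client.databases.list(instance_id, limit=limit)
    while True:
        names.extend(db.name for db in page)
        if not page.next:
            # A missing marker means this was the last page.
            break
        page = client.databases.list(instance_id, limit=limit,
                                     marker=page.next)
    return names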
from proboscis import SkipTest from trove.common import exception from trove.common.utils import poll_until from trove.tests.scenario import runners from trove.tests.scenario.runners.test_runners import SkipKnownBug from trove.tests.scenario.runners.test_runners import TestRunner from troveclient.compat import exceptions class DatabaseActionsRunner(TestRunner): def __init__(self): super(DatabaseActionsRunner, self).__init__() self.db_defs = [] @property def first_db_def(self): if self.db_defs: return self.db_defs[0] raise SkipTest("No valid database definitions provided.") @property def non_existing_db_def(self): db_def = self.test_helper.get_non_existing_database_definition() if db_def: return db_def raise SkipTest("No valid database definitions provided.") def run_databases_create(self, expected_http_code=202): databases = self.test_helper.get_valid_database_definitions() if databases: self.db_defs = self.assert_databases_create( self.instance_info.id, databases, expected_http_code) else: raise SkipTest("No valid database definitions provided.") def assert_databases_create(self, instance_id, serial_databases_def, expected_http_code): client = self.auth_client client.databases.create(instance_id, serial_databases_def) self.assert_client_code(client, expected_http_code) self.wait_for_database_create(client, instance_id, serial_databases_def) return serial_databases_def def run_databases_list(self, expected_http_code=200): self.assert_databases_list( self.instance_info.id, self.db_defs, expected_http_code) def assert_databases_list(self, instance_id, expected_database_defs, expected_http_code, limit=2): client = self.auth_client full_list = client.databases.list(instance_id) self.assert_client_code(client, expected_http_code) listed_databases = {database.name: database for database in full_list} self.assert_is_none(full_list.next, "Unexpected pagination in the list.") for database_def in expected_database_defs: database_name = database_def['name'] self.assert_true( database_name in listed_databases, "Database not included in the 'database-list' output: %s" % database_name) # Check that the system (ignored) databases are not included in the # output. system_databases = self.get_system_databases() self.assert_false( any(name in listed_databases for name in system_databases), "System databases should not be included in the 'database-list' " "output.") # Test list pagination. 
list_page = client.databases.list(instance_id, limit=limit) self.assert_client_code(client, expected_http_code) self.assert_true(len(list_page) <= limit) if len(full_list) > limit: self.assert_is_not_none(list_page.next, "List page is missing.") else: self.assert_is_none(list_page.next, "An extra page in the list.") marker = list_page.next self.assert_pagination_match(list_page, full_list, 0, limit) if marker: last_database = list_page[-1] expected_marker = last_database.name self.assert_equal(expected_marker, marker, "Pagination marker should be the last element " "in the page.") list_page = client.databases.list( instance_id, marker=marker) self.assert_client_code(client, expected_http_code) self.assert_pagination_match( list_page, full_list, limit, len(full_list)) def run_database_create_with_no_attributes( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_databases_create_failure( self.instance_info.id, {}, expected_exception, expected_http_code) def run_database_create_with_blank_name( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_databases_create_failure( self.instance_info.id, {'name': ''}, expected_exception, expected_http_code) def run_existing_database_create( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_databases_create_failure( self.instance_info.id, self.first_db_def, expected_exception, expected_http_code) def assert_databases_create_failure( self, instance_id, serial_databases_def, expected_exception, expected_http_code): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.databases.create, instance_id, serial_databases_def) def run_system_database_create( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # TODO(pmalik): Actions on system users and databases should probably # return Forbidden 403 instead. The current error messages are # confusing (talking about a malformed request). 
        system_databases = self.get_system_databases()
        database_defs = [{'name': name} for name in system_databases]
        if system_databases:
            self.assert_databases_create_failure(
                self.instance_info.id, database_defs,
                expected_exception, expected_http_code)

    def run_database_delete(self, expected_http_code=202):
        for database_def in self.db_defs:
            self.assert_database_delete(
                self.instance_info.id, database_def['name'],
                expected_http_code)

    def assert_database_delete(
            self, instance_id, database_name, expected_http_code):
        client = self.auth_client
        client.databases.delete(instance_id, database_name)
        self.assert_client_code(client, expected_http_code)
        self._wait_for_database_delete(client, instance_id, database_name)

    def _wait_for_database_delete(self, client, instance_id,
                                  deleted_database_name):
        self.report.log("Waiting for deleted database to disappear from the "
                        "listing: %s" % deleted_database_name)

        def _db_is_gone():
            all_dbs = self.get_db_names(client, instance_id)
            return deleted_database_name not in all_dbs

        try:
            poll_until(_db_is_gone, time_out=self.GUEST_CAST_WAIT_TIMEOUT_SEC)
            self.report.log("Database is now gone from the instance.")
        except exception.PollTimeOut:
            self.fail("Database still listed after the poll timeout: %ds"
                      % self.GUEST_CAST_WAIT_TIMEOUT_SEC)

    def run_nonexisting_database_delete(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_database_delete_failure(
            self.instance_info.id, self.non_existing_db_def['name'],
            expected_exception, expected_http_code)

    def run_system_database_delete(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        # TODO(pmalik): Actions on system users and databases should probably
        # return Forbidden 403 instead. The current error messages are
        # confusing (talking about a malformed request).
        system_databases = self.get_system_databases()
        if system_databases:
            for name in system_databases:
                self.assert_database_delete_failure(
                    self.instance_info.id, name,
                    expected_exception, expected_http_code)

    def assert_database_delete_failure(
            self, instance_id, database_name, expected_exception,
            expected_http_code):
        client = self.auth_client
        self.assert_raises(expected_exception, expected_http_code,
                           client, client.databases.delete,
                           instance_id, database_name)

    def get_system_databases(self):
        return self.get_datastore_config_property('ignore_dbs')


class PostgresqlDatabaseActionsRunner(DatabaseActionsRunner):

    def run_system_database_create(self):
        raise SkipKnownBug(runners.BUG_WRONG_API_VALIDATION)

    def run_system_database_delete(self):
        raise SkipKnownBug(runners.BUG_WRONG_API_VALIDATION)


# trove/tests/scenario/runners/guest_log_runners.py
# Copyright 2015 Tesora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
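# NOTE: illustrative sketch only; not part of the original module. It shows
# the Swift-side check the guest-log assertions below are built around: a
# published log leaves objects under the '<instance_id>/<datastore>-<log>/'
# prefix in the log container, and a 404 (or an empty prefix listing) after
# a discard means the segments are gone. The function name is hypothetical.
def _example_log_segments_exist(swift_client, container, prefix):
    """Return True if any published log segments remain under the prefix."""
    from swiftclient.client import ClientException
    try:
        _headers, files = swift_client.get_container(container, prefix=prefix)
        return len(files) > 0
    except ClientException as ex:
        if ex.http_status == 404:
            # The whole container is gone, so no segments can remain.
            return False
        raise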
from swiftclient.client import ClientException import tempfile from troveclient.compat import exceptions from trove.common import cfg from trove.guestagent.common import operating_system from trove.guestagent import guest_log from trove.tests.config import CONFIG from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario import runners from trove.tests.scenario.runners.test_runners import SkipKnownBug from trove.tests.scenario.runners.test_runners import TestRunner CONF = cfg.CONF class GuestLogRunner(TestRunner): def __init__(self): super(GuestLogRunner, self).__init__() self.container = CONF.guest_log_container_name self.prefix_pattern = '%(instance_id)s/%(datastore)s-%(log)s/' self.stopped_log_details = None self._last_log_published = {} self._last_log_contents = {} def _get_last_log_published(self, log_name): return self._last_log_published.get(log_name, None) def _set_last_log_published(self, log_name, published): self._last_log_published[log_name] = published def _get_last_log_contents(self, log_name): return self._last_log_contents.get(log_name, []) def _set_last_log_contents(self, log_name, published): self._last_log_contents[log_name] = published def _get_exposed_user_log_names(self): """Returns the full list of exposed user logs.""" return self.test_helper.get_exposed_user_log_names() def _get_exposed_user_log_name(self): """Return the first exposed user log name.""" return self.test_helper.get_exposed_user_log_names()[0] def _get_unexposed_sys_log_name(self): """Return the first unexposed sys log name.""" return self.test_helper.get_unexposed_sys_log_names()[0] def run_test_log_list(self): self.assert_log_list(self.auth_client, self.test_helper.get_exposed_log_list()) def assert_log_list(self, client, expected_list): log_list = list(client.instances.log_list(self.instance_info.id)) log_names = list(ll.name for ll in log_list) self.assert_list_elements_equal(expected_list, log_names) self.register_debug_inst_ids(self.instance_info.id) def run_test_admin_log_list(self): self.assert_log_list(self.admin_client, self.test_helper.get_full_log_list()) def run_test_log_show(self): log_pending = self._set_zero_or_none() log_name = self._get_exposed_user_log_name() self.assert_log_show(self.auth_client, log_name, expected_published=0, expected_pending=log_pending) def _set_zero_or_none(self): """This attempts to handle the case where an existing instance is used. Values that would normally be '0' are not, and must be ignored. """ value = 0 if self.is_using_existing_instance: value = None return value def assert_log_show(self, client, log_name, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None, is_admin=False): self.report.log("Executing log_show for log '%s'" % log_name) log_details = client.instances.log_show( self.instance_info.id, log_name) self.assert_client_code(client, expected_http_code) self.assert_log_details( log_details, log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending, is_admin=is_admin) def assert_log_details(self, log_details, expected_log_name, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None, is_admin=False): """Check that the action generates the proper response data. 
For log_published and log_pending, setting the value to 'None' will skip that check (useful when using an existing instance, as there may be pending things in user logs right from the get-go) and setting it to a value other than '0' will verify that the actual value is '>=value' (since it's impossible to know what the actual value will be at any given time). '0' will still match exclusively. """ self.report.log("Validating log details for log '%s'" % expected_log_name) self._set_last_log_published(expected_log_name, log_details.published) self.assert_equal(expected_log_name, log_details.name, "Wrong log name for '%s' log" % expected_log_name) self.assert_equal(expected_type, log_details.type, "Wrong log type for '%s' log" % expected_log_name) current_status = log_details.status.replace(' ', '_') if not isinstance(expected_status, list): expected_status = [expected_status] self.assert_is_sublist([current_status], expected_status, "Wrong log status for '%s' log" % expected_log_name) if expected_published is None: pass elif expected_published == 0: self.assert_equal(0, log_details.published, "Wrong log published for '%s' log" % expected_log_name) else: self.assert_true(log_details.published >= expected_published, "Missing log published for '%s' log: " "expected %d, got %d" % (expected_log_name, expected_published, log_details.published)) if expected_pending is None: pass elif expected_pending == 0: self.assert_equal(0, log_details.pending, "Wrong log pending for '%s' log" % expected_log_name) else: self.assert_true(log_details.pending >= expected_pending, "Missing log pending for '%s' log: " "expected %d, got %d" % (expected_log_name, expected_pending, log_details.pending)) container = self.container prefix = self.prefix_pattern % { 'instance_id': self.instance_info.id, 'datastore': CONFIG.dbaas_datastore, 'log': expected_log_name} metafile = prefix.rstrip('/') + '_metafile' if expected_published == 0: self.assert_storage_gone(container, prefix, metafile, is_admin=is_admin) container = 'None' prefix = 'None' else: self.assert_storage_exists(container, prefix, metafile, is_admin=is_admin) self.assert_equal(container, log_details.container, "Wrong log container for '%s' log" % expected_log_name) self.assert_equal(prefix, log_details.prefix, "Wrong log prefix for '%s' log" % expected_log_name) self.assert_equal(metafile, log_details.metafile, "Wrong log metafile for '%s' log" % expected_log_name) def assert_log_enable(self, client, log_name, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None): self.report.log("Executing log_enable for log '%s'" % log_name) log_details = client.instances.log_action( self.instance_info.id, log_name, enable=True) self.assert_client_code(client, expected_http_code) self.assert_log_details( log_details, log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending) def assert_log_disable(self, client, log_name, discard=None, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None): self.report.log("Executing log_disable for log '%s' (discard: %s)" % (log_name, discard)) log_details = client.instances.log_action( self.instance_info.id, log_name, disable=True, discard=discard) self.assert_client_code(client, expected_http_code) self.assert_log_details( log_details, 
log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending) def assert_log_publish(self, client, log_name, disable=None, discard=None, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None, is_admin=False): self.report.log("Executing log_publish for log '%s' (disable: %s " "discard: %s)" % (log_name, disable, discard)) log_details = client.instances.log_action( self.instance_info.id, log_name, publish=True, disable=disable, discard=discard) self.assert_client_code(client, expected_http_code) self.assert_log_details( log_details, log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending, is_admin=is_admin) def assert_log_discard(self, client, log_name, expected_http_code=200, expected_type=guest_log.LogType.USER.name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=None, expected_pending=None): self.report.log("Executing log_discard for log '%s'" % log_name) log_details = client.instances.log_action( self.instance_info.id, log_name, discard=True) self.assert_client_code(client, expected_http_code) self.assert_log_details( log_details, log_name, expected_type=expected_type, expected_status=expected_status, expected_published=expected_published, expected_pending=expected_pending) def assert_storage_gone(self, container, prefix, metafile, is_admin=False): if is_admin: swift_client = self.admin_swift_client else: swift_client = self.swift_client try: headers, container_files = swift_client.get_container( container, prefix=prefix) self.assert_equal(0, len(container_files), "Found files in %s/%s: %s" % (container, prefix, container_files)) except ClientException as ex: if ex.http_status == 404: self.report.log("Container '%s' does not exist" % container) pass else: raise try: swift_client.get_object(container, metafile) self.fail("Found metafile after discard: %s" % metafile) except ClientException as ex: if ex.http_status == 404: self.report.log("Metafile '%s' gone as expected" % metafile) pass else: raise def assert_storage_exists(self, container, prefix, metafile, is_admin=False): if is_admin: swift_client = self.admin_swift_client else: swift_client = self.swift_client try: headers, container_files = swift_client.get_container( container, prefix=prefix) self.assert_true(len(container_files) > 0, "No files found in %s/%s" % (container, prefix)) except ClientException as ex: if ex.http_status == 404: self.fail("Container '%s' does not exist" % container) else: raise try: swift_client.get_object(container, metafile) except ClientException as ex: if ex.http_status == 404: self.fail("Missing metafile: %s" % metafile) else: raise def run_test_log_enable_sys(self, expected_exception=exceptions.BadRequest, expected_http_code=400): log_name = self._get_unexposed_sys_log_name() self.assert_log_enable_fails( self.admin_client, expected_exception, expected_http_code, log_name) def assert_log_enable_fails(self, client, expected_exception, expected_http_code, log_name): self.assert_raises(expected_exception, expected_http_code, client, client.instances.log_action, self.instance_info.id, log_name, enable=True) def run_test_log_disable_sys(self, expected_exception=exceptions.BadRequest, expected_http_code=400): log_name = self._get_unexposed_sys_log_name() self.assert_log_disable_fails( self.admin_client, 
expected_exception, expected_http_code, log_name) def assert_log_disable_fails(self, client, expected_exception, expected_http_code, log_name, discard=None): self.assert_raises(expected_exception, expected_http_code, client, client.instances.log_action, self.instance_info.id, log_name, disable=True, discard=discard) def run_test_log_show_unauth_user(self, expected_exception=exceptions.NotFound, expected_http_code=404): log_name = self._get_exposed_user_log_name() self.assert_log_show_fails( self.unauth_client, expected_exception, expected_http_code, log_name) def assert_log_show_fails(self, client, expected_exception, expected_http_code, log_name): self.assert_raises(expected_exception, expected_http_code, client, client.instances.log_show, self.instance_info.id, log_name) def run_test_log_list_unauth_user(self, expected_exception=exceptions.NotFound, expected_http_code=404): client = self.unauth_client self.assert_raises(expected_exception, expected_http_code, client, client.instances.log_list, self.instance_info.id) def run_test_log_generator_unauth_user( self, expected_exception=exceptions.NotFound, expected_http_code=404): log_name = self._get_exposed_user_log_name() self.assert_log_generator_unauth_user( self.unauth_client, log_name, expected_exception, expected_http_code) def assert_log_generator_unauth_user(self, client, log_name, expected_exception, expected_http_code, publish=None): raise SkipKnownBug(runners.BUG_UNAUTH_TEST_WRONG) # self.assert_raises(expected_exception, expected_http_code, # client, client.instances.log_generator, # self.instance_info.id, log_name, publish=publish) def run_test_log_generator_publish_unauth_user( self, expected_exception=exceptions.NotFound, expected_http_code=404): log_name = self._get_exposed_user_log_name() self.assert_log_generator_unauth_user( self.unauth_client, log_name, expected_exception, expected_http_code, publish=True) def run_test_log_show_unexposed_user( self, expected_exception=exceptions.BadRequest, expected_http_code=400): log_name = self._get_unexposed_sys_log_name() self.assert_log_show_fails( self.auth_client, expected_exception, expected_http_code, log_name) def run_test_log_enable_unexposed_user( self, expected_exception=exceptions.BadRequest, expected_http_code=400): log_name = self._get_unexposed_sys_log_name() self.assert_log_enable_fails( self.auth_client, expected_exception, expected_http_code, log_name) def run_test_log_disable_unexposed_user( self, expected_exception=exceptions.BadRequest, expected_http_code=400): log_name = self._get_unexposed_sys_log_name() self.assert_log_disable_fails( self.auth_client, expected_exception, expected_http_code, log_name) def run_test_log_publish_unexposed_user( self, expected_exception=exceptions.BadRequest, expected_http_code=400): log_name = self._get_unexposed_sys_log_name() self.assert_log_publish_fails( self.auth_client, expected_exception, expected_http_code, log_name) def assert_log_publish_fails(self, client, expected_exception, expected_http_code, log_name, disable=None, discard=None): self.assert_raises(expected_exception, expected_http_code, client, client.instances.log_action, self.instance_info.id, log_name, publish=True, disable=disable, discard=discard) def run_test_log_discard_unexposed_user( self, expected_exception=exceptions.BadRequest, expected_http_code=400): log_name = self._get_unexposed_sys_log_name() self.assert_log_discard_fails( self.auth_client, expected_exception, expected_http_code, log_name) def assert_log_discard_fails(self, client, 
expected_exception, expected_http_code, log_name): self.assert_raises(expected_exception, expected_http_code, client, client.instances.log_action, self.instance_info.id, log_name, discard=True) def run_test_log_enable_user(self): expected_status = guest_log.LogStatus.Ready.name expected_pending = 1 if self.test_helper.log_enable_requires_restart(): expected_status = guest_log.LogStatus.Restart_Required.name # if using an existing instance, there may already be something expected_pending = self._set_zero_or_none() for log_name in self._get_exposed_user_log_names(): self.assert_log_enable( self.auth_client, log_name, expected_status=expected_status, expected_published=0, expected_pending=expected_pending) def run_test_log_enable_flip_user(self): # for restart required datastores, test that flipping them # back to disabled returns the status to 'Disabled' # from 'Restart_Required' if self.test_helper.log_enable_requires_restart(): # if using an existing instance, there may already be something expected_pending = self._set_zero_or_none() for log_name in self._get_exposed_user_log_names(): self.assert_log_disable( self.auth_client, log_name, expected_status=guest_log.LogStatus.Disabled.name, expected_published=0, expected_pending=expected_pending) self.assert_log_enable( self.auth_client, log_name, expected_status=guest_log.LogStatus.Restart_Required.name, expected_published=0, expected_pending=expected_pending) def run_test_restart_datastore(self, expected_http_code=202): if self.test_helper.log_enable_requires_restart(): instance_id = self.instance_info.id # we need to wait until the heartbeat flips the instance # back into 'ACTIVE' before we issue the restart command expected_states = ['RESTART_REQUIRED', 'HEALTHY'] self.assert_instance_action(instance_id, expected_states) client = self.auth_client client.instances.restart(instance_id) self.assert_client_code(client, expected_http_code) def run_test_wait_for_restart(self, expected_states=['REBOOT', 'HEALTHY']): if self.test_helper.log_enable_requires_restart(): self.assert_instance_action(self.instance_info.id, expected_states) def run_test_log_publish_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_publish( self.auth_client, log_name, expected_status=[guest_log.LogStatus.Published.name, guest_log.LogStatus.Partial.name], expected_published=1, expected_pending=None) def run_test_add_data(self): self.test_helper.add_data(DataType.micro, self.get_instance_host()) def run_test_verify_data(self): self.test_helper.verify_data(DataType.micro, self.get_instance_host()) def run_test_log_publish_again_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_publish( self.auth_client, log_name, expected_status=[guest_log.LogStatus.Published.name, guest_log.LogStatus.Partial.name], expected_published=self._get_last_log_published(log_name), expected_pending=None) def run_test_log_generator_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_generator( self.auth_client, log_name, lines=2, expected_lines=2) def assert_log_generator(self, client, log_name, publish=False, lines=4, expected_lines=None, swift_client=None): self.report.log("Executing log_generator for log '%s' (publish: %s)" % (log_name, publish)) if publish: client.instances.log_action(self.instance_info.id, log_name, publish=True) log_gen = client.instances.log_generator( self.instance_info.id, log_name, lines=lines, swift=swift_client) log_contents = "".join([chunk for chunk in log_gen()]) 
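        # NOTE: log_generator returns a generator *function*; calling it
        # yields the log contents in chunks, which are joined into a single
        # string above.  A minimal standalone sketch of the same pattern
        # (the instance id and log name are assumed values):
        #
        #     log_gen = client.instances.log_generator(
        #         'instance-uuid', 'general', lines=50)
        #     contents = "".join(chunk for chunk in log_gen())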
self.report.log("Returned %d lines for log '%s': %s" % ( len(log_contents.splitlines()), log_name, log_contents)) self._set_last_log_contents(log_name, log_contents) if expected_lines: self.assert_equal(expected_lines, len(log_contents.splitlines()), "Wrong line count for '%s' log" % log_name) else: self.assert_true(len(log_contents.splitlines()) <= lines, "More than %d lines found for '%s' log" % (lines, log_name)) def run_test_log_generator_publish_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_generator( self.auth_client, log_name, publish=True, lines=3, expected_lines=3) def run_test_log_generator_swift_client_user(self): swift_client = self.swift_client for log_name in self._get_exposed_user_log_names(): self.assert_log_generator( self.auth_client, log_name, publish=True, lines=3, expected_lines=3, swift_client=swift_client) def run_test_add_data_again(self): # Add some more data so we have at least 3 log data files self.test_helper.add_data(DataType.micro2, self.get_instance_host()) def run_test_verify_data_again(self): self.test_helper.verify_data(DataType.micro2, self.get_instance_host()) def run_test_log_generator_user_by_row(self): log_name = self._get_exposed_user_log_name() self.assert_log_publish( self.auth_client, log_name, expected_status=[guest_log.LogStatus.Published.name, guest_log.LogStatus.Partial.name], expected_published=self._get_last_log_published(log_name), expected_pending=None) # Now get the full contents of the log self.assert_log_generator(self.auth_client, log_name, lines=100000) log_lines = len(self._get_last_log_contents(log_name).splitlines()) # cap at 100, so the test can't run away if something goes wrong log_lines = min(log_lines, 100) # Make sure we get the right number of log lines back each time for lines in range(1, log_lines): self.assert_log_generator( self.auth_client, log_name, lines=lines, expected_lines=lines) def run_test_log_save_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_test_log_save(self.auth_client, log_name) def run_test_log_save_publish_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_test_log_save(self.auth_client, log_name, publish=True) def assert_test_log_save(self, client, log_name, publish=False): # generate the file self.report.log("Executing log_save for log '%s' (publish: %s)" % (log_name, publish)) if publish: client.instances.log_action(self.instance_info.id, log_name=log_name, publish=True) with tempfile.NamedTemporaryFile() as temp_file: client.instances.log_save(self.instance_info.id, log_name=log_name, filename=temp_file.name) file_contents = operating_system.read_file(temp_file.name) # now grab the contents ourselves self.assert_log_generator(client, log_name, lines=100000) # and compare them self.assert_equal(self._get_last_log_contents(log_name), file_contents) def run_test_log_discard_user(self): for log_name in self._get_exposed_user_log_names(): self.assert_log_discard( self.auth_client, log_name, expected_status=guest_log.LogStatus.Ready.name, expected_published=0, expected_pending=1) def run_test_log_disable_user(self): expected_status = guest_log.LogStatus.Disabled.name if self.test_helper.log_enable_requires_restart(): expected_status = guest_log.LogStatus.Restart_Required.name for log_name in self._get_exposed_user_log_names(): self.assert_log_disable( self.auth_client, log_name, expected_status=expected_status, expected_published=0, expected_pending=1) def run_test_log_show_after_stop_details(self): log_name = 
self._get_exposed_user_log_name() self.stopped_log_details = self.auth_client.instances.log_show( self.instance_info.id, log_name) self.assert_is_not_none(self.stopped_log_details) def run_test_add_data_again_after_stop(self): # Add some more data to make sure logging has stopped self.test_helper.add_data(DataType.micro3, self.get_instance_host()) def run_test_verify_data_again_after_stop(self): self.test_helper.verify_data(DataType.micro3, self.get_instance_host()) def run_test_log_show_after_stop(self): log_name = self._get_exposed_user_log_name() self.assert_log_show( self.auth_client, log_name, expected_published=self.stopped_log_details.published, expected_pending=self.stopped_log_details.pending) def run_test_log_enable_user_after_stop(self): expected_status = guest_log.LogStatus.Ready.name expected_pending = 1 if self.test_helper.log_enable_requires_restart(): expected_status = guest_log.LogStatus.Restart_Required.name log_name = self._get_exposed_user_log_name() self.assert_log_enable( self.auth_client, log_name, expected_status=expected_status, expected_published=0, expected_pending=expected_pending) def run_test_add_data_again_after_stop_start(self): # Add some more data to make sure logging has started again self.test_helper.add_data(DataType.micro4, self.get_instance_host()) def run_test_verify_data_again_after_stop_start(self): self.test_helper.verify_data(DataType.micro4, self.get_instance_host()) def run_test_log_publish_after_stop_start(self): log_name = self._get_exposed_user_log_name() self.assert_log_publish( self.auth_client, log_name, expected_status=[guest_log.LogStatus.Published.name, guest_log.LogStatus.Partial.name], expected_published=self._get_last_log_published(log_name) + 1, expected_pending=None) def run_test_log_disable_user_after_stop_start(self): expected_status = guest_log.LogStatus.Disabled.name if self.test_helper.log_enable_requires_restart(): expected_status = guest_log.LogStatus.Restart_Required.name log_name = self._get_exposed_user_log_name() self.assert_log_disable( self.auth_client, log_name, discard=True, expected_status=expected_status, expected_published=0, expected_pending=1) def run_test_log_show_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_log_show( self.admin_client, log_name, expected_type=guest_log.LogType.SYS.name, expected_status=[guest_log.LogStatus.Ready.name, guest_log.LogStatus.Partial.name], expected_published=0, expected_pending=1, is_admin=True ) def run_test_log_publish_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_log_publish( self.admin_client, log_name, expected_type=guest_log.LogType.SYS.name, expected_status=guest_log.LogStatus.Partial.name, expected_published=1, expected_pending=1, is_admin=True) def run_test_log_publish_again_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_log_publish( self.admin_client, log_name, expected_type=guest_log.LogType.SYS.name, expected_status=guest_log.LogStatus.Partial.name, expected_published=self._get_last_log_published(log_name) + 1, expected_pending=1, is_admin=True) def run_test_log_generator_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_log_generator( self.admin_client, log_name, lines=4, expected_lines=4) def run_test_log_generator_publish_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_log_generator( self.admin_client, log_name, publish=True, lines=4, expected_lines=4) def run_test_log_generator_swift_client_sys(self): log_name = self._get_unexposed_sys_log_name() 
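        # NOTE: unexposed system logs are admin-only: both the admin client
        # and the admin swift client are required in the call below. The
        # user-facing tests above expect a BadRequest when a regular user
        # names an unexposed log.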
self.assert_log_generator( self.admin_client, log_name, publish=True, lines=4, expected_lines=4, swift_client=self.admin_swift_client) def run_test_log_save_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_test_log_save( self.admin_client, log_name) def run_test_log_save_publish_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_test_log_save( self.admin_client, log_name, publish=True) def run_test_log_discard_sys(self): log_name = self._get_unexposed_sys_log_name() self.assert_log_discard( self.admin_client, log_name, expected_type=guest_log.LogType.SYS.name, expected_status=guest_log.LogStatus.Ready.name, expected_published=0, expected_pending=1) class CassandraGuestLogRunner(GuestLogRunner): def run_test_log_show(self): log_name = self._get_exposed_user_log_name() self.assert_log_show(self.auth_client, log_name, expected_published=0, expected_pending=None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/instance_actions_runners.py0000644000175000017500000001141600000000000027424 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import SkipTest from trove.tests.config import CONFIG from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario.runners.test_runners import TestRunner class InstanceActionsRunner(TestRunner): def __init__(self): super(InstanceActionsRunner, self).__init__() self.resize_flavor_id = self._get_resize_flavor().id def _get_resize_flavor(self): if self.EPHEMERAL_SUPPORT: flavor_name = CONFIG.values.get( 'instance_bigger_eph_flavor_name', 'eph.rd-smaller') else: flavor_name = CONFIG.values.get( 'instance_bigger_flavor_name', 'm1.rd-smaller') return self.get_flavor(flavor_name) def run_add_test_data(self): host = self.get_instance_host(self.instance_info.id) self.test_helper.add_data(DataType.small, host) def run_verify_test_data(self): host = self.get_instance_host(self.instance_info.id) self.test_helper.verify_data(DataType.small, host) def run_remove_test_data(self): host = self.get_instance_host(self.instance_info.id) self.test_helper.remove_data(DataType.small, host) def run_instance_restart( self, expected_states=['REBOOT', 'HEALTHY'], expected_http_code=202): self.assert_instance_restart(self.instance_info.id, expected_states, expected_http_code) def assert_instance_restart(self, instance_id, expected_states, expected_http_code): self.report.log("Testing restart on instance: %s" % instance_id) client = self.auth_client client.instances.restart(instance_id) self.assert_client_code(client, expected_http_code) self.assert_instance_action(instance_id, expected_states) def run_instance_resize_volume( self, resize_amount=1, expected_states=['RESIZE', 'HEALTHY'], expected_http_code=202): if self.VOLUME_SUPPORT: self.assert_instance_resize_volume( self.instance_info.id, resize_amount, expected_states, 
expected_http_code) else: raise SkipTest("Volume support is disabled.") def assert_instance_resize_volume(self, instance_id, resize_amount, expected_states, expected_http_code): self.report.log("Testing volume resize by '%d' on instance: %s" % (resize_amount, instance_id)) instance = self.get_instance(instance_id) old_volume_size = int(instance.volume['size']) new_volume_size = old_volume_size + resize_amount client = self.auth_client client.instances.resize_volume(instance_id, new_volume_size) self.assert_client_code(client, expected_http_code) self.assert_instance_action(instance_id, expected_states) instance = self.get_instance(instance_id) self.assert_equal(new_volume_size, instance.volume['size'], 'Unexpected new volume size') def run_instance_resize_flavor(self, expected_http_code=202): self.assert_instance_resize_flavor( self.instance_info.id, self.resize_flavor_id, expected_http_code) def assert_instance_resize_flavor(self, instance_id, resize_flavor_id, expected_http_code): self.report.log("Testing resize to '%s' on instance: %s" % (resize_flavor_id, instance_id)) client = self.auth_client client.instances.resize_instance(instance_id, resize_flavor_id) self.assert_client_code(client, expected_http_code) def run_wait_for_instance_resize_flavor( self, expected_states=['RESIZE', 'HEALTHY']): self.report.log("Waiting for resize to '%s' on instance: %s" % (self.resize_flavor_id, self.instance_info.id)) self._assert_instance_states(self.instance_info.id, expected_states) instance = self.get_instance(self.instance_info.id) self.assert_equal(self.resize_flavor_id, instance.flavor['id'], 'Unexpected resize flavor_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/instance_create_runners.py0000644000175000017500000003647600000000000027244 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
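# NOTE: the create runners below deliberately split issuing the create call
# (assert_instance_create) from waiting for the instance to become usable
# (run_wait_for_instance), so that other test steps can run while instances
# are still building.  The flow is roughly:
#
#     runner.run_empty_instance_create()   # POST the instance, expect 200
#     ...other tests may execute here...
#     runner.run_wait_for_instance()       # poll BUILD -> HEALTHY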
from proboscis import SkipTest from trove.tests.config import CONFIG from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario.runners.test_runners import CheckInstance from trove.tests.scenario.runners.test_runners import InstanceTestInfo from trove.tests.scenario.runners.test_runners import TestRunner class InstanceCreateRunner(TestRunner): def __init__(self): super(InstanceCreateRunner, self).__init__() self.init_inst_info = None self.init_inst_dbs = None self.init_inst_users = None self.init_inst_host = None self.init_inst_data = None self.init_inst_config_group_id = None self.config_group_id = None def run_empty_instance_create(self, expected_states=['BUILD', 'HEALTHY'], expected_http_code=200): name = self.instance_info.name flavor = self.get_instance_flavor() volume_size = self.instance_info.volume_size instance_info = self.assert_instance_create( name, flavor, volume_size, [], [], None, None, CONFIG.dbaas_datastore, CONFIG.dbaas_datastore_version, expected_states, expected_http_code, create_helper_user=True, locality='affinity') # Update the shared instance info. self.instance_info.id = instance_info.id self.instance_info.name = instance_info.name self.instance_info.databases = instance_info.databases self.instance_info.users = instance_info.users self.instance_info.dbaas_datastore = instance_info.dbaas_datastore self.instance_info.dbaas_datastore_version = ( instance_info.dbaas_datastore_version) self.instance_info.dbaas_flavor_href = instance_info.dbaas_flavor_href self.instance_info.volume = instance_info.volume self.instance_info.helper_user = instance_info.helper_user self.instance_info.helper_database = instance_info.helper_database def run_initial_configuration_create(self, expected_http_code=200): group_id, _ = self.create_initial_configuration(expected_http_code) if group_id: self.config_group_id = group_id else: raise SkipTest("No groups defined.") def run_initialized_instance_create( self, with_dbs=True, with_users=True, configuration_id=None, expected_states=['BUILD', 'HEALTHY'], expected_http_code=200, create_helper_user=True, name_suffix='_init'): if self.is_using_existing_instance: # The user requested to run the tests using an existing instance. # We therefore skip any scenarios that involve creating new # test instances. raise SkipTest("Using an existing instance.") configuration_id = configuration_id or self.config_group_id name = self.instance_info.name + name_suffix flavor = self.get_instance_flavor() volume_size = self.instance_info.volume_size self.init_inst_dbs = (self.test_helper.get_valid_database_definitions() if with_dbs else []) self.init_inst_users = (self.test_helper.get_valid_user_definitions() if with_users else []) self.init_inst_config_group_id = configuration_id if (self.init_inst_dbs or self.init_inst_users or configuration_id): info = self.assert_instance_create( name, flavor, volume_size, self.init_inst_dbs, self.init_inst_users, configuration_id, None, CONFIG.dbaas_datastore, CONFIG.dbaas_datastore_version, expected_states, expected_http_code, create_helper_user=create_helper_user) self.init_inst_info = info else: # There is no need to run this test as it's effectively the same as # the empty instance test. 
raise SkipTest("No testable initial properties provided.") def assert_instance_create( self, name, flavor, trove_volume_size, database_definitions, user_definitions, configuration_id, root_password, datastore, datastore_version, expected_states, expected_http_code, create_helper_user=False, locality=None): """This assert method executes a 'create' call and verifies the server response. It neither waits for the instance to become available nor it performs any other validations itself. It has been designed this way to increase test granularity (other tests may run while the instance is building) and also to allow its reuse in other runners. """ databases = database_definitions users = [{'name': item['name'], 'password': item['password']} for item in user_definitions] instance_info = InstanceTestInfo() # Here we add helper user/database if any. if create_helper_user: helper_db_def, helper_user_def, root_def = self.build_helper_defs() if helper_db_def: self.report.log( "Appending a helper database '%s' to the instance " "definition." % helper_db_def['name']) databases.append(helper_db_def) instance_info.helper_database = helper_db_def if helper_user_def: self.report.log( "Appending a helper user '%s:%s' to the instance " "definition." % (helper_user_def['name'], helper_user_def['password'])) users.append(helper_user_def) instance_info.helper_user = helper_user_def instance_info.name = name instance_info.databases = databases instance_info.users = users instance_info.dbaas_datastore = CONFIG.dbaas_datastore instance_info.dbaas_datastore_version = CONFIG.dbaas_datastore_version instance_info.dbaas_flavor_href = self.get_flavor_href(flavor) if self.VOLUME_SUPPORT: instance_info.volume = {'size': trove_volume_size} else: instance_info.volume = None instance_info.nics = self.instance_info.nics self.report.log("Testing create instance: %s" % {'name': name, 'flavor': flavor.id, 'volume': trove_volume_size, 'nics': instance_info.nics, 'databases': databases, 'users': users, 'configuration': configuration_id, 'root password': root_password, 'datastore': datastore, 'datastore version': datastore_version}) instance = self.get_existing_instance() if instance: self.report.log("Using an existing instance: %s" % instance.id) self.assert_equal(expected_states[-1], instance.status, "Given instance is in a bad state.") instance_info.name = instance.name else: self.report.log("Creating a new instance.") client = self.auth_client instance = client.instances.create( instance_info.name, instance_info.dbaas_flavor_href, instance_info.volume, instance_info.databases, instance_info.users, nics=instance_info.nics, configuration=configuration_id, availability_zone="nova", datastore=instance_info.dbaas_datastore, datastore_version=instance_info.dbaas_datastore_version, locality=locality) self.assert_client_code(client, expected_http_code) self.assert_instance_action(instance.id, expected_states[0:1]) self.register_debug_inst_ids(instance.id) instance_info.id = instance.id with CheckInstance(instance._info) as check: check.flavor() check.datastore() check.links(instance._info['links']) if self.VOLUME_SUPPORT: check.volume() self.assert_equal(trove_volume_size, instance._info['volume']['size'], "Unexpected Trove volume size") self.assert_equal(instance_info.name, instance._info['name'], "Unexpected instance name") self.assert_equal(str(flavor.id), str(instance._info['flavor']['id']), "Unexpected instance flavor") self.assert_equal(instance_info.dbaas_datastore, instance._info['datastore']['type'], "Unexpected instance 
datastore version") self.assert_equal(instance_info.dbaas_datastore_version, instance._info['datastore']['version'], "Unexpected instance datastore version") self.assert_configuration_group(instance_info.id, configuration_id) if locality: self.assert_equal(locality, instance._info['locality'], "Unexpected locality") return instance_info def run_wait_for_instance(self, expected_states=['BUILD', 'HEALTHY']): instances = [self.instance_info.id] self.assert_all_instance_states(instances, expected_states) self.instance_info.srv_grp_id = self.assert_server_group_exists( self.instance_info.id) self.wait_for_test_helpers(self.instance_info) def run_wait_for_init_instance(self, expected_states=['BUILD', 'HEALTHY']): if self.init_inst_info: instances = [self.init_inst_info.id] self.assert_all_instance_states(instances, expected_states) self.wait_for_test_helpers(self.init_inst_info) def wait_for_test_helpers(self, inst_info): self.report.log("Waiting for helper users and databases to be " "created on instance: %s" % inst_info.id) client = self.auth_client if inst_info.helper_user: self.wait_for_user_create(client, inst_info.id, [inst_info.helper_user]) if inst_info.helper_database: self.wait_for_database_create(client, inst_info.id, [inst_info.helper_database]) self.report.log("Test helpers are ready.") def run_add_initialized_instance_data(self): if self.init_inst_info: self.init_inst_data = DataType.small self.init_inst_host = self.get_instance_host( self.init_inst_info.id) self.test_helper.add_data(self.init_inst_data, self.init_inst_host) def run_validate_initialized_instance(self): if self.init_inst_info: self.assert_instance_properties( self.init_inst_info.id, self.init_inst_dbs, self.init_inst_users, self.init_inst_config_group_id, self.init_inst_data) def assert_instance_properties( self, instance_id, expected_dbs_definitions, expected_user_definitions, expected_config_group_id, expected_data_type): if expected_dbs_definitions: self.assert_database_list(instance_id, expected_dbs_definitions) else: self.report.log("No databases to validate for instance: %s" % instance_id) if expected_user_definitions: self.assert_user_list(instance_id, expected_user_definitions) else: self.report.log("No users to validate for instance: %s" % instance_id) self.assert_configuration_group(instance_id, expected_config_group_id) if self.init_inst_host: self.test_helper.verify_data( expected_data_type, self.init_inst_host) else: self.report.log("No data to validate for instance: %s" % instance_id) def assert_configuration_group(self, instance_id, expected_group_id): instance = self.get_instance(instance_id) if expected_group_id: self.assert_equal(expected_group_id, instance.configuration['id'], "Wrong configuration group attached") else: self.assert_false(hasattr(instance, 'configuration'), "No configuration group expected") def assert_database_list(self, instance_id, expected_databases): self.wait_for_database_create(self.auth_client, instance_id, expected_databases) def _get_names(self, definitions): return [item['name'] for item in definitions] def assert_user_list(self, instance_id, expected_users): self.wait_for_user_create(self.auth_client, instance_id, expected_users) # Verify that user definitions include only created databases. 
all_databases = self._get_names( self.test_helper.get_valid_database_definitions()) for user in expected_users: if 'databases' in user: self.assert_is_sublist( self._get_names(user['databases']), all_databases, "Definition of user '%s' specifies databases not included " "in the list of initial databases." % user['name']) def run_initialized_instance_delete(self, expected_http_code=202): if self.init_inst_info: client = self.auth_client client.instances.delete(self.init_inst_info.id) self.assert_client_code(client, expected_http_code) else: raise SkipTest("Cleanup is not required.") def run_wait_for_init_delete(self, expected_states=['SHUTDOWN']): delete_ids = [] if self.init_inst_info: delete_ids.append(self.init_inst_info.id) if delete_ids: self.assert_all_gone(delete_ids, expected_states[-1]) else: raise SkipTest("Cleanup is not required.") self.init_inst_info = None self.init_inst_dbs = None self.init_inst_users = None self.init_inst_host = None self.init_inst_data = None self.init_inst_config_group_id = None def run_initial_configuration_delete(self, expected_http_code=202): if self.config_group_id: client = self.auth_client client.configurations.delete(self.config_group_id) self.assert_client_code(client, expected_http_code) else: raise SkipTest("Cleanup is not required.") self.config_group_id = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/instance_delete_runners.py0000644000175000017500000000402400000000000027223 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
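# NOTE: instance deletion is asynchronous.  run_instance_delete only issues
# the delete call (expecting HTTP 202); run_instance_delete_wait then polls
# separately until the instance reaches SHUTDOWN and disappears, and checks
# that its server group was cleaned up as well.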
import proboscis from trove.tests.scenario.runners.test_runners import TestRunner class InstanceDeleteRunner(TestRunner): def __init__(self): super(InstanceDeleteRunner, self).__init__() def run_instance_delete(self, expected_http_code=202): if self.has_do_not_delete_instance: self.report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was " "specified, skipping delete...") raise proboscis.SkipTest("TESTS_DO_NOT_DELETE_INSTANCE " "was specified.") self.assert_instance_delete(self.instance_info.id, expected_http_code) def assert_instance_delete(self, instance_id, expected_http_code): self.report.log("Testing delete on instance: %s" % instance_id) client = self.auth_client client.instances.delete(instance_id) self.assert_client_code(client, expected_http_code) def run_instance_delete_wait(self, expected_states=['SHUTDOWN']): if self.has_do_not_delete_instance: self.report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was " "specified, skipping delete wait...") raise proboscis.SkipTest("TESTS_DO_NOT_DELETE_INSTANCE " "was specified.") self.assert_all_gone(self.instance_info.id, expected_states[-1]) self.assert_server_group_gone(self.instance_info.srv_grp_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/instance_error_create_runners.py0000644000175000017500000001165500000000000030445 0ustar00coreycorey00000000000000# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
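# NOTE: these runners create instances that are expected to fail to build
# (one on a flavor whose disk is too small for the image, one that exceeds
# the RAM quota), wait for the ERROR state, and then assert on the fault
# message reported by the API.  A rough sketch of the final check:
#
#     instance = client.instances.get(error_inst_id)
#     assert "disk is too small for requested image" in \
#         instance.fault['message']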
from proboscis import SkipTest from trove.tests.scenario.runners.test_runners import CheckInstance from trove.tests.scenario.runners.test_runners import TestRunner class InstanceErrorCreateRunner(TestRunner): def __init__(self): super(InstanceErrorCreateRunner, self).__init__(sleep_time=1) self.error_inst_id = None self.error2_inst_id = None def run_create_error_instance(self, expected_http_code=200): if self.is_using_existing_instance: raise SkipTest("Using an existing instance.") name = self.instance_info.name + '_error' flavor = self.get_instance_flavor(fault_num=1) client = self.auth_client inst = client.instances.create( name, self.get_flavor_href(flavor), self.instance_info.volume, nics=self.instance_info.nics, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version) self.assert_client_code(client, expected_http_code) self.error_inst_id = inst.id def run_create_error2_instance(self, expected_http_code=200): if self.is_using_existing_instance: raise SkipTest("Using an existing instance.") name = self.instance_info.name + '_error2' flavor = self.get_instance_flavor(fault_num=2) client = self.auth_client inst = client.instances.create( name, self.get_flavor_href(flavor), self.instance_info.volume, nics=self.instance_info.nics, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version) self.assert_client_code(client, expected_http_code) self.error2_inst_id = inst.id def run_wait_for_error_instances(self, expected_states=['ERROR']): error_ids = [] if self.error_inst_id: error_ids.append(self.error_inst_id) if self.error2_inst_id: error_ids.append(self.error2_inst_id) if error_ids: self.assert_all_instance_states( error_ids, expected_states, fast_fail_status=[]) def run_validate_error_instance(self): if not self.error_inst_id: raise SkipTest("No error instance created.") instance = self.get_instance( self.error_inst_id, self.auth_client) with CheckInstance(instance._info) as check: check.fault() err_msg = "disk is too small for requested image" self.assert_true(err_msg in instance.fault['message'], "Message '%s' does not contain '%s'" % (instance.fault['message'], err_msg)) def run_validate_error2_instance(self): if not self.error2_inst_id: raise SkipTest("No error2 instance created.") instance = self.get_instance( self.error2_inst_id, client=self.admin_client) with CheckInstance(instance._info) as check: check.fault(is_admin=True) err_msg = "Quota exceeded for ram" self.assert_true(err_msg in instance.fault['message'], "Message '%s' does not contain '%s'" % (instance.fault['message'], err_msg)) def run_delete_error_instances(self, expected_http_code=202): client = self.auth_client if self.error_inst_id: client.instances.delete(self.error_inst_id) self.assert_client_code(client, expected_http_code) if self.error2_inst_id: client.instances.delete(self.error2_inst_id) self.assert_client_code(client, expected_http_code) def run_wait_for_error_delete(self, expected_states=['SHUTDOWN']): delete_ids = [] if self.error_inst_id: delete_ids.append(self.error_inst_id) if self.error2_inst_id: delete_ids.append(self.error2_inst_id) if delete_ids: self.assert_all_gone(delete_ids, expected_states[-1]) else: raise SkipTest("Cleanup is not required.") # All the neutron ports should be removed. 
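        # Trove names the guest's Neutron port 'trove-<instance_id>', so an
        # empty result from the name-filtered list_ports call below confirms
        # that the port was deleted along with the ERROR instance.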
if self.error_inst_id: ports = self.neutron_client.list_ports( name='trove-%s' % self.error_inst_id ) self.assert_equal(0, len(ports.get("ports", []))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/instance_force_delete_runners.py0000644000175000017500000000441400000000000030404 0ustar00coreycorey00000000000000# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import SkipTest from trove.tests.scenario import runners from trove.tests.scenario.runners.test_runners import SkipKnownBug from trove.tests.scenario.runners.test_runners import TestRunner class InstanceForceDeleteRunner(TestRunner): def __init__(self): super(InstanceForceDeleteRunner, self).__init__(sleep_time=1) self.build_inst_id = None def run_create_build_instance(self, expected_states=['NEW', 'BUILD'], expected_http_code=200): if self.is_using_existing_instance: raise SkipTest("Using an existing instance.") name = self.instance_info.name + '_build' flavor = self.get_instance_flavor() client = self.auth_client inst = client.instances.create( name, self.get_flavor_href(flavor), self.instance_info.volume, nics=self.instance_info.nics, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version) self.assert_client_code(client, expected_http_code) self.assert_instance_action([inst.id], expected_states) self.build_inst_id = inst.id def run_delete_build_instance(self, expected_http_code=202): if self.build_inst_id: client = self.admin_client client.instances.force_delete(self.build_inst_id) self.assert_client_code(client, expected_http_code) def run_wait_for_force_delete(self): raise SkipKnownBug(runners.BUG_FORCE_DELETE_FAILS) # if self.build_inst_id: # self.assert_all_gone([self.build_inst_id], ['SHUTDOWN']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/instance_upgrade_runners.py0000644000175000017500000000533500000000000027416 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
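# NOTE: after an upgrade, the runner below polls the guest until users can
# be listed again, using the oslo.service loopingcall idiom (a sketch; the
# ready() check is a placeholder):
#
#     def _poll():
#         if ready():
#             raise loopingcall.LoopingCallDone()
#
#     timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_poll)
#     timer.start(interval=3, timeout=120).wait()
#
# LoopingCallTimeOut is raised if the condition never becomes true in time.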
from oslo_service import loopingcall from trove.tests.scenario.helpers.test_helper import DataType from trove.tests.scenario.runners.test_runners import TestRunner class InstanceUpgradeRunner(TestRunner): def __init__(self): super(InstanceUpgradeRunner, self).__init__() def run_add_test_data(self): host = self.get_instance_host(self.instance_info.id) self.test_helper.add_data(DataType.small, host) def run_verify_test_data(self): host = self.get_instance_host(self.instance_info.id) self.test_helper.verify_data(DataType.small, host) def run_remove_test_data(self): host = self.get_instance_host(self.instance_info.id) self.test_helper.remove_data(DataType.small, host) def run_instance_upgrade(self, expected_states=['UPGRADE', 'HEALTHY'], expected_http_code=202): instance_id = self.instance_info.id self.report.log("Testing upgrade on instance: %s" % instance_id) target_version = self.instance_info.dbaas_datastore_version client = self.auth_client client.instances.upgrade(instance_id, target_version) self.assert_client_code(client, expected_http_code) self.assert_instance_action(instance_id, expected_states) def _wait_for_user_list(): try: all_users = self.get_user_names(client, instance_id) self.report.log("Users in the db instance %s: %s" % (instance_id, all_users)) except Exception as e: self.report.log( "Failed to list users in db instance %s(will continue), " "error: %s" % (instance_id, str(e)) ) else: raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait_for_user_list) try: timer.start(interval=3, timeout=120).wait() except loopingcall.LoopingCallTimeOut: self.fail("Timed out: Cannot list users in the db instance %s" % instance_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/module_runners.py0000644000175000017500000021101000000000000025355 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
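# NOTE: the module_name_order table defined below encodes the order in which
# these test modules are expected to be applied to a guest: priority_apply
# modules first, then the rest, with each group sorted by its apply_order
# value.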
#

import os
from proboscis import SkipTest
import re
import six
import tempfile
import time

from troveclient.compat import exceptions

from trove.common import exception
from trove.common.utils import poll_until
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.module import models
from trove.tests.scenario.runners.test_runners import TestRunner


class ModuleRunner(TestRunner):

    def __init__(self):
        super(ModuleRunner, self).__init__()

        self.MODULE_CONTENTS_PATTERN = 'Message=%s\n'
        self.MODULE_MESSAGE_PATTERN = 'Hello World from: %s'
        self.MODULE_NAME = 'test_module_1'
        self.MODULE_DESC = 'test description'
        self.MODULE_NEG_CONTENTS = 'contents for negative tests'
        self.MODULE_BINARY_SUFFIX = '_bin_auto'
        self.MODULE_BINARY_SUFFIX2 = self.MODULE_BINARY_SUFFIX + '_2'
        self.MODULE_BINARY_CONTENTS = os.urandom(20)
        self.MODULE_BINARY_CONTENTS2 = b'\x00\xFF\xea\x9c\x11\xfeok\xb1\x8ax'

        self.module_name_order = [
            # 0
            {'suffix': self.MODULE_BINARY_SUFFIX,
             'priority': True, 'order': 1},
            # 1
            {'suffix': self.MODULE_BINARY_SUFFIX2,
             'priority': True, 'order': 2},
            # 2
            {'suffix': '_hidden_all_tenant_auto_priority',
             'priority': True, 'order': 3},
            # 3
            {'suffix': '_hidden', 'priority': True, 'order': 4},
            # 4
            {'suffix': '_auto', 'priority': True, 'order': 5},
            # 5
            {'suffix': '_live', 'priority': True, 'order': 6},
            # 6
            {'suffix': '_priority', 'priority': True, 'order': 7},
            # 7
            {'suffix': '_ds', 'priority': False, 'order': 1},
            # 8
            {'suffix': '_ds_ver', 'priority': False, 'order': 2},
            # 9
            {'suffix': '_all_tenant_ds_ver', 'priority': False, 'order': 3},
            # 10
            {'suffix': '', 'priority': False, 'order': 4},
            # 11
            {'suffix': '_ds_diff', 'priority': False, 'order': 5},
            # 12
            {'suffix': '_diff_tenant', 'priority': False, 'order': 6},
            # 13
            {'suffix': '_full_access', 'priority': False, 'order': 7},
            # 14
            {'suffix': '_for_update', 'priority': False, 'order': 8},
            # 15
            {'suffix': '_updated', 'priority': False, 'order': 8},
        ]

        self.apply_count = 0
        self.mod_inst_id = None
        self.mod_inst_apply_count = 0
        self.temp_module = None
        self.live_update_orig_md5 = None
        self.reapply_max_upd_date = None
        self._module_type = None

        self.test_modules = []
        self.module_count_prior_to_create = 0
        self.module_ds_count_prior_to_create = 0
        self.module_ds_all_count_prior_to_create = 0
        self.module_auto_apply_count_prior_to_create = 0
        self.module_admin_count_prior_to_create = 0
        self.module_other_count_prior_to_create = 0
        self.module_create_count = 0
        self.module_ds_create_count = 0
        self.module_ds_all_create_count = 0
        self.module_all_tenant_create_count = 0
        self.module_auto_apply_create_count = 0
        self.module_admin_create_count = 0
        self.module_other_create_count = 0

    @property
    def module_type(self):
        if not self._module_type:
            self._module_type = self.test_helper.get_valid_module_type()
        return self._module_type

    def _get_test_module(self, index):
        if not self.test_modules or len(self.test_modules) < (index + 1):
            raise SkipTest("Requested module not created")
        return self.test_modules[index]

    @property
    def main_test_module(self):
        # The module named "test_module_1"
        return self._get_test_module(0)

    @property
    def update_test_module(self):
        # The module named "test_module_1_updated"
        return self._get_test_module(1)

    @property
    def live_update_test_module(self):
        return self._find_live_update_module()

    def build_module_args(self, name_order=None):
        suffix = "_unknown"
        priority = False
        order = 5
        if name_order is not None:
            name_rec = self.module_name_order[name_order]
            suffix = name_rec['suffix']
            priority = name_rec['priority']
order = name_rec['order'] name = self.MODULE_NAME + suffix description = self.MODULE_DESC + suffix.replace('_', ' ') contents = self.get_module_contents(name) return name, description, contents, priority, order def get_module_contents(self, name=None): message = self.get_module_message(name=name) return self.MODULE_CONTENTS_PATTERN % message def get_module_message(self, name=None): name = name or self.MODULE_NAME return self.MODULE_MESSAGE_PATTERN % name def _find_invisible_module(self): def _match(mod): return not mod.visible and mod.tenant_id and not mod.auto_apply return self._find_module(_match, "Could not find invisible module") def _find_module(self, match_fn, not_found_message, find_all=False, fail_on_not_found=True): found = [] if find_all else None for test_module in self.test_modules: if match_fn(test_module): if find_all: found.append(test_module) else: found = test_module break if not found: if fail_on_not_found: self.fail(not_found_message) else: SkipTest(not_found_message) return found def _find_auto_apply_module(self): def _match(mod): return mod.auto_apply and mod.tenant_id and mod.visible return self._find_module(_match, "Could not find auto-apply module") def _find_live_update_module(self): def _match(mod): return mod.live_update and mod.tenant_id and mod.visible return self._find_module(_match, "Could not find live-update module") def _find_all_tenant_module(self): def _match(mod): return mod.tenant_id is None and mod.visible return self._find_module(_match, "Could not find all tenant module") def _find_priority_apply_module(self): def _match(mod): return mod.priority_apply and mod.tenant_id and mod.visible return self._find_module(_match, "Could not find priority-apply module") def _find_diff_datastore_module(self): def _match(mod): return (mod.datastore and mod.datastore != models.Modules.MATCH_ALL_NAME and mod.datastore != self.instance_info.dbaas_datastore) return self._find_module(_match, "Could not find different datastore module", fail_on_not_found=False) def _find_all_auto_apply_modules(self, visible=None): def _match(mod): return mod.auto_apply and ( visible is None or mod.visible == visible) return self._find_module( _match, "Could not find all auto apply modules", find_all=True) def _find_module_by_id(self, module_id): def _match(mod): return mod.id == module_id return self._find_module(_match, "Could not find module with id %s" % module_id) # Tests start here def run_module_delete_existing(self): """Delete all the testing modules if exist.""" modules = self.admin_client.modules.list() for module in modules: if module.name.startswith(self.MODULE_NAME): self.admin_client.modules.delete(module.id) def run_module_create_bad_type( self, expected_exception=exceptions.NotFound, expected_http_code=404): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, 'invalid-type', self.MODULE_NEG_CONTENTS) def run_module_create_non_admin_auto( self, expected_exception=exceptions.Forbidden, expected_http_code=403): """Non-admin cannot create modules by specifying auto_apply.""" client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, auto_apply=True) def run_module_create_non_admin_all_tenant( self, expected_exception=exceptions.Forbidden, expected_http_code=403): """Non-admin cannot create modules by specifying all_tenants.""" client = self.auth_client self.assert_raises( 
expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, all_tenants=True) def run_module_create_non_admin_hidden( self, expected_exception=exceptions.Forbidden, expected_http_code=403): """Non-admin cannot create modules by specifying visible.""" client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, visible=False) def run_module_create_non_admin_priority( self, expected_exception=exceptions.Forbidden, expected_http_code=403): """Non-admin cannot create modules by specifying priority_apply.""" client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, priority_apply=True) def run_module_create_non_admin_no_full_access( self, expected_exception=exceptions.Forbidden, expected_http_code=403): """Non-admin cannot create modules by specifying full_access.""" client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, full_access=False) def run_module_create_full_access_with_admin_opt( self, expected_exception=exceptions.BadRequest, expected_http_code=400): """full_access cannot be used together with auto_apply.""" client = self.admin_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, full_access=True, auto_apply=True) def run_module_create_bad_datastore( self, expected_exception=exceptions.NotFound, expected_http_code=404): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, datastore='bad-datastore') def run_module_create_bad_datastore_version( self, expected_exception=exceptions.BadRequest, expected_http_code=400): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, datastore=self.instance_info.dbaas_datastore, datastore_version='bad-datastore-version') def run_module_create_missing_datastore( self, expected_exception=exceptions.BadRequest, expected_http_code=400): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS, datastore_version=self.instance_info.dbaas_datastore_version) def run_module_create(self): # Necessary to test that the count increases. 
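        # Snapshot the per-scope module counts (all modules, per-datastore,
        # the 'all' datastore, auto-apply, admin-visible and other-tenant)
        # before creating anything, so that later list() assertions can
        # verify deltas rather than absolute counts.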
self.module_count_prior_to_create = len( self.auth_client.modules.list()) self.module_ds_count_prior_to_create = len( self.auth_client.modules.list( datastore=self.instance_info.dbaas_datastore)) self.module_ds_all_count_prior_to_create = len( self.auth_client.modules.list( datastore=models.Modules.MATCH_ALL_NAME)) self.module_auto_apply_count_prior_to_create = len( [module for module in self.admin_client.modules.list() if module.auto_apply]) self.module_admin_count_prior_to_create = len( self.admin_client.modules.list()) self.module_other_count_prior_to_create = len( self.unauth_client.modules.list()) # Create module "test_module_1" for datastore "all" self.assert_module_create(self.auth_client, 10) def assert_module_create(self, client, name_order, name=None, module_type=None, contents=None, description=None, all_tenants=False, datastore=None, datastore_version=None, auto_apply=False, live_update=False, visible=True, priority_apply=None, apply_order=None, full_access=None): (temp_name, temp_description, temp_contents, temp_priority, temp_order) = self.build_module_args(name_order) name = name if name is not None else temp_name description = ( description if description is not None else temp_description) contents = contents if contents is not None else temp_contents priority_apply = ( priority_apply if priority_apply is not None else temp_priority) apply_order = apply_order if apply_order is not None else temp_order module_type = module_type or self.module_type result = client.modules.create( name, module_type, contents, description=description, all_tenants=all_tenants, datastore=datastore, datastore_version=datastore_version, auto_apply=auto_apply, live_update=live_update, visible=visible, priority_apply=priority_apply, apply_order=apply_order, full_access=full_access) username = client.real_client.client.username if username == self.instance_info.user.auth_user: self.module_create_count += 1 if datastore: if datastore == self.instance_info.dbaas_datastore: self.module_ds_create_count += 1 else: self.module_ds_all_create_count += 1 elif (username != self.instance_info.admin_user.auth_user and username != self.instance_info.user.auth_user): self.module_other_create_count += 1 else: self.module_admin_create_count += 1 if all_tenants and visible: self.module_all_tenant_create_count += 1 if datastore: if datastore == self.instance_info.dbaas_datastore: self.module_ds_create_count += 1 else: self.module_ds_all_create_count += 1 if auto_apply and visible: self.module_auto_apply_create_count += 1 self.test_modules.append(result) tenant_id = None tenant = models.Modules.MATCH_ALL_NAME if not all_tenants: tenant, tenant_id = self.get_client_tenant(client) # If we find a way to grab the tenant name in the module # stuff, the line below can be removed tenant = tenant_id datastore = datastore or models.Modules.MATCH_ALL_NAME datastore_version = datastore_version or models.Modules.MATCH_ALL_NAME self.validate_module( result, validate_all=False, expected_name=name, expected_module_type=module_type, expected_description=description, expected_tenant=tenant, expected_tenant_id=tenant_id, expected_datastore=datastore, expected_datastore_version=datastore_version, expected_auto_apply=auto_apply, expected_contents=contents, expected_is_admin=( username == self.instance_info.admin_user.auth_user and not full_access ) ) def validate_module(self, module, validate_all=False, expected_name=None, expected_module_type=None, expected_description=None, expected_tenant=None, expected_tenant_id=None, 
expected_datastore=None, expected_datastore_id=None, expected_all_datastores=None, expected_datastore_version=None, expected_datastore_version_id=None, expected_all_datastore_versions=None, expected_all_tenants=None, expected_auto_apply=None, expected_live_update=None, expected_visible=None, expected_contents=None, expected_priority_apply=None, expected_apply_order=None, expected_is_admin=None, expected_full_access=None): if expected_all_tenants: expected_tenant = expected_tenant or models.Modules.MATCH_ALL_NAME if expected_all_datastores: expected_datastore = models.Modules.MATCH_ALL_NAME expected_datastore_id = None if expected_all_datastore_versions: expected_datastore_version = models.Modules.MATCH_ALL_NAME expected_datastore_version_id = None if expected_name: self.assert_equal(expected_name, module.name, 'Unexpected module name') if expected_module_type: self.assert_equal(expected_module_type.lower(), module.type, 'Unexpected module type') if expected_description: self.assert_equal(expected_description, module.description, 'Unexpected module description') if expected_tenant_id: self.assert_equal(expected_tenant_id, module.tenant_id, 'Unexpected tenant id') if expected_tenant: self.assert_equal(expected_tenant, module.tenant, 'Unexpected tenant name') if expected_datastore: self.assert_equal(expected_datastore, module.datastore, 'Unexpected datastore') if expected_datastore_version: self.assert_equal(expected_datastore_version, module.datastore_version, 'Unexpected datastore version') if expected_auto_apply is not None: self.assert_equal(expected_auto_apply, module.auto_apply, 'Unexpected auto_apply') if expected_priority_apply is not None: self.assert_equal(expected_priority_apply, module.priority_apply, 'Unexpected priority_apply') if expected_apply_order is not None: self.assert_equal(expected_apply_order, module.apply_order, 'Unexpected apply_order') if expected_is_admin is not None: self.assert_equal(expected_is_admin, module.is_admin, 'Unexpected is_admin') if expected_full_access is not None: self.assert_equal(expected_full_access, not module.is_admin, 'Unexpected full_access') if validate_all: if expected_datastore_id: self.assert_equal(expected_datastore_id, module.datastore_id, 'Unexpected datastore id') if expected_datastore_version_id: self.assert_equal(expected_datastore_version_id, module.datastore_version_id, 'Unexpected datastore version id') if expected_live_update is not None: self.assert_equal(expected_live_update, module.live_update, 'Unexpected live_update') if expected_visible is not None: self.assert_equal(expected_visible, module.visible, 'Unexpected visible') def run_module_create_for_update(self): # Create module "test_module_1_updated" self.assert_module_create(self.auth_client, 14) def run_module_create_dupe( self, expected_exception=exceptions.BadRequest, expected_http_code=400): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.create, self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS) def run_module_update_missing_datastore( self, expected_exception=exceptions.BadRequest, expected_http_code=400): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.update, self.update_test_module.id, datastore_version=self.instance_info.dbaas_datastore_version) def run_module_create_bin(self): self.assert_module_create( self.admin_client, 0, contents=self.MODULE_BINARY_CONTENTS, auto_apply=True, visible=False) def run_module_create_bin2(self): 
self.assert_module_create( self.admin_client, 1, contents=self.MODULE_BINARY_CONTENTS2, auto_apply=True, visible=False) def run_module_show(self): test_module = self.main_test_module result = self.auth_client.modules.get(test_module.id) self.validate_module( result, validate_all=True, expected_name=test_module.name, expected_module_type=test_module.type, expected_description=test_module.description, expected_tenant=test_module.tenant, expected_datastore=test_module.datastore, expected_datastore_version=test_module.datastore_version, expected_auto_apply=test_module.auto_apply, expected_live_update=False, expected_visible=True, expected_priority_apply=test_module.priority_apply, expected_apply_order=test_module.apply_order, expected_is_admin=test_module.is_admin) def run_module_show_unauth_user( self, expected_exception=exceptions.NotFound, expected_http_code=404): client = self.unauth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.get, self.main_test_module.id) def run_module_list(self): self.assert_module_list( self.auth_client, ( self.module_count_prior_to_create + self.module_create_count + self.module_all_tenant_create_count ) ) def assert_module_list(self, client, expected_count, datastore=None): if datastore: module_list = client.modules.list(datastore=datastore) else: module_list = client.modules.list() self.assert_equal(expected_count, len(module_list), "Wrong number of modules for list") for module in module_list: # only validate the test modules if module.name.startswith(self.MODULE_NAME): test_module = self._find_module_by_id(module.id) self.validate_module( module, validate_all=True, expected_name=test_module.name, expected_module_type=test_module.type, expected_description=test_module.description, expected_tenant=test_module.tenant, expected_datastore=test_module.datastore, expected_datastore_version=test_module.datastore_version, expected_auto_apply=test_module.auto_apply, expected_priority_apply=test_module.priority_apply, expected_apply_order=test_module.apply_order, expected_is_admin=test_module.is_admin) def run_module_list_unauth_user(self): self.assert_module_list( self.unauth_client, (self.module_other_count_prior_to_create + self.module_all_tenant_create_count + self.module_other_create_count)) def run_module_create_admin_all(self): self.assert_module_create( self.admin_client, 2, all_tenants=True, visible=False, auto_apply=True) def run_module_create_admin_hidden(self): self.assert_module_create( self.admin_client, 3, visible=False) def run_module_create_admin_auto(self): self.assert_module_create( self.admin_client, 4, auto_apply=True) def run_module_create_admin_live_update(self): self.assert_module_create( self.admin_client, 5, live_update=True) self.live_update_orig_md5 = self.test_modules[-1].md5 def run_module_create_admin_priority_apply(self): self.assert_module_create( self.admin_client, 6) def run_module_create_datastore(self): self.assert_module_create( self.admin_client, 7, datastore=self.instance_info.dbaas_datastore) def run_module_create_different_datastore(self): diff_datastore = self._get_different_datastore() if not diff_datastore: raise SkipTest("Could not find a different datastore") self.assert_module_create( self.auth_client, 11, datastore=diff_datastore) def _get_different_datastore(self): different_datastore = None datastores = self.admin_client.datastores.list() for datastore in datastores: self.report.log("Found datastore: %s" % datastore.name) if datastore.name != self.instance_info.dbaas_datastore: 
                different_datastore = datastore.name
                break
        return different_datastore

    def run_module_create_ds_version(self):
        self.assert_module_create(
            self.admin_client, 8,
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version)

    def run_module_create_all_tenant(self):
        self.assert_module_create(
            self.admin_client, 9,
            all_tenants=True,
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version)

    def run_module_create_different_tenant(self):
        self.assert_module_create(
            self.unauth_client, 12)

    def run_module_create_full_access(self):
        self.assert_module_create(
            self.admin_client, 13,
            full_access=True)

    def run_module_full_access_toggle(self):
        self.assert_module_update(
            self.admin_client,
            self.main_test_module.id,
            full_access=False)
        self.assert_module_update(
            self.admin_client,
            self.main_test_module.id,
            full_access=True)

    def run_module_list_again(self):
        self.assert_module_list(
            self.auth_client,
            (self.module_count_prior_to_create +
             self.module_create_count +
             self.module_all_tenant_create_count))

    def run_module_list_ds(self):
        self.assert_module_list(
            self.auth_client,
            (self.module_ds_count_prior_to_create +
             self.module_ds_create_count),
            datastore=self.instance_info.dbaas_datastore)

    def run_module_list_ds_all(self):
        self.assert_module_list(
            self.auth_client,
            (self.module_ds_all_count_prior_to_create +
             self.module_ds_all_create_count),
            datastore=models.Modules.MATCH_ALL_NAME)

    def run_module_show_invisible(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        module = self._find_invisible_module()
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.get, module.id)

    def run_module_list_admin(self):
        self.assert_module_list(
            self.admin_client,
            (self.module_admin_count_prior_to_create +
             self.module_create_count +
             self.module_admin_create_count +
             self.module_other_create_count))

    def run_module_update(self):
        self.assert_module_update(
            self.auth_client,
            self.main_test_module.id,
            description=self.MODULE_DESC + " modified")

    def assert_module_update(self, client, module_id, **kwargs):
        result = client.modules.update(module_id, **kwargs)
        found = False
        index = -1
        for test_module in self.test_modules:
            index += 1
            if test_module.id == module_id:
                found = True
                break
        if not found:
            self.fail("Could not find updated module in module list")
        self.test_modules[index] = result

        expected_args = {}
        for key, value in kwargs.items():
            new_key = 'expected_' + key
            expected_args[new_key] = value
        self.validate_module(result, **expected_args)

    def run_module_update_same_contents(self):
        old_md5 = self.main_test_module.md5
        self.assert_module_update(
            self.auth_client,
            self.main_test_module.id,
            contents=self.get_module_contents(self.main_test_module.name))
        self.assert_equal(old_md5, self.main_test_module.md5,
                          "MD5 changed with same contents")

    def run_module_update_auto_toggle(self,
                                      expected_exception=exceptions.NotFound,
                                      expected_http_code=404):
        module = self._find_auto_apply_module()
        toggle_off_args = {'auto_apply': False}
        toggle_on_args = {'auto_apply': True}
        self.assert_module_toggle(module, toggle_off_args, toggle_on_args,
                                  expected_exception=expected_exception,
                                  expected_http_code=expected_http_code)

    def assert_module_toggle(self, module, toggle_off_args, toggle_on_args,
                             expected_exception, expected_http_code):
        # First try to update the module based on the change
        # (this should toggle the state but still not allow non-admin access)
        client = self.admin_client
        self.assert_module_update(client, module.id, **toggle_off_args)
        # The non-admin client should fail to update
        non_admin_client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code, non_admin_client,
            non_admin_client.modules.update, module.id,
            description='Updated by non-admin')
        # Make sure we can still update with the admin client
        self.assert_module_update(
            client, module.id, description='Updated by admin')
        # Now set it back
        self.assert_module_update(
            client, module.id, description=module.description,
            **toggle_on_args)

    def run_module_update_all_tenant_toggle(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        module = self._find_all_tenant_module()
        toggle_off_args = {'all_tenants': False}
        toggle_on_args = {'all_tenants': True}
        self.assert_module_toggle(module, toggle_off_args, toggle_on_args,
                                  expected_exception=expected_exception,
                                  expected_http_code=expected_http_code)

    def run_module_update_invisible_toggle(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        module = self._find_invisible_module()
        toggle_off_args = {'visible': True}
        toggle_on_args = {'visible': False}
        self.assert_module_toggle(module, toggle_off_args, toggle_on_args,
                                  expected_exception=expected_exception,
                                  expected_http_code=expected_http_code)

    def run_module_update_priority_toggle(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        module = self._find_priority_apply_module()
        toggle_off_args = {'priority_apply': False}
        toggle_on_args = {'priority_apply': True}
        self.assert_module_toggle(module, toggle_off_args, toggle_on_args,
                                  expected_exception=expected_exception,
                                  expected_http_code=expected_http_code)

    def run_module_update_unauth(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        client = self.unauth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.update,
            self.main_test_module.id, description='Upd')

    def run_module_update_non_admin_auto(
            self, expected_exception=exceptions.Forbidden,
            expected_http_code=403):
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.update,
            self.main_test_module.id, auto_apply=True)

    def run_module_update_non_admin_auto_off(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        module = self._find_auto_apply_module()
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.update, module.id, auto_apply=False)

    def run_module_update_non_admin_auto_any(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        module = self._find_auto_apply_module()
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.update, module.id, description='Upd')

    def run_module_update_non_admin_all_tenant(
            self, expected_exception=exceptions.Forbidden,
            expected_http_code=403):
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.update,
            self.main_test_module.id, all_tenants=True)

    def run_module_update_non_admin_all_tenant_off(
            self, expected_exception=exceptions.Forbidden,
            expected_http_code=403):
        module = self._find_all_tenant_module()
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.update, module.id, all_tenants=False)

    def run_module_update_non_admin_all_tenant_any(
            self, expected_exception=exceptions.Forbidden,
            expected_http_code=403):
        module = self._find_all_tenant_module()
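        # Any modification by a non-admin to an all-tenant module, even an
        # innocuous field such as the description, is expected to be
        # rejected as Forbidden below.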
client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.update, module.id, description='Upd') def run_module_update_non_admin_invisible( self, expected_exception=exceptions.Forbidden, expected_http_code=403): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.update, self.main_test_module.id, visible=False) def run_module_update_non_admin_invisible_off( self, expected_exception=exceptions.NotFound, expected_http_code=404): module = self._find_invisible_module() client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.update, module.id, visible=True) def run_module_update_non_admin_invisible_any( self, expected_exception=exceptions.NotFound, expected_http_code=404): module = self._find_invisible_module() client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.update, module.id, description='Upd') # ModuleInstanceGroup methods def run_module_list_instance_empty(self): self.assert_module_list_instance( self.auth_client, self.instance_info.id, self.module_auto_apply_count_prior_to_create) def assert_module_list_instance(self, client, instance_id, expected_count, expected_http_code=200): module_list = client.instances.modules(instance_id) self.assert_client_code(client, expected_http_code) count = len(module_list) self.assert_equal(expected_count, count, "Wrong number of modules from list instance") for module in module_list: self.validate_module(module) def run_module_instances_empty(self): self.assert_module_instances( self.auth_client, self.main_test_module.id, 0) def assert_module_instances(self, client, module_id, expected_count, expected_http_code=200): instance_list = client.modules.instances(module_id) self.assert_client_code(client, expected_http_code) count = len(instance_list) self.assert_equal(expected_count, count, "Wrong number of instances applied from module") def run_module_instance_count_empty(self): self.assert_module_instance_count( self.auth_client, self.main_test_module.id, 0) def assert_module_instance_count(self, client, module_id, expected_rows, expected_count=None, expected_http_code=200): instance_count_list = client.modules.instances(module_id, count_only=True) self.assert_client_code(client, expected_http_code) rowcount = len(instance_count_list) self.assert_equal(expected_rows, rowcount, "Wrong number of instance count records from module") # expected_count is a dict of md5->count pairs. 
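        # For illustration (hypothetical md5 values): a module that was
        # updated after being applied to three instances, then re-applied
        # on just one of them, would be expected to yield:
        #
        #     expected_count = {'<new_md5>': 1, '<old_md5>': 2}
        #
        # i.e. one count record per distinct applied module version.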
if expected_rows and expected_count: for row in instance_count_list: self.assert_equal( expected_count[row.module_md5], row.instance_count, "Wrong count in record from module instances; md5: %s" % row.module_md5) def run_module_query_empty(self): self.assert_module_query( self.auth_client, self.instance_info.id, self.module_auto_apply_count_prior_to_create) def assert_module_query(self, client, instance_id, expected_count, expected_http_code=200, expected_results=None): modquery_list = client.instances.module_query(instance_id) self.assert_client_code(client, expected_http_code) count = len(modquery_list) self.assert_equal(expected_count, count, "Wrong number of modules from query") expected_results = expected_results or {} name_index = len(self.module_name_order) for modquery in modquery_list: if modquery.name in expected_results: self.report.log("Validating module '%s'" % modquery.name) expected = expected_results[modquery.name] self.validate_module_apply_info( modquery, expected_status=expected['status'], expected_message=expected['message']) # make sure we're in the correct order found = False while name_index > 0: name_index -= 1 name_order_rec = self.module_name_order[name_index] order_name = self.MODULE_NAME + name_order_rec['suffix'] self.report.log("Next module order '%s'" % order_name) if order_name == modquery.name: self.report.log("Match found") found = True break if name_index == 0 and not found: self.fail("Module '%s' was not found in the correct order" % modquery.name) def run_module_apply(self): self.assert_module_apply(self.auth_client, self.instance_info.id, self.main_test_module) self.apply_count += 1 def assert_module_apply(self, client, instance_id, module, expected_is_admin=False, expected_status=None, expected_message=None, expected_contents=None, expected_http_code=200): module_apply_list = client.instances.module_apply( instance_id, [module.id]) self.assert_client_code(client, expected_http_code) expected_status = expected_status or 'OK' expected_message = (expected_message or self.get_module_message(module.name)) for module_apply in module_apply_list: self.validate_module_apply_info( module_apply, expected_name=module.name, expected_module_type=module.type, expected_datastore=module.datastore, expected_datastore_version=module.datastore_version, expected_auto_apply=module.auto_apply, expected_visible=module.visible, expected_contents=expected_contents, expected_status=expected_status, expected_message=expected_message, expected_is_admin=expected_is_admin) def validate_module_apply_info(self, module_apply, expected_name=None, expected_module_type=None, expected_datastore=None, expected_datastore_version=None, expected_auto_apply=None, expected_visible=None, expected_contents=None, expected_message=None, expected_status=None, expected_is_admin=None): prefix = "Module: %s -" % expected_name if expected_name: self.assert_equal(expected_name, module_apply.name, '%s Unexpected module name' % prefix) if expected_module_type: self.assert_equal(expected_module_type, module_apply.type, '%s Unexpected module type' % prefix) if expected_datastore: self.assert_equal(expected_datastore, module_apply.datastore, '%s Unexpected datastore' % prefix) if expected_datastore_version: self.assert_equal(expected_datastore_version, module_apply.datastore_version, '%s Unexpected datastore version' % prefix) if expected_auto_apply is not None: self.assert_equal(expected_auto_apply, module_apply.auto_apply, '%s Unexpected auto_apply' % prefix) if expected_visible is not None: 
self.assert_equal(expected_visible, module_apply.visible, '%s Unexpected visible' % prefix) if expected_contents is not None: self.assert_equal(expected_contents, module_apply.contents, '%s Unexpected contents' % prefix) if expected_message is not None: regex = re.compile(expected_message) self.assert_true(regex.match(module_apply.message), "%s Unexpected message '%s', expected '%s'" % (prefix, module_apply.message, expected_message)) if expected_status is not None: self.assert_equal(expected_status, module_apply.status, '%s Unexpected status' % prefix) if expected_is_admin is not None: self.assert_equal(expected_is_admin, module_apply.is_admin, '%s Unexpected is_admin' % prefix) def run_module_apply_wrong_module( self, expected_exception=exceptions.BadRequest, expected_http_code=400): module = self._find_diff_datastore_module() self.report.log("Found 'wrong' module: %s" % module.name) client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.instances.module_apply, self.instance_info.id, [module.id]) def run_module_list_instance_after_apply(self): self.assert_module_list_instance( self.auth_client, self.instance_info.id, self.apply_count + self.module_auto_apply_count_prior_to_create ) def run_module_apply_another(self): self.assert_module_apply(self.auth_client, self.instance_info.id, self.update_test_module) self.apply_count += 1 def run_module_list_instance_after_apply_another(self): self.assert_module_list_instance( self.auth_client, self.instance_info.id, self.apply_count + self.module_auto_apply_count_prior_to_create ) def run_module_update_after_remove(self): name, description, contents, priority, order = ( self.build_module_args(15)) self.assert_module_update( self.auth_client, self.update_test_module.id, name=name, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version, contents=contents) def run_module_instances_after_apply(self): self.assert_module_instances( self.auth_client, self.main_test_module.id, 1) def run_module_instance_count_after_apply(self): self.assert_module_instance_count( self.auth_client, self.main_test_module.id, 1, {self.main_test_module.md5: 1}) def run_module_query_after_apply(self): expected_count = (self.module_auto_apply_count_prior_to_create + self.apply_count) expected_results = self.create_default_query_expected_results( [self.main_test_module]) self.assert_module_query(self.auth_client, self.instance_info.id, expected_count=expected_count, expected_results=expected_results) def create_default_query_expected_results(self, modules, is_admin=False): expected_results = {} for module in modules: status = 'OK' message = self.get_module_message(module.name) contents = self.get_module_contents(module.name) if not is_admin and (not module.visible or module.auto_apply or not module.tenant_id): contents = ('Must be admin to retrieve contents for module %s' % module.name) elif self.MODULE_BINARY_SUFFIX in module.name: status = 'ERROR' message = ('^(Could not extract ping message|' 'Message not found in contents file).*') contents = self.MODULE_BINARY_CONTENTS if self.MODULE_BINARY_SUFFIX2 in module.name: contents = self.MODULE_BINARY_CONTENTS2 expected_results[module.name] = { 'status': status, 'message': message, 'datastore': module.datastore, 'datastore_version': module.datastore_version, 'contents': contents, } return expected_results def run_module_instances_after_apply_another(self): self.assert_module_instances( self.auth_client, self.main_test_module.id, 1) def 
run_module_instance_count_after_apply_another(self): self.assert_module_instance_count( self.auth_client, self.main_test_module.id, 1, {self.main_test_module.md5: 1}) def run_module_query_after_apply_another(self): expected_count = (self.module_auto_apply_count_prior_to_create + self.apply_count) expected_results = self.create_default_query_expected_results( [self.main_test_module, self.update_test_module]) self.assert_module_query(self.auth_client, self.instance_info.id, expected_count=expected_count, expected_results=expected_results) def run_module_update_not_live( self, expected_exception=exceptions.Forbidden, expected_http_code=403): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.update, self.main_test_module.id, description='Do not allow this change') def run_module_apply_live_update(self): module = self.live_update_test_module self.assert_module_apply(self.auth_client, self.instance_info.id, module, expected_is_admin=module.is_admin) self.apply_count += 1 def run_module_list_instance_after_apply_live(self): self.assert_module_list_instance( self.auth_client, self.instance_info.id, self.apply_count + self.module_auto_apply_count_prior_to_create ) def run_module_update_live_update(self): module = self.live_update_test_module new_contents = self.get_module_contents(name=module.name + '_upd') self.assert_module_update( self.admin_client, module.id, contents=new_contents) def run_module_update_after_remove_again(self): self.assert_module_update( self.auth_client, self.update_test_module.id, name=self.MODULE_NAME + '_updated_back', all_datastores=True, all_datastore_versions=True) def run_create_inst_with_mods(self, expected_http_code=200): live_update = self.live_update_test_module self.mod_inst_id = self.assert_inst_mod_create( [self.main_test_module.id, live_update.id], '_module', expected_http_code) self.mod_inst_apply_count += 2 def assert_inst_mod_create(self, module_ids, name_suffix, expected_http_code): client = self.auth_client inst = client.instances.create( self.instance_info.name + name_suffix, self.instance_info.dbaas_flavor_href, self.instance_info.volume, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version, nics=self.instance_info.nics, modules=module_ids, ) self.assert_client_code(client, expected_http_code) self.register_debug_inst_ids(inst.id) return inst.id def run_create_inst_with_wrong_module( self, expected_exception=exceptions.BadRequest, expected_http_code=400): module = self._find_diff_datastore_module() self.report.log("Found 'wrong' module: %s" % module.name) client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.instances.create, self.instance_info.name + '_wrong_ds', self.instance_info.dbaas_flavor_href, self.instance_info.volume, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version, nics=self.instance_info.nics, modules=[module.id]) def run_module_delete_applied( self, expected_exception=exceptions.Forbidden, expected_http_code=403): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.delete, self.main_test_module.id) def run_module_remove(self): self.assert_module_remove(self.auth_client, self.instance_info.id, self.update_test_module.id) self.apply_count -= 1 def assert_module_remove(self, client, instance_id, module_id, expected_http_code=200): client.instances.module_remove(instance_id, 
module_id) self.assert_client_code(client, expected_http_code) def run_wait_for_inst_with_mods(self, expected_states=['BUILD', 'HEALTHY']): self.assert_instance_action(self.mod_inst_id, expected_states) def run_module_query_after_inst_create(self): auto_modules = self._find_all_auto_apply_modules(visible=True) expected_count = self.mod_inst_apply_count + len(auto_modules) expected_results = self.create_default_query_expected_results( [self.main_test_module] + auto_modules) self.assert_module_query(self.auth_client, self.mod_inst_id, expected_count=expected_count, expected_results=expected_results) def run_module_retrieve_after_inst_create(self): auto_modules = self._find_all_auto_apply_modules(visible=True) expected_count = self.mod_inst_apply_count + len(auto_modules) expected_results = self.create_default_query_expected_results( [self.main_test_module] + auto_modules) self.assert_module_retrieve(self.auth_client, self.mod_inst_id, expected_count=expected_count, expected_results=expected_results) def assert_module_retrieve(self, client, instance_id, expected_count, expected_http_code=200, expected_results=None): try: temp_dir = tempfile.mkdtemp() prefix = 'contents' modretrieve_list = client.instances.module_retrieve( instance_id, directory=temp_dir, prefix=prefix) self.assert_client_code(client, expected_http_code) count = len(modretrieve_list) self.assert_equal(expected_count, count, "Wrong number of modules from retrieve") expected_results = expected_results or {} for module_name, filename in modretrieve_list.items(): if module_name in expected_results: expected = expected_results[module_name] contents_name = '%s_%s_%s_%s' % ( prefix, module_name, expected['datastore'], expected['datastore_version']) expected_filename = guestagent_utils.build_file_path( temp_dir, contents_name, 'dat') self.assert_equal(expected_filename, filename, 'Unexpected retrieve filename') if 'contents' in expected and expected['contents']: with open(filename, 'rb') as fh: contents = fh.read() expected = expected['contents'] if isinstance(expected, six.string_types): expected = expected.encode() self.assert_equal(expected, contents, "Unexpected contents for %s" % module_name) finally: operating_system.remove(temp_dir) def run_module_query_after_inst_create_admin(self): auto_modules = self._find_all_auto_apply_modules() expected_count = self.mod_inst_apply_count + len(auto_modules) expected_results = self.create_default_query_expected_results( [self.main_test_module] + auto_modules, is_admin=True) self.assert_module_query(self.admin_client, self.mod_inst_id, expected_count=expected_count, expected_results=expected_results) def run_module_retrieve_after_inst_create_admin(self): auto_modules = self._find_all_auto_apply_modules() expected_count = self.mod_inst_apply_count + len(auto_modules) expected_results = self.create_default_query_expected_results( [self.main_test_module] + auto_modules, is_admin=True) self.assert_module_retrieve(self.admin_client, self.mod_inst_id, expected_count=expected_count, expected_results=expected_results) def run_module_delete_auto_applied( self, expected_exception=exceptions.Forbidden, expected_http_code=403): module = self._find_auto_apply_module() client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.modules.delete, module.id) def run_module_list_instance_after_mod_inst(self): self.assert_module_list_instance( self.auth_client, self.mod_inst_id, self.module_auto_apply_create_count + 2) def run_module_instances_after_mod_inst(self): 
        self.assert_module_instances(
            self.auth_client, self.live_update_test_module.id, 2)

    def run_module_instance_count_after_mod_inst(self):
        self.assert_module_instance_count(
            self.auth_client, self.live_update_test_module.id, 2,
            {self.live_update_test_module.md5: 1,
             self.live_update_orig_md5: 1})

    def run_module_reapply_with_md5(self, expected_http_code=202):
        self.assert_module_reapply(
            self.auth_client, self.live_update_test_module,
            expected_http_code=expected_http_code,
            md5=self.live_update_test_module.md5)

    def assert_module_reapply(self, client, module, expected_http_code,
                              md5=None, force=False):
        self.reapply_max_upd_date = self.get_updated(client, module.id)
        client.modules.reapply(module.id, md5=md5, force=force)
        self.assert_client_code(client, expected_http_code)

    def run_module_reapply_with_md5_verify(self):
        # This reapply doesn't change any module md5s, so there is no
        # reliable way to 'wait' for it to finish; just sleep for a couple
        # of seconds to give it time to run.
        time.sleep(2)
        # Verify the reapply happened by checking that at least one
        # record's max_updated_date now differs from the value saved
        # before the reapply was issued.
        module_id = self.live_update_test_module.id
        instance_count_list = self.auth_client.modules.instances(
            module_id, count_only=True)
        mismatch = False
        for instance_count in instance_count_list:
            if self.reapply_max_upd_date != instance_count.max_updated_date:
                mismatch = True
        self.assert_true(
            mismatch,
            "Could not find record having max_updated_date different "
            "from %s" % self.reapply_max_upd_date)

    def run_module_list_instance_after_reapply_md5(self):
        self.assert_module_list_instance(
            self.auth_client, self.mod_inst_id,
            self.module_auto_apply_create_count + 2)

    def run_module_instances_after_reapply_md5(self):
        self.assert_module_instances(
            self.auth_client, self.live_update_test_module.id, 2)

    def run_module_instance_count_after_reapply_md5(self):
        self.assert_module_instance_count(
            self.auth_client, self.live_update_test_module.id, 2,
            {self.live_update_test_module.md5: 1,
             self.live_update_orig_md5: 1})

    def run_module_reapply_all(self, expected_http_code=202):
        module_id = self.live_update_test_module.id
        client = self.auth_client
        self.reapply_max_upd_date = self.get_updated(client, module_id)
        self.assert_module_reapply(
            client, self.live_update_test_module,
            expected_http_code=expected_http_code)

    def run_module_reapply_all_wait(self):
        self.wait_for_reapply(
            self.auth_client, self.live_update_test_module.id,
            md5=self.live_update_orig_md5)

    def wait_for_reapply(self, client, module_id, updated=None, md5=None):
        """Reapply is done when all the counts for 'md5' are gone.
        If 'updated' is passed in, the min_updated_date values must all be
        greater than it.
        """
        if not updated and not md5:
            raise RuntimeError("Code error: Must pass in 'updated' or 'md5'.")
        self.report.log("Waiting for all md5:%s modules to have an updated "
                        "date greater than %s" % (md5, updated))

        def _all_updated():
            min_updated = self.get_updated(
                client, module_id, max=False, md5=md5)
            if md5:
                return min_updated is None
            return min_updated > updated

        timeout = 60
        try:
            poll_until(_all_updated, time_out=timeout, sleep_time=5)
            self.report.log("All instances now have the current module "
                            "for md5: %s." % md5)
        except exception.PollTimeOut:
            self.fail("Some instances were not updated within the "
                      "timeout: %ds" % timeout)

    def get_updated(self, client, module_id, max=True, md5=None):
        updated = None
        instance_count_list = client.modules.instances(
            module_id, count_only=True)
        for instance_count in instance_count_list:
            if not md5 or md5 == instance_count.module_md5:
                if not updated or (
                        (max and instance_count.max_updated_date > updated) or
                        (not max and
                         instance_count.min_updated_date < updated)):
                    updated = (instance_count.max_updated_date
                               if max else instance_count.min_updated_date)
        return updated

    def run_module_list_instance_after_reapply(self):
        self.assert_module_list_instance(
            self.auth_client, self.mod_inst_id,
            self.module_auto_apply_create_count + 2)

    def run_module_instances_after_reapply(self):
        self.assert_module_instances(
            self.auth_client, self.live_update_test_module.id, 2)

    def run_module_instance_count_after_reapply(self):
        self.assert_module_instance_count(
            self.auth_client, self.live_update_test_module.id, 1,
            {self.live_update_test_module.md5: 2})

    def run_module_reapply_with_force(self, expected_http_code=202):
        self.assert_module_reapply(
            self.auth_client, self.live_update_test_module,
            expected_http_code=expected_http_code, force=True)

    def run_module_reapply_with_force_wait(self):
        self.wait_for_reapply(
            self.auth_client, self.live_update_test_module.id,
            updated=self.reapply_max_upd_date)

    def run_delete_inst_with_mods(self, expected_http_code=202):
        self.assert_delete_instance(self.mod_inst_id, expected_http_code)

    def assert_delete_instance(self, instance_id, expected_http_code):
        client = self.auth_client
        client.instances.delete(instance_id)
        self.assert_client_code(client, expected_http_code)

    def run_remove_mods_from_main_inst(self, expected_http_code=200):
        client = self.auth_client
        modquery_list = client.instances.module_query(self.instance_info.id)
        self.assert_client_code(client, expected_http_code)
        for modquery in modquery_list:
            client.instances.module_remove(self.instance_info.id,
                                           modquery.id)
            self.assert_client_code(client, expected_http_code)

    def run_wait_for_delete_inst_with_mods(
            self, expected_last_state=['SHUTDOWN']):
        self.assert_all_gone(self.mod_inst_id, expected_last_state)

    # ModuleDeleteGroup methods
    def run_module_delete_non_existent(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.delete, 'bad_id')

    def run_module_delete_unauth_user(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        client = self.unauth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.delete, self.main_test_module.id)

    def run_module_delete_hidden_by_non_admin(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        module = self._find_invisible_module()
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.delete, module.id)

    def run_module_delete_all_tenant_by_non_admin(
            self, expected_exception=exceptions.Forbidden,
            expected_http_code=403):
        module = self._find_all_tenant_module()
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code,
            client, client.modules.delete, module.id)

    def run_module_delete_auto_by_non_admin(
            self, expected_exception=exceptions.Forbidden,
            expected_http_code=403):
        module = self._find_auto_apply_module()
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code, client,
            client.modules.delete, module.id)

    def run_module_delete(self):
        expected_count = len(self.auth_client.modules.list()) - 1
        test_module = self.test_modules.pop(0)
        self.assert_module_delete(self.auth_client, test_module.id,
                                  expected_count)

    def run_module_delete_admin(self):
        start_count = count = len(self.admin_client.modules.list())
        for test_module in self.test_modules:
            count -= 1
            self.report.log("Deleting module '%s' (tenant: %s)" % (
                test_module.name, test_module.tenant_id))
            self.assert_module_delete(self.admin_client, test_module.id,
                                      count)
        self.assert_not_equal(start_count, count, "Nothing was deleted")
        count = len(self.admin_client.modules.list())
        self.assert_equal(self.module_admin_count_prior_to_create, count,
                          "Wrong number of admin modules after deleting all")
        count = len(self.auth_client.modules.list())
        self.assert_equal(self.module_count_prior_to_create, count,
                          "Wrong number of modules after deleting all")

    def assert_module_delete(self, client, module_id, expected_count):
        client.modules.delete(module_id)
        count = len(client.modules.list())
        self.assert_equal(expected_count, count,
                          "Wrong number of modules after delete")

trove-12.1.0.dev92/trove/tests/scenario/runners/negative_cluster_actions_runners.py

# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import SkipTest

from trove.tests.scenario.runners.test_runners import TestRunner
from troveclient.compat import exceptions


class NegativeClusterActionsRunner(TestRunner):

    def __init__(self):
        super(NegativeClusterActionsRunner, self).__init__()

    def run_create_constrained_size_cluster(self, min_nodes=2,
                                            max_nodes=None,
                                            expected_http_code=400):
        self.assert_create_constrained_size_cluster('negative_cluster',
                                                    min_nodes, max_nodes,
                                                    expected_http_code)

    def assert_create_constrained_size_cluster(self, cluster_name,
                                               min_nodes, max_nodes,
                                               expected_http_code):
        # Create a cluster with less than 'min_nodes'.
        if min_nodes:
            instances_def = [self.build_flavor()] * (min_nodes - 1)
            self._assert_cluster_create_raises(cluster_name, instances_def,
                                               expected_http_code)

        # Create a cluster with more than 'max_nodes'.
        if max_nodes:
            instances_def = [self.build_flavor()] * (max_nodes + 1)
            self._assert_cluster_create_raises(cluster_name, instances_def,
                                               expected_http_code)

    def run_create_heterogeneous_cluster(self, expected_http_code=400):
        # Create a cluster with different node flavors.
        instances_def = [self.build_flavor(flavor_id=2, volume_size=1),
                         self.build_flavor(flavor_id=3, volume_size=1)]
        self._assert_cluster_create_raises('heterocluster', instances_def,
                                           expected_http_code)

        # Create a cluster with different volume sizes.
        instances_def = [self.build_flavor(flavor_id=2, volume_size=1),
                         self.build_flavor(flavor_id=2, volume_size=2)]
        self._assert_cluster_create_raises('heterocluster', instances_def,
                                           expected_http_code)

    def _assert_cluster_create_raises(self, cluster_name, instances_def,
                                      expected_http_code):
        client = self.auth_client
        self.assert_raises(exceptions.BadRequest, expected_http_code,
                           client, client.clusters.create,
                           cluster_name,
                           self.instance_info.dbaas_datastore,
                           self.instance_info.dbaas_datastore_version,
                           instances=instances_def)


class MongodbNegativeClusterActionsRunner(NegativeClusterActionsRunner):

    def run_create_constrained_size_cluster(self):
        super(MongodbNegativeClusterActionsRunner,
              self).run_create_constrained_size_cluster(min_nodes=3,
                                                        max_nodes=3)


class CassandraNegativeClusterActionsRunner(NegativeClusterActionsRunner):

    def run_create_constrained_size_cluster(self):
        raise SkipTest("No constraints apply to the number of cluster nodes.")

    def run_create_heterogeneous_cluster(self):
        raise SkipTest("No constraints apply to the size of cluster nodes.")


class RedisNegativeClusterActionsRunner(NegativeClusterActionsRunner):

    def run_create_constrained_size_cluster(self):
        raise SkipTest("No constraints apply to the number of cluster nodes.")

    def run_create_heterogeneous_cluster(self):
        raise SkipTest("No constraints apply to the size of cluster nodes.")


class PxcNegativeClusterActionsRunner(NegativeClusterActionsRunner):

    def run_create_constrained_size_cluster(self):
        raise SkipTest("No constraints apply to the number of cluster nodes.")

trove-12.1.0.dev92/trove/tests/scenario/runners/replication_runners.py

# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from trove.common import utils
from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario import runners
from trove.tests.scenario.runners.test_runners import CheckInstance
from trove.tests.scenario.runners.test_runners import SkipKnownBug
from trove.tests.scenario.runners.test_runners import TestRunner
from troveclient.compat import exceptions


class ReplicationRunner(TestRunner):

    def __init__(self):
        super(ReplicationRunner, self).__init__()

        self.master_id = self.instance_info.id
        self.replica_1_id = 0
        self.master_host = self.get_instance_host(self.master_id)
        self.replica_1_host = None
        self.master_backup_count = None
        self.used_data_sets = set()
        self.non_affinity_master_id = None
        self.non_affinity_srv_grp_id = None
        self.non_affinity_repl_id = None
        self.locality = 'affinity'

    def run_add_data_for_replication(self, data_type=DataType.small):
        self.assert_add_replication_data(data_type, self.master_host)

    def assert_add_replication_data(self, data_type, host):
        """In order for this to work, the corresponding datastore
        'helper' class should implement the 'add_actual_data' method.
        """
        self.test_helper.add_data(data_type, host)
        self.used_data_sets.add(data_type)

    def run_add_data_after_replica(self, data_type=DataType.micro):
        self.assert_add_replication_data(data_type, self.master_host)

    def run_verify_data_for_replication(self, data_type=DataType.small):
        self.assert_verify_replication_data(data_type, self.master_host)

    def assert_verify_replication_data(self, data_type, host):
        """In order for this to work, the corresponding datastore
        'helper' class should implement the 'verify_actual_data' method.
        """
        self.test_helper.verify_data(data_type, host)

    def run_create_non_affinity_master(self, expected_http_code=200):
        client = self.auth_client
        self.non_affinity_master_id = client.instances.create(
            self.instance_info.name + '_non-affinity',
            self.instance_info.dbaas_flavor_href,
            self.instance_info.volume,
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version,
            nics=self.instance_info.nics,
            locality='anti-affinity').id
        self.assert_client_code(client, expected_http_code)
        self.register_debug_inst_ids(self.non_affinity_master_id)

    def run_create_single_replica(self, expected_http_code=200):
        self.master_backup_count = len(
            self.auth_client.instances.backups(self.master_id))
        self.replica_1_id = self.assert_replica_create(
            self.master_id, 'replica1', 1, expected_http_code)[0]

    def assert_replica_create(
            self, master_id, replica_name, replica_count,
            expected_http_code):
        # When creating multiple replicas, only one replica info will be
        # returned, so we should compare the replica set members before and
        # after the creation to get the correct new replica ids.
        original_replicas = self._get_replica_set(master_id)
        client = self.auth_client
        client.instances.create(
            self.instance_info.name + '_' + replica_name,
            self.instance_info.dbaas_flavor_href,
            self.instance_info.volume,
            replica_of=master_id,
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version,
            nics=self.instance_info.nics,
            replica_count=replica_count)
        self.assert_client_code(client, expected_http_code)
        new_replicas = self._get_replica_set(master_id) - original_replicas
        self.register_debug_inst_ids(new_replicas)
        return list(new_replicas)

    def run_wait_for_single_replica(self,
                                    expected_states=['BUILD', 'HEALTHY']):
        self.assert_instance_action(self.replica_1_id, expected_states)
        self._assert_is_master(self.master_id, [self.replica_1_id])
        self._assert_is_replica(self.replica_1_id, self.master_id)
        self._assert_locality(self.master_id)
        self.replica_1_host = self.get_instance_host(self.replica_1_id)

    def _assert_is_master(self, instance_id, replica_ids):
        client = self.admin_client
        instance = self.get_instance(instance_id, client=client)
        self.assert_client_code(client, 200)
        CheckInstance(instance._info).slaves()
        self.assert_true(
            set(replica_ids).issubset(self._get_replica_set(instance_id)))
        self._validate_master(instance_id)

    def _get_replica_set(self, master_id):
        instance = self.get_instance(master_id)
        # Return an empty set before the first replica is created.
        return set([replica['id']
                    for replica in instance._info.get('replicas', [])])

    def _assert_is_replica(self, instance_id, master_id):
        client = self.admin_client
        instance = self.get_instance(instance_id, client=client)
        self.assert_client_code(client, 200)
        CheckInstance(instance._info).replica_of()
        self.assert_equal(master_id, instance._info['replica_of']['id'],
                          'Unexpected replication master ID')
        self._validate_replica(instance_id)

    def _assert_locality(self, instance_id):
        replica_ids =
self._get_replica_set(instance_id) instance = self.get_instance(instance_id) self.assert_equal(self.locality, instance.locality, "Unexpected locality for instance '%s'" % instance_id) for replica_id in replica_ids: replica = self.get_instance(replica_id) self.assert_equal(self.locality, replica.locality, "Unexpected locality for instance '%s'" % replica_id) def run_wait_for_non_affinity_master(self, expected_states=['BUILD', 'HEALTHY']): self._assert_instance_states(self.non_affinity_master_id, expected_states) self.non_affinity_srv_grp_id = self.assert_server_group_exists( self.non_affinity_master_id) def run_create_non_affinity_replica(self, expected_http_code=200): client = self.auth_client self.non_affinity_repl_id = client.instances.create( self.instance_info.name + '_non-affinity-repl', self.instance_info.dbaas_flavor_href, self.instance_info.volume, datastore=self.instance_info.dbaas_datastore, datastore_version=self.instance_info.dbaas_datastore_version, nics=self.instance_info.nics, replica_of=self.non_affinity_master_id, replica_count=1).id self.assert_client_code(client, expected_http_code) self.register_debug_inst_ids(self.non_affinity_repl_id) def run_create_multiple_replicas(self, expected_http_code=200): self.assert_replica_create(self.master_id, 'replica2', 2, expected_http_code) def run_wait_for_multiple_replicas( self, expected_states=['BUILD', 'HEALTHY']): replica_ids = self._get_replica_set(self.master_id) self.report.log("Waiting for replicas: %s" % replica_ids) self.assert_instance_action(replica_ids, expected_states) self._assert_is_master(self.master_id, replica_ids) for replica_id in replica_ids: self._assert_is_replica(replica_id, self.master_id) self._assert_locality(self.master_id) def run_wait_for_non_affinity_replica_fail( self, expected_states=['BUILD', 'ERROR']): self._assert_instance_states(self.non_affinity_repl_id, expected_states, fast_fail_status=['HEALTHY']) def run_delete_non_affinity_repl(self, expected_http_code=202): self.assert_delete_instances( self.non_affinity_repl_id, expected_http_code=expected_http_code) def assert_delete_instances(self, instance_ids, expected_http_code): instance_ids = (instance_ids if utils.is_collection(instance_ids) else [instance_ids]) client = self.auth_client for instance_id in instance_ids: client.instances.delete(instance_id) self.assert_client_code(client, expected_http_code) def run_wait_for_delete_non_affinity_repl( self, expected_last_status=['SHUTDOWN']): self.assert_all_gone([self.non_affinity_repl_id], expected_last_status=expected_last_status) def run_delete_non_affinity_master(self, expected_http_code=202): self.assert_delete_instances( self.non_affinity_master_id, expected_http_code=expected_http_code) def run_wait_for_delete_non_affinity_master( self, expected_last_status=['SHUTDOWN']): self.assert_all_gone([self.non_affinity_master_id], expected_last_status=expected_last_status) self.assert_server_group_gone(self.non_affinity_srv_grp_id) def run_add_data_to_replicate(self): self.assert_add_replication_data(DataType.tiny, self.master_host) def run_verify_data_to_replicate(self): self.assert_verify_replication_data(DataType.tiny, self.master_host) def run_verify_replica_data_orig(self): self.assert_verify_replica_data(self.instance_info.id, DataType.small) def assert_verify_replica_data(self, master_id, data_type): replica_ids = self._get_replica_set(master_id) for replica_id in replica_ids: host = self.get_instance_host(replica_id) self.report.log("Checking data on host %s" % host) 
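            # Every data set previously written to the master should be
            # readable from each replica's host address once replication
            # has caught up; the datastore-specific helper performs the
            # actual read-back (see assert_verify_replication_data above).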
self.assert_verify_replication_data(data_type, host) def run_verify_replica_data_after_single(self): self.assert_verify_replica_data(self.instance_info.id, DataType.micro) def run_verify_replica_data_new(self): self.assert_verify_replica_data(self.instance_info.id, DataType.tiny) def run_promote_master(self, expected_exception=exceptions.BadRequest, expected_http_code=400): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.instances.promote_to_replica_source, self.instance_info.id) def run_eject_replica(self, expected_exception=exceptions.BadRequest, expected_http_code=400): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.instances.eject_replica_source, self.replica_1_id) def run_eject_valid_master(self, expected_exception=exceptions.BadRequest, expected_http_code=400): # client = self.auth_client # self.assert_raises( # expected_exception, expected_http_code, # client, client.instances.eject_replica_source, # self.instance_info.id) # Uncomment once BUG_EJECT_VALID_MASTER is fixed raise SkipKnownBug(runners.BUG_EJECT_VALID_MASTER) def run_delete_valid_master(self, expected_exception=exceptions.Forbidden, expected_http_code=403): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.instances.delete, self.instance_info.id) def run_promote_to_replica_source(self, expected_states=['PROMOTE', 'HEALTHY'], expected_http_code=202): self.assert_promote_to_replica_source( self.replica_1_id, self.instance_info.id, expected_states, expected_http_code) def assert_promote_to_replica_source( self, new_master_id, old_master_id, expected_states, expected_http_code): original_replica_ids = self._get_replica_set(old_master_id) other_replica_ids = list(original_replica_ids) other_replica_ids.remove(new_master_id) # Promote replica self.assert_replica_promote(new_master_id, expected_states, expected_http_code) current_replica_ids = list(other_replica_ids) current_replica_ids.append(old_master_id) self._assert_is_master(new_master_id, current_replica_ids) self._assert_is_replica(old_master_id, new_master_id) def assert_replica_promote( self, new_master_id, expected_states, expected_http_code): client = self.auth_client client.instances.promote_to_replica_source(new_master_id) self.assert_client_code(client, expected_http_code) self.assert_instance_action(new_master_id, expected_states) def run_verify_replica_data_new_master(self): self.assert_verify_replication_data( DataType.small, self.replica_1_host) self.assert_verify_replication_data( DataType.tiny, self.replica_1_host) def run_add_data_to_replicate2(self): self.assert_add_replication_data(DataType.tiny2, self.replica_1_host) def run_verify_data_to_replicate2(self): self.assert_verify_replication_data(DataType.tiny2, self.replica_1_host) def run_verify_replica_data_new2(self): self.assert_verify_replica_data(self.replica_1_id, DataType.tiny2) def run_promote_original_source(self, expected_states=['PROMOTE', 'HEALTHY'], expected_http_code=202): self.assert_promote_to_replica_source( self.instance_info.id, self.replica_1_id, expected_states, expected_http_code) def run_add_final_data_to_replicate(self): self.assert_add_replication_data(DataType.tiny3, self.master_host) def run_verify_data_to_replicate_final(self): self.assert_verify_replication_data(DataType.tiny3, self.master_host) def run_verify_final_data_replicated(self): self.assert_verify_replica_data(self.master_id, DataType.tiny3) def 
run_remove_replicated_data(self): self.assert_remove_replicated_data(self.master_host) def assert_remove_replicated_data(self, host): """In order for this to work, the corresponding datastore 'helper' class should implement the 'remove_actual_data' method. """ for data_set in self.used_data_sets: self.report.log("Removing replicated data set: %s" % data_set) self.test_helper.remove_data(data_set, host) def run_detach_replica_from_source(self, expected_states=['DETACH', 'HEALTHY'], expected_http_code=202): self.assert_detach_replica_from_source( self.instance_info.id, self.replica_1_id, expected_states, expected_http_code) def assert_detach_replica_from_source( self, master_id, replica_id, expected_states, expected_http_code): other_replica_ids = self._get_replica_set(master_id) other_replica_ids.remove(replica_id) self.assert_detach_replica( replica_id, expected_states, expected_http_code) self._assert_is_master(master_id, other_replica_ids) self._assert_is_not_replica(replica_id) def assert_detach_replica( self, replica_id, expected_states, expected_http_code): client = self.auth_client client.instances.edit(replica_id, detach_replica_source=True) self.assert_client_code(client, expected_http_code) self.assert_instance_action(replica_id, expected_states) def _assert_is_not_replica(self, instance_id): client = self.admin_client instance = self.get_instance(instance_id, client=client) self.assert_client_code(client, 200) if 'replica_of' not in instance._info: try: self._validate_replica(instance_id) self.fail("The instance is still configured as a replica " "after detached: %s" % instance_id) except AssertionError: pass else: self.fail("Unexpected replica_of ID.") def run_delete_detached_replica(self, expected_http_code=202): self.assert_delete_instances( self.replica_1_id, expected_http_code=expected_http_code) def run_delete_all_replicas(self, expected_http_code=202): self.assert_delete_all_replicas( self.instance_info.id, expected_http_code) def assert_delete_all_replicas( self, master_id, expected_http_code): self.report.log("Deleting a replication set: %s" % master_id) replica_ids = self._get_replica_set(master_id) self.assert_delete_instances(replica_ids, expected_http_code) def run_wait_for_delete_replicas( self, expected_last_status=['SHUTDOWN']): replica_ids = self._get_replica_set(self.master_id) replica_ids.add(self.replica_1_id) self.assert_all_gone(replica_ids, expected_last_status=expected_last_status) def run_test_backup_deleted(self): backup = self.auth_client.instances.backups(self.master_id) self.assert_equal(self.master_backup_count, len(backup)) def run_cleanup_master_instance(self): pass def _validate_master(self, instance_id): """This method is intended to be overridden by each datastore as needed. It is to be used for any database specific master instance validation. """ pass def _validate_replica(self, instance_id): """This method is intended to be overridden by each datastore as needed. It is to be used for any database specific replica instance validation. """ pass class MysqlReplicationRunner(ReplicationRunner): def run_cleanup_master_instance(self): for user in self.auth_client.users.list(self.master_id): if user.name.startswith("slave_"): self.auth_client.users.delete(self.master_id, user.name, user.host) def _validate_master(self, instance_id): """For Mysql validate that the master has its binlog_format set to MIXED. 
""" host = self.get_instance_host(instance_id) self._validate_binlog_fmt(instance_id, host) def _validate_replica(self, instance_id): """For Mysql validate that any replica has its binlog_format set to MIXED and it is in read_only mode. """ host = self.get_instance_host(instance_id) self._validate_binlog_fmt(instance_id, host) self._validate_read_only(instance_id, host) def _validate_binlog_fmt(self, instance_id, host): binlog_fmt = self.test_helper.get_configuration_value('binlog_format', host) self.assert_equal(self._get_expected_binlog_format(), binlog_fmt, 'Wrong binlog format detected for %s' % instance_id) def _get_expected_binlog_format(self): return 'MIXED' def _validate_read_only(self, instance_id, host): read_only = self.test_helper.get_configuration_value('read_only', host) self.assert_equal('ON', read_only, 'Wrong read only mode detected ' 'for %s' % instance_id) class PerconaReplicationRunner(MysqlReplicationRunner): pass class MariadbReplicationRunner(MysqlReplicationRunner): def _get_expected_binlog_format(self): return 'STATEMENT' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/scenario/runners/root_actions_runners.py0000644000175000017500000002374700000000000026615 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from proboscis import SkipTest from trove.common import utils from trove.tests.scenario import runners from trove.tests.scenario.runners.test_runners import SkipKnownBug from trove.tests.scenario.runners.test_runners import TestRunner from troveclient.compat import exceptions class RootActionsRunner(TestRunner): def __init__(self): self.current_root_creds = None self.restored_root_creds = None self.restored_root_creds2 = None super(RootActionsRunner, self).__init__() def run_check_root_never_enabled(self, expected_http_code=200): self.assert_root_disabled(self.instance_info.id, expected_http_code) def assert_root_disabled(self, instance_id, expected_http_code): self._assert_root_state(instance_id, False, expected_http_code, "The root has already been enabled on the " "instance.") def _assert_root_state(self, instance_id, expected_state, expected_http_code, message): # The call returns a nameless user object with 'rootEnabled' attribute. 
client = self.auth_client response = client.root.is_root_enabled(instance_id) self.assert_client_code(client, expected_http_code) actual_state = getattr(response, 'rootEnabled', None) self.assert_equal(expected_state, actual_state, message) def run_disable_root_before_enabled( self, expected_exception=exceptions.NotFound, expected_http_code=404): self.assert_root_disable_failure( self.instance_info.id, expected_exception, expected_http_code) def assert_root_disable_failure(self, instance_id, expected_exception, expected_http_code): client = self.auth_client self.assert_raises(expected_exception, expected_http_code, client, client.root.delete, instance_id) def run_enable_root_no_password(self, expected_http_code=200): root_credentials = self.test_helper.get_helper_credentials_root() self.current_root_creds = self.assert_root_create( self.instance_info.id, None, root_credentials['name'], expected_http_code) self.restored_root_creds = list(self.current_root_creds) def assert_root_create(self, instance_id, root_password, expected_root_name, expected_http_code): client = self.auth_client if root_password is not None: root_creds = client.root.create_instance_root( instance_id, root_password) self.assert_equal(root_password, root_creds[1]) else: root_creds = client.root.create(instance_id) self.assert_client_code(client, expected_http_code) if expected_root_name is not None: self.assert_equal(expected_root_name, root_creds[0]) self.assert_can_connect(instance_id, root_creds) return root_creds def assert_can_connect(self, instance_id, test_connect_creds): self._assert_connect(instance_id, True, test_connect_creds) def _assert_connect(self, instance_id, expected_response, test_connect_creds): host = self.get_instance_host(instance_id=instance_id) self.report.log( "Pinging instance %s with credentials: %s, database: %s" % (instance_id, test_connect_creds, self.test_helper.credentials.get("database")) ) ping_response = self.test_helper.ping( host, username=test_connect_creds[0], password=test_connect_creds[1], database=self.test_helper.credentials.get("database") ) self.assert_equal(expected_response, ping_response) def run_check_root_enabled(self, expected_http_code=200): self.assert_root_enabled(self.instance_info.id, expected_http_code) def assert_root_enabled(self, instance_id, expected_http_code): self._assert_root_state(instance_id, True, expected_http_code, "The root has not been enabled on the " "instance yet.") def run_enable_root_with_password(self, expected_http_code=200): root_credentials = self.test_helper.get_helper_credentials_root() password = root_credentials['password'] if password is not None: self.current_root_creds = self.assert_root_create( self.instance_info.id, password, root_credentials['name'], expected_http_code) else: raise SkipTest("No valid root password defined in %s." 
                           % self.test_helper.get_class_name())

    def run_disable_root(self, expected_http_code=204):
        self.restored_root_creds2 = list(self.current_root_creds)
        self.assert_root_disable(self.instance_info.id, expected_http_code)

    def assert_root_disable(self, instance_id, expected_http_code):
        client = self.auth_client
        client.root.delete(instance_id)
        self.assert_client_code(client, expected_http_code)
        self.assert_cannot_connect(self.instance_info.id,
                                   self.current_root_creds)

    def assert_cannot_connect(self, instance_id, test_connect_creds):
        self._assert_connect(instance_id, False, test_connect_creds)

    def run_check_root_still_enabled_after_disable(
            self, expected_http_code=200):
        self.assert_root_enabled(self.instance_info.id, expected_http_code)

    def run_delete_root(self, expected_exception=exceptions.BadRequest,
                        expected_http_code=400):
        self.assert_root_delete_failure(
            self.instance_info.id, expected_exception, expected_http_code)

    def assert_root_delete_failure(self, instance_id, expected_exception,
                                   expected_http_code):
        root_user_name = self.current_root_creds[0]
        client = self.auth_client
        self.assert_raises(expected_exception, expected_http_code,
                           client, client.users.delete,
                           instance_id, root_user_name)

    def run_check_root_enabled_after_restore(
            self, restored_instance_id, restored_creds,
            expected_http_code=200):
        self.assert_root_enabled_after_restore(
            restored_instance_id, restored_creds, True, expected_http_code)

    def run_check_root_enabled_after_restore2(
            self, restored_instance_id, restored_creds,
            expected_http_code=200):
        self.assert_root_enabled_after_restore(
            restored_instance_id, restored_creds, False, expected_http_code)

    def assert_root_enabled_after_restore(
            self, restored_instance_id, restored_creds,
            expected_connect_response, expected_http_code):
        if restored_instance_id:
            self.assert_root_enabled(restored_instance_id,
                                     expected_http_code)
            self._assert_connect(restored_instance_id,
                                 expected_connect_response, restored_creds)
        else:
            raise SkipTest("No restored instance.")

    def check_root_disable_supported(self):
        """Throw SkipTest if root-disable is not supported."""
        pass

    def check_inherit_root_state_supported(self):
        """Throw SkipTest if inheriting root state is not supported."""
        pass


class PerconaRootActionsRunner(RootActionsRunner):

    def check_root_disable_supported(self):
        raise SkipTest("Operation is currently not supported.")


class MariadbRootActionsRunner(RootActionsRunner):

    def check_root_disable_supported(self):
        raise SkipTest("Operation is currently not supported.")


class PxcRootActionsRunner(RootActionsRunner):

    def check_root_disable_supported(self):
        raise SkipTest("Operation is currently not supported.")


class PostgresqlRootActionsRunner(RootActionsRunner):

    def check_root_disable_supported(self):
        raise SkipTest("Operation is currently not supported.")

    def run_enable_root_with_password(self):
        raise SkipTest("Operation is currently not supported.")

    def run_delete_root(self):
        raise SkipKnownBug(runners.BUG_WRONG_API_VALIDATION)


class CouchbaseRootActionsRunner(RootActionsRunner):

    def _assert_connect(
            self, instance_id, expected_response, test_connect_creds):
        host = self.get_instance_host(instance_id=instance_id)
        self.report.log("Pinging instance %s with credentials: %s"
                        % (instance_id, test_connect_creds))
        mgmt_port = 8091
        mgmt_creds = '%s:%s' % (test_connect_creds[0],
                                test_connect_creds[1])
        rest_endpoint = ('http://%s:%d/pools/nodes' % (host, mgmt_port))
        out, err = utils.execute_with_timeout(
            'curl', '-u', mgmt_creds, rest_endpoint)
        self.assert_equal(expected_response, out and len(out) > 0)

    def check_root_disable_supported(self):
        raise SkipTest("Operation is currently not supported.")

    def run_enable_root_with_password(self):
        raise SkipTest("Operation is currently not supported.")

    def run_delete_root(self):
        raise SkipKnownBug(runners.BUG_WRONG_API_VALIDATION)


class RedisRootActionsRunner(RootActionsRunner):

    def check_inherit_root_state_supported(self):
        raise SkipTest("Redis instances do not inherit root state "
                       "from backups.")


trove-12.1.0.dev92/trove/tests/scenario/runners/test_runners.py

# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import inspect
import json
import netaddr
import os
import proboscis
import six
import sys
import time as timer
import types

from oslo_config.cfg import NoSuchOptError
from proboscis import asserts
import swiftclient
from troveclient.compat import exceptions

from trove.common import cfg
from trove.common import exception
from trove.common.strategies.strategy import Strategy
from trove.common import timeutils
from trove.common import utils
from trove.common.utils import poll_until, build_polling_task
from trove.tests.config import CONFIG
from trove.tests import util as test_util
from trove.tests.util.check import AttrCheck
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements

CONF = cfg.CONF

TEST_RUNNERS_NS = 'trove.tests.scenario.runners'
TEST_HELPERS_NS = 'trove.tests.scenario.helpers'
TEST_HELPER_MODULE_NAME = 'test_helper'
TEST_HELPER_BASE_NAME = 'TestHelper'


class SkipKnownBug(proboscis.SkipTest):
    """Skip test failures due to known bug(s).

    These should get fixed sometime in the future.
    """

    def __init__(self, *bugs):
        """
        :param bugs: One or more bug references (e.g. link, bug #).
        """
        bug_ref = '; '.join(map(str, bugs))
        super(SkipKnownBug, self).__init__("Known bug: %s" % bug_ref)


class RunnerFactory(object):

    _test_runner = None
    _runner_ns = None
    _runner_cls = None

    @classmethod
    def instance(cls):
        """Returns the current instance of the runner, or creates a new
        one if none exists. This is useful to have multiple 'group' classes
        use the same runner so that state is maintained.
        """
        if not cls._test_runner:
            cls._test_runner = cls.create()
        return cls._test_runner

    @classmethod
    def create(cls):
        """Returns a new instance of the runner. Tests that require a
        'fresh' runner (typically from a different 'group') can call this.
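
        Example (sketch; the factory subclass name is hypothetical)::

            runner = MyActionsRunnerFactory.create()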
""" return cls._get_runner(cls._runner_ns, cls._runner_cls) @classmethod def _get_runner(cls, runner_module_name, runner_base_name, *args, **kwargs): class_prefix = cls._get_test_datastore() runner_cls = cls._load_dynamic_class( runner_module_name, class_prefix, runner_base_name, TEST_RUNNERS_NS) runner = runner_cls(*args, **kwargs) runner._test_helper = cls._get_helper(runner.report) return runner @classmethod def _get_helper(cls, report): class_prefix = cls._get_test_datastore() helper_cls = cls._load_dynamic_class( TEST_HELPER_MODULE_NAME, class_prefix, TEST_HELPER_BASE_NAME, TEST_HELPERS_NS) return helper_cls( cls._build_class_name(class_prefix, TEST_HELPER_BASE_NAME, strip_test=True), report) @classmethod def _get_test_datastore(cls): return CONFIG.dbaas_datastore @classmethod def _load_dynamic_class(cls, module_name, class_prefix, base_name, namespace): """Try to load a datastore specific class if it exists; use the default otherwise. """ # This is for overridden Runner classes impl = cls._build_class_path(module_name, class_prefix, base_name) clazz = cls._load_class('runner', impl, namespace) if not clazz: # This is for overridden Helper classes module = module_name.replace('test', class_prefix.lower()) impl = cls._build_class_path( module, class_prefix, base_name, strip_test=True) clazz = cls._load_class('helper', impl, namespace) if not clazz: # Just import the base class impl = cls._build_class_path(module_name, '', base_name) clazz = cls._load_class(None, impl, namespace) return clazz @classmethod def _load_class(cls, load_type, impl, namespace): clazz = None if not load_type or load_type in impl.lower(): try: clazz = Strategy.get_strategy(impl, namespace) except ImportError as ie: # Only fail silently if it's something we expect, # such as a missing override class. Anything else # shouldn't be suppressed. l_msg = str(ie).lower() if (load_type and load_type not in l_msg) or ( 'no module named' not in l_msg and 'cannot be found' not in l_msg): raise return clazz @classmethod def _build_class_path(cls, module_name, class_prefix, class_base, strip_test=False): class_name = cls._build_class_name( class_prefix, class_base, strip_test) return '%s.%s' % (module_name, class_name) @classmethod def _build_class_name(cls, class_prefix, base_name, strip_test=False): base = (base_name.replace('Test', '') if strip_test else base_name) return '%s%s' % (class_prefix.capitalize(), base) class InstanceTestInfo(object): """Stores new instance information used by dependent tests.""" def __init__(self): self.id = None # The ID of the instance in the database. self.name = None # Test name, generated each test run. self.dbaas_flavor_href = None # The flavor of the instance. self.dbaas_datastore = None # The datastore id self.dbaas_datastore_version = None # The datastore version id self.volume_size = None # The size of volume the instance will have. self.volume = None # The volume the instance will have. self.nics = None # The dict of type/id for nics used on the intance. self.user = None # The user instance who owns the instance. self.users = None # The users created on the instance. self.databases = None # The databases created on the instance. self.helper_user = None # Test helper user if exists. self.helper_database = None # Test helper database if exists. self.admin_user = None class LogOnFail(type): """Class to log info on failure. This will decorate all methods that start with 'run_' with a log wrapper that will do a show and attempt to pull back the guest log on all registered IDs. 
    Use it by setting it up as a metaclass and calling the following:
        add_inst_ids(): Instance ID or list of IDs to report on
        set_client(): Admin client object
        set_report(): Report object
    The TestRunner class shows how this can be done in
    register_debug_inst_ids.
    """

    _data = {}

    def __new__(mcs, name, bases, attrs):
        for attr_name, attr_value in attrs.items():
            if (isinstance(attr_value, types.FunctionType) and
                    attr_name.startswith('run_')):
                attrs[attr_name] = mcs.log(attr_value)
        return super(LogOnFail, mcs).__new__(mcs, name, bases, attrs)

    @classmethod
    def get_inst_ids(mcs):
        return set(mcs._data.get('inst_ids', []))

    @classmethod
    def add_inst_ids(mcs, inst_ids):
        if not utils.is_collection(inst_ids):
            inst_ids = [inst_ids]
        debug_inst_ids = mcs.get_inst_ids()
        debug_inst_ids |= set(inst_ids)
        mcs._data['inst_ids'] = debug_inst_ids

    @classmethod
    def reset_inst_ids(mcs):
        mcs._data['inst_ids'] = []

    @classmethod
    def set_client(mcs, client):
        mcs._data['client'] = client

    @classmethod
    def get_client(mcs):
        return mcs._data['client']

    @classmethod
    def set_report(mcs, report):
        mcs._data['report'] = report

    @classmethod
    def get_report(mcs):
        return mcs._data['report']

    @classmethod
    def log(mcs, fn):
        def wrapper(*args, **kwargs):
            inst_ids = mcs.get_inst_ids()
            client = mcs.get_client()
            report = mcs.get_report()
            try:
                return fn(*args, **kwargs)
            except proboscis.SkipTest:
                raise
            except Exception:
                (extype, exvalue, extb) = sys.exc_info()
                msg_prefix = "*** LogOnFail: "
                if inst_ids:
                    report.log(msg_prefix + "Exception detected, "
                               "dumping info for IDs: %s." % inst_ids)
                else:
                    report.log(msg_prefix + "Exception detected, "
                               "but no instance IDs are registered to log.")
                if CONFIG.instance_log_on_failure:
                    for inst_id in inst_ids:
                        try:
                            client.instances.get(inst_id)
                        except Exception as ex:
                            report.log("%s Error in instance show for %s:\n%s"
                                       % (msg_prefix, inst_id, ex))
                        try:
                            client.instances.log_action(inst_id, 'guest',
                                                        publish=True)
                            log_gen = client.instances.log_generator(
                                inst_id, 'guest', lines=0, swift=None)
                            log_contents = "".join(
                                [chunk for chunk in log_gen()])
                            report.log("%s Guest log for %s:\n%s"
                                       % (msg_prefix, inst_id, log_contents))
                        except Exception as ex:
                            report.log("%s Error in guest log retrieval for "
                                       "%s:\n%s" % (msg_prefix, inst_id, ex))

                # Only report on the first error that occurs
                mcs.reset_inst_ids()
                six.reraise(extype, exvalue, extb)

        return wrapper


@six.add_metaclass(LogOnFail)
class TestRunner(object):
    """
    Base class for all 'Runner' classes.

    The Runner classes are those that actually do the work.  The 'Group'
    classes are set up with decorators that control how the tests flow,
    and are used to organize the tests - however they are typically set
    up to just call a corresponding method in a Runner class.

    A Runner class can be overridden if a particular set of tests
    needs to have DataStore specific coding.  The corresponding Group
    class will try to first load a DataStore specific class, and then
    fall back to the generic one if need be.  For example,
    the NegativeClusterActionsGroup class specifies a runner_base_name of
    NegativeClusterActionsRunner.  If the manager of the default
    datastore is mongodb, then the MongodbNegativeClusterActionsRunner
    is used instead.  The prefix is created by capitalizing the name of
    the manager - overriding classes *must* follow this naming convention
    to be automatically used.  The main assumption made here is that if
    a manager is used for different datastore versions, then the
    overriding runner should also be valid for the same datastore versions.
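
    For example (an illustrative sketch; the method body is hypothetical),
    a datastore specific override would look like::

        class MongodbNegativeClusterActionsRunner(
                NegativeClusterActionsRunner):

            def run_create_constrained_size_cluster(self):
                raise SkipTest("Not supported by this datastore.")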
""" USE_INSTANCE_ID_FLAG = 'TESTS_USE_INSTANCE_ID' DO_NOT_DELETE_INSTANCE_FLAG = 'TESTS_DO_NOT_DELETE_INSTANCE' VOLUME_SUPPORT = CONFIG.get('trove_volume_support', True) EPHEMERAL_SUPPORT = not VOLUME_SUPPORT and CONFIG.get('device_path', None) ROOT_PARTITION = not (VOLUME_SUPPORT or CONFIG.get('device_path', None)) GUEST_CAST_WAIT_TIMEOUT_SEC = 60 # Here's where the info for the 'main' test instance goes instance_info = InstanceTestInfo() report = CONFIG.get_report() def __init__(self, sleep_time=10, timeout=1200): self.def_sleep_time = sleep_time self.def_timeout = timeout self.instance_info.name = "TEST_" + datetime.datetime.strftime( timeutils.utcnow(), '%Y_%m_%d__%H_%M_%S') self.instance_info.dbaas_datastore = CONFIG.dbaas_datastore self.instance_info.dbaas_datastore_version = ( CONFIG.dbaas_datastore_version) self.instance_info.user = CONFIG.users.find_user_by_name("alt_demo") self.instance_info.admin_user = CONFIG.users.find_user( Requirements(is_admin=True) ) if self.VOLUME_SUPPORT: self.instance_info.volume_size = CONFIG.get('trove_volume_size', 1) self.instance_info.volume = { 'size': self.instance_info.volume_size} else: self.instance_info.volume_size = None self.instance_info.volume = None self.instance_info.nics = None shared_network = CONFIG.get('shared_network', None) if shared_network: self.instance_info.nics = [{'net-id': shared_network}] self._auth_client = None self._unauth_client = None self._admin_client = None self._swift_client = None self._nova_client = None self._neutron_client = None self._test_helper = None self._servers = {} # Attempt to register the main instance. If it doesn't # exist, this will still set the 'report' and 'client' objects # correctly in LogOnFail inst_ids = [] if hasattr(self.instance_info, 'id') and self.instance_info.id: inst_ids = [self.instance_info.id] self.register_debug_inst_ids(inst_ids) @classmethod def fail(cls, message): asserts.fail(message) @classmethod def assert_is_sublist(cls, sub_list, full_list, message=None): if not message: message = 'Unexpected sublist' try: message += ": sub_list '%s' (full_list '%s')." % ( sub_list, full_list) except TypeError: pass return cls.assert_true(set(sub_list).issubset(full_list), message) @classmethod def assert_unique(cls, iterable, message=None): """Assert that a given iterable contains only unique elements. """ cls.assert_equal(len(iterable), len(set(iterable)), message) @classmethod def assert_true(cls, condition, message=None): asserts.assert_true(condition, message=message) @classmethod def assert_false(cls, condition, message=None): asserts.assert_false(condition, message=message) @classmethod def assert_is_none(cls, value, message=None): asserts.assert_is_none(value, message=message) @classmethod def assert_is_not_none(cls, value, message=None): asserts.assert_is_not_none(value, message=message) @classmethod def assert_list_elements_equal(cls, expected, actual, message=None): """Assert that two lists contain same elements (with same multiplicities) ignoring the element order. """ # Sorts the elements of a given list, including dictionaries. # For dictionaries sorts based on dictionary key. 
# example: # [1, 3, 2] -> [1, 2, 3] # ["b", "a", "c"] -> ["a", "b", "c"] # [{'b':'y'},{'a':'x'}] -> [{'a':'x'},{'b':'y'}] sort = lambda object: sorted(object, key=lambda e: sorted(e.keys()) if isinstance(e, dict) else e) return cls.assert_equal(sort(expected), sort(actual), message) @classmethod def assert_equal(cls, expected, actual, message=None): if not message: message = 'Unexpected value' try: message += ": '%s' (expected '%s')." % (actual, expected) except TypeError: pass asserts.assert_equal(expected, actual, message=message) @classmethod def assert_not_equal(cls, expected, actual, message=None): if not message: message = 'Expected different value than' try: message += ": '%s'." % expected except TypeError: pass asserts.assert_not_equal(expected, actual, message=message) @property def test_helper(self): return self._test_helper @test_helper.setter def test_helper(self, test_helper): self._test_helper = test_helper @property def auth_client(self): return self._create_authorized_client() def _create_authorized_client(self): """Create a client from the normal 'authorized' user.""" return create_dbaas_client(self.instance_info.user) @property def unauth_client(self): return self._create_unauthorized_client() def _create_unauthorized_client(self): """Create a client from a different 'unauthorized' user to facilitate negative testing. """ requirements = Requirements(is_admin=False) other_user = CONFIG.users.find_user( requirements, black_list=[self.instance_info.user.auth_user]) return create_dbaas_client(other_user) @property def admin_client(self): return self._create_admin_client() def _create_admin_client(self): """Create a client from an admin user.""" requirements = Requirements(is_admin=True, services=["trove"]) admin_user = CONFIG.users.find_user(requirements) return create_dbaas_client(admin_user) @property def swift_client(self): return self._create_swift_client(admin=False) @property def admin_swift_client(self): return self._create_swift_client(admin=True) def _create_swift_client(self, admin=True): requirements = Requirements(is_admin=admin, services=["swift"]) user = CONFIG.users.find_user(requirements) os_options = {'region_name': CONFIG.trove_client_region_name} return swiftclient.client.Connection( authurl=CONFIG.auth_url, user=user.auth_user, key=user.auth_key, tenant_name=user.tenant, auth_version='3.0', os_options=os_options) @property def nova_client(self): if self._nova_client is None: self._nova_client = test_util.create_nova_client( self.instance_info.admin_user ) return self._nova_client @property def neutron_client(self): if self._neutron_client is None: self._neutron_client = test_util.create_neutron_client( self.instance_info.admin_user ) return self._neutron_client def register_debug_inst_ids(self, inst_ids): """Method to 'register' an instance ID (or list of instance IDs) for debug purposes on failure. Note that values are only appended here, not overridden. The LogOnFail class will handle 'missing' IDs. 
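
        Example (as used by the replication runners above)::

            self.register_debug_inst_ids(self.non_affinity_repl_id)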
""" LogOnFail.add_inst_ids(inst_ids) LogOnFail.set_client(self.admin_client) LogOnFail.set_report(self.report) def get_client_tenant(self, client): tenant_name = client.real_client.client.tenant service_url = client.real_client.client.service_url su_parts = service_url.split('/') tenant_id = su_parts[-1] return tenant_name, tenant_id def assert_raises(self, expected_exception, expected_http_code, client, client_cmd, *cmd_args, **cmd_kwargs): if client: # Make sure that the client_cmd comes from the same client that # was passed in, otherwise asserting the client code may fail. cmd_clz = client_cmd.__self__ cmd_clz_name = cmd_clz.__class__.__name__ client_attrs = [attr[0] for attr in inspect.getmembers( client.real_client) if '__' not in attr[0]] match = [getattr(client, a) for a in client_attrs if getattr(client, a).__class__.__name__ == cmd_clz_name] self.assert_true(any(match), "Could not find method class in client: %s" % client_attrs) self.assert_equal( match[0], cmd_clz, "Test error: client_cmd must be from client obj") asserts.assert_raises(expected_exception, client_cmd, *cmd_args, **cmd_kwargs) self.assert_client_code(client, expected_http_code) def get_datastore_config_property(self, name, datastore=None): """Get a Trove configuration property for a given datastore. Use the current instance's datastore if None. """ try: datastore = datastore or self.instance_info.dbaas_datastore return CONF.get(datastore).get(name) except NoSuchOptError: return CONF.get(name) @property def is_using_existing_instance(self): return TestRunner.using_existing_instance() @staticmethod def using_existing_instance(): return TestRunner.has_env_flag(TestRunner.USE_INSTANCE_ID_FLAG) @staticmethod def has_env_flag(flag_name): """Return whether a given flag was set.""" return os.environ.get(flag_name, None) is not None def get_existing_instance(self): if self.is_using_existing_instance: instance_id = os.environ.get(self.USE_INSTANCE_ID_FLAG) return self.get_instance(instance_id) return None @property def has_do_not_delete_instance(self): return self.has_env_flag(self.DO_NOT_DELETE_INSTANCE_FLAG) def assert_instance_action(self, instance_ids, expected_states): if expected_states: self.assert_all_instance_states( instance_ids if utils.is_collection(instance_ids) else [instance_ids], expected_states) def assert_client_code(self, client, expected_http_code): if client and expected_http_code is not None: self.assert_equal(expected_http_code, client.last_http_code, "Unexpected client status code") def assert_all_instance_states(self, instance_ids, expected_states, fast_fail_status=None, require_all_states=False): self.report.log("Waiting for states (%s) for instances: %s" % (expected_states, instance_ids)) def _make_fn(inst_id): return lambda: self._assert_instance_states( inst_id, expected_states, fast_fail_status=fast_fail_status, require_all_states=require_all_states) tasks = [ build_polling_task( _make_fn(instance_id), sleep_time=self.def_sleep_time, time_out=self.def_timeout) for instance_id in instance_ids] poll_until(lambda: all(poll_task.ready() for poll_task in tasks), sleep_time=self.def_sleep_time, time_out=self.def_timeout) for task in tasks: if task.has_result(): self.assert_true( task.poll_result(), "Some instances failed to acquire all expected states.") elif task.has_exception(): self.fail(str(task.poll_exception())) def _assert_instance_states(self, instance_id, expected_states, fast_fail_status=None, require_all_states=False): """Keep polling for the expected instance states until the instance 
        acquires either the last or fast-fail state. If the instance state
        does not match the state expected at the time of polling (and
        'require_all_states' is not set) the code assumes the instance has
        already passed through that state and moves on to the next expected
        state.
        """
        self.report.log("Waiting for states (%s) for instance: %s" %
                        (expected_states, instance_id))
        if fast_fail_status is None:
            fast_fail_status = ['ERROR', 'FAILED']
        found = False
        for status in expected_states:
            found_current = self._has_status(
                instance_id, status, fast_fail_status=fast_fail_status)
            if require_all_states or found or found_current:
                found = True
                start_time = timer.time()
                try:
                    if not found_current:
                        poll_until(lambda: self._has_status(
                            instance_id, status,
                            fast_fail_status=fast_fail_status),
                            sleep_time=self.def_sleep_time,
                            time_out=self.def_timeout)
                    self.report.log("Instance '%s' has gone '%s' in %s." %
                                    (instance_id, status,
                                     self._time_since(start_time)))
                except exception.PollTimeOut:
                    self.report.log(
                        "Status of instance '%s' did not change to '%s' "
                        "after %s." % (instance_id, status,
                                       self._time_since(start_time)))
                    return False
            else:
                self.report.log(
                    "Instance state was not '%s', moving to the next "
                    "expected state." % status)

        return found

    def _time_since(self, start_time):
        return '%.1fs' % (timer.time() - start_time)

    def assert_all_gone(self, instance_ids, expected_last_status):
        self._wait_all_deleted(instance_ids
                               if utils.is_collection(instance_ids)
                               else [instance_ids],
                               expected_last_status)

    def assert_pagination_match(
            self, list_page, full_list, start_idx, end_idx):
        self.assert_equal(full_list[start_idx:end_idx], list(list_page),
                          "List page does not match the expected full "
                          "list section.")

    def _wait_all_deleted(self, instance_ids, expected_last_status):
        self.report.log("Waiting for instances to be gone: %s (status %s)" %
                        (instance_ids, expected_last_status))

        def _make_fn(inst_id):
            return lambda: self._wait_for_delete(inst_id,
                                                 expected_last_status)

        tasks = [
            build_polling_task(
                _make_fn(instance_id),
                sleep_time=self.def_sleep_time,
                time_out=self.def_timeout) for instance_id in instance_ids]
        poll_until(lambda: all(poll_task.ready() for poll_task in tasks),
                   sleep_time=self.def_sleep_time,
                   time_out=self.def_timeout)

        for task in tasks:
            if task.has_result():
                self.assert_true(
                    task.poll_result(),
                    "Some instances were not removed.")
            elif task.has_exception():
                self.fail(str(task.poll_exception()))

    def _wait_for_delete(self, instance_id, expected_last_status):
        self.report.log("Waiting for instance to be gone: %s (status %s)" %
                        (instance_id, expected_last_status))
        start_time = timer.time()
        try:
            self._poll_while(instance_id, expected_last_status,
                             sleep_time=self.def_sleep_time,
                             time_out=self.def_timeout)
        except exceptions.NotFound:
            self.report.log("Instance was removed in %s." %
                            self._time_since(start_time))
            return True
        except exception.PollTimeOut:
            self.report.log(
                "Instance '%s' still existed after %s."
                % (instance_id, self._time_since(start_time)))

        return False

    def _poll_while(self, instance_id, expected_status,
                    sleep_time=1, time_out=0):
        poll_until(lambda: not self._has_status(instance_id,
                                                expected_status),
                   sleep_time=sleep_time, time_out=time_out)

    def _has_status(self, instance_id, status, fast_fail_status=None):
        fast_fail_status = fast_fail_status or []
        instance = self.get_instance(instance_id, self.admin_client)
        self.report.log("Polling instance '%s' for state '%s', was '%s'."
% (instance_id, status, instance.status)) if instance.status in fast_fail_status: raise RuntimeError("Instance '%s' acquired a fast-fail status: %s" % (instance_id, instance.status)) return instance.status == status def get_server(self, instance_id): server = None if instance_id in self._servers: server = self._servers[instance_id] else: instance = self.get_instance(instance_id) self.report.log("Getting server for instance: %s" % instance) for nova_server in self.nova_client.servers.list(): if str(nova_server.name) == instance.name: server = nova_server break if server: self._servers[instance_id] = server return server def assert_server_group_exists(self, instance_id): """Check that the Nova instance associated with instance_id belongs to a server group, and return the id. """ server = self.get_server(instance_id) self.assert_is_not_none(server, "Could not find Nova server for '%s'" % instance_id) server_group = None server_groups = self.nova_client.server_groups.list() for sg in server_groups: if server.id in sg.members: server_group = sg break if server_group is None: self.fail("Could not find server group for Nova instance %s" % server.id) return server_group.id def assert_server_group_gone(self, srv_grp_id): """Ensure that the server group is no longer present.""" server_group = None server_groups = self.nova_client.server_groups.list() for sg in server_groups: if sg.id == srv_grp_id: server_group = sg break if server_group: self.fail("Found left-over server group: %s" % server_group) def get_instance(self, instance_id, client=None): client = client or self.admin_client return client.instances.get(instance_id) def extract_ipv4s(self, ips): ipv4s = [str(ip) for ip in ips if netaddr.valid_ipv4(ip)] if not ipv4s: self.fail("No IPV4 ip found") return ipv4s def get_instance_host(self, instance_id=None): instance_id = instance_id or self.instance_info.id instance = self.get_instance(instance_id) if 'ip' not in instance._info: self.fail('Instance %s with status %s does not have an IP' ' address.' % (instance_id, instance._info['status'])) host = self.extract_ipv4s(instance._info['ip'])[0] self.report.log("Found host %s for instance %s." % (host, instance_id)) return host def build_flavor(self, flavor_id=2, volume_size=1): return {"flavorRef": flavor_id, "volume": {"size": volume_size}} def get_flavor(self, flavor_name): flavors = self.auth_client.find_flavors_by_name(flavor_name) self.assert_equal( 1, len(flavors), "Unexpected number of flavors with name '%s' found." % flavor_name) return flavors[0] def get_instance_flavor(self, fault_num=None): name_format = 'instance%s%s_flavor_name' default = 'm1.tiny' fault_str = '' eph_str = '' if fault_num: fault_str = '_fault_%d' % fault_num if self.EPHEMERAL_SUPPORT: eph_str = '_eph' default = 'eph.rd-tiny' name = name_format % (fault_str, eph_str) flavor_name = CONFIG.values.get(name, default) return self.get_flavor(flavor_name) def get_flavor_href(self, flavor): return self.auth_client.find_flavor_self_href(flavor) def copy_dict(self, d, ignored_keys=None): return {k: v for k, v in d.items() if not ignored_keys or k not in ignored_keys} def create_test_helper_on_instance(self, instance_id): """Here we add a helper user/database, if any, to a given instance via the Trove API. These are for internal use by the test framework and should not be changed by individual test-cases. 
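
        The definitions built by build_helper_defs() below have this shape
        (the values here are purely illustrative)::

            {'name': 'helper_user', 'password': 'helper_password',
             'databases': [{'name': 'helper_database'}]}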
""" database_def, user_def, root_def = self.build_helper_defs() client = self.auth_client if database_def: self.report.log( "Creating a helper database '%s' on instance: %s" % (database_def['name'], instance_id)) client.databases.create(instance_id, [database_def]) self.wait_for_database_create(client, instance_id, [database_def]) if user_def: self.report.log( "Creating a helper user '%s:%s' on instance: %s" % (user_def['name'], user_def['password'], instance_id)) client.users.create(instance_id, [user_def]) self.wait_for_user_create(client, instance_id, [user_def]) if root_def: # Not enabling root on a single instance of the cluster here # because we want to test the cluster root enable instead. pass def build_helper_defs(self): """Build helper database and user JSON definitions if credentials are defined by the helper. """ database_def = None def _get_credentials(creds): if creds: username = creds.get('name') if username: password = creds.get('password', '') databases = [] if database_def: databases.append(database_def) return {'name': username, 'password': password, 'databases': databases} return None credentials = self.test_helper.get_helper_credentials() if credentials: database = credentials.get('database') if database: database_def = {'name': database} credentials_root = self.test_helper.get_helper_credentials_root() return (database_def, _get_credentials(credentials), _get_credentials(credentials_root)) def wait_for_user_create(self, client, instance_id, expected_user_defs): expected_user_names = {user_def['name'] for user_def in expected_user_defs} self.report.log("Waiting for all created users to appear in the " "listing: %s" % expected_user_names) def _all_exist(): all_users = self.get_user_names(client, instance_id) return all(usr in all_users for usr in expected_user_names) try: poll_until(_all_exist, time_out=self.GUEST_CAST_WAIT_TIMEOUT_SEC) self.report.log("All users now exist on the instance.") except exception.PollTimeOut: self.fail("Some users were not created within the poll " "timeout: %ds" % self.GUEST_CAST_WAIT_TIMEOUT_SEC) def get_user_names(self, client, instance_id): full_list = client.users.list(instance_id) return {user.name: user for user in full_list} def wait_for_database_create(self, client, instance_id, expected_database_defs): expected_db_names = {db_def['name'] for db_def in expected_database_defs} self.report.log("Waiting for all created databases to appear in the " "listing: %s" % expected_db_names) def _all_exist(): all_dbs = self.get_db_names(client, instance_id) return all(db in all_dbs for db in expected_db_names) try: poll_until(_all_exist, time_out=self.GUEST_CAST_WAIT_TIMEOUT_SEC) self.report.log("All databases now exist on the instance.") except exception.PollTimeOut: self.fail("Some databases were not created within the poll " "timeout: %ds" % self.GUEST_CAST_WAIT_TIMEOUT_SEC) def get_db_names(self, client, instance_id): full_list = client.databases.list(instance_id) return {database.name: database for database in full_list} def create_initial_configuration(self, expected_http_code): client = self.auth_client dynamic_config = self.test_helper.get_dynamic_group() non_dynamic_config = self.test_helper.get_non_dynamic_group() values = dynamic_config or non_dynamic_config if values: json_def = json.dumps(values) result = client.configurations.create( 'initial_configuration_for_create_tests', json_def, "Configuration group used by create tests.", datastore=self.instance_info.dbaas_datastore, 
datastore_version=self.instance_info.dbaas_datastore_version) self.assert_client_code(client, expected_http_code) return (result.id, dynamic_config is None) return (None, False) class CheckInstance(AttrCheck): """Class to check various attributes of Instance details.""" def __init__(self, instance): super(CheckInstance, self).__init__() self.instance = instance self.volume_support = TestRunner.VOLUME_SUPPORT self.existing_instance = TestRunner.is_using_existing_instance def flavor(self): if 'flavor' not in self.instance: self.fail("'flavor' not found in instance.") else: allowed_attrs = ['id', 'links'] self.contains_allowed_attrs( self.instance['flavor'], allowed_attrs, msg="Flavor") self.links(self.instance['flavor']['links']) def datastore(self): if 'datastore' not in self.instance: self.fail("'datastore' not found in instance.") else: allowed_attrs = ['type', 'version'] self.contains_allowed_attrs( self.instance['datastore'], allowed_attrs, msg="datastore") def volume_key_exists(self): if 'volume' not in self.instance: self.fail("'volume' not found in instance.") return False return True def volume(self): if not self.volume_support: return if self.volume_key_exists(): allowed_attrs = ['size'] if self.existing_instance: allowed_attrs.append('used') self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volumes") def used_volume(self): if not self.volume_support: return if self.volume_key_exists(): allowed_attrs = ['size', 'used'] print(self.instance) self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volumes") def volume_mgmt(self): if not self.volume_support: return if self.volume_key_exists(): allowed_attrs = ['description', 'id', 'name', 'size'] self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volumes") def addresses(self): allowed_attrs = ['addr', 'version'] print(self.instance) networks = ['usernet'] for network in networks: for address in self.instance['addresses'][network]: self.contains_allowed_attrs( address, allowed_attrs, msg="Address") def guest_status(self): allowed_attrs = ['created_at', 'deleted', 'deleted_at', 'instance_id', 'state', 'state_description', 'updated_at'] self.contains_allowed_attrs( self.instance['guest_status'], allowed_attrs, msg="Guest status") def mgmt_volume(self): if not self.volume_support: return allowed_attrs = ['description', 'id', 'name', 'size'] self.contains_allowed_attrs( self.instance['volume'], allowed_attrs, msg="Volume") def replica_of(self): if 'replica_of' not in self.instance: self.fail("'replica_of' not found in instance.") else: allowed_attrs = ['id', 'links'] self.contains_allowed_attrs( self.instance['replica_of'], allowed_attrs, msg="Replica-of links not found") self.links(self.instance['replica_of']['links']) def slaves(self): if 'replicas' not in self.instance: self.fail("'replicas' not found in instance.") else: allowed_attrs = ['id', 'links'] for slave in self.instance['replicas']: self.contains_allowed_attrs( slave, allowed_attrs, msg="Replica links not found") self.links(slave['links']) def fault(self, is_admin=False): if 'fault' not in self.instance: self.fail("'fault' not found in instance.") else: allowed_attrs = ['message', 'created', 'details'] self.contains_allowed_attrs( self.instance['fault'], allowed_attrs, msg="Fault") if is_admin and not self.instance['fault']['details']: self.fail("Missing fault details") if not is_admin and self.instance['fault']['details']: self.fail("Fault details provided for non-admin") 
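
# Example usage of CheckInstance (an illustrative sketch; 'instance' is
# assumed to be an instance object returned by the Trove client, whose
# '_info' dict holds the attributes checked above):
#
#     check = CheckInstance(instance._info)
#     check.flavor()
#     check.datastore()
#     check.volume()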
trove-12.1.0.dev92/trove/tests/scenario/runners/user_actions_runners.py

# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from six.moves.urllib import parse as urllib_parse

from proboscis import SkipTest

from trove.common import exception
from trove.common.utils import poll_until
from trove.tests.scenario import runners
from trove.tests.scenario.runners.test_runners import SkipKnownBug
from trove.tests.scenario.runners.test_runners import TestRunner

from troveclient.compat import exceptions


class UserActionsRunner(TestRunner):

    # TODO(pmalik): I believe the 202 (Accepted) should be replaced by
    # 200 (OK) as the actions are generally very fast and their results
    # available immediately upon execution of the request. This would
    # likely require replacing GA casts with calls which I believe are
    # more appropriate anyways.

    def __init__(self):
        super(UserActionsRunner, self).__init__()
        self.user_defs = []
        self.renamed_user_orig_def = None

    @property
    def first_user_def(self):
        if self.user_defs:
            # Try to use the first user with databases if any.
for user_def in self.user_defs: if 'databases' in user_def and user_def['databases']: return user_def return self.user_defs[0] raise SkipTest("No valid user definitions provided.") @property def non_existing_user_def(self): user_def = self.test_helper.get_non_existing_user_definition() if user_def: return user_def raise SkipTest("No valid user definitions provided.") def run_users_create(self, expected_http_code=202): users = self.test_helper.get_valid_user_definitions() if users: self.user_defs = self.assert_users_create( self.instance_info.id, users, expected_http_code) else: raise SkipTest("No valid user definitions provided.") def assert_users_create(self, instance_id, serial_users_def, expected_http_code): client = self.auth_client client.users.create(instance_id, serial_users_def) self.assert_client_code(client, expected_http_code) self.wait_for_user_create(client, instance_id, serial_users_def) return serial_users_def def run_user_show(self, expected_http_code=200): for user_def in self.user_defs: self.assert_user_show( self.instance_info.id, user_def, expected_http_code) def assert_user_show(self, instance_id, expected_user_def, expected_http_code): user_name = expected_user_def['name'] user_host = expected_user_def.get('host') client = self.auth_client queried_user = client.users.get( instance_id, user_name, user_host) self.assert_client_code(client, expected_http_code) self._assert_user_matches(queried_user, expected_user_def) def _assert_user_matches(self, user, expected_user_def): user_name = expected_user_def['name'] self.assert_equal(expected_user_def['name'], user.name, "Mismatch of names for user: %s" % user_name) self.assert_list_elements_equal( expected_user_def['databases'], user.databases, "Mismatch of databases for user: %s" % user_name) def run_users_list(self, expected_http_code=200): self.assert_users_list( self.instance_info.id, self.user_defs, expected_http_code) def assert_users_list(self, instance_id, expected_user_defs, expected_http_code, limit=2): client = self.auth_client full_list = client.users.list(instance_id) self.assert_client_code(client, expected_http_code) listed_users = {user.name: user for user in full_list} self.assert_is_none(full_list.next, "Unexpected pagination in the list.") for user_def in expected_user_defs: user_name = user_def['name'] self.assert_true( user_name in listed_users, "User not included in the 'user-list' output: %s" % user_name) self._assert_user_matches(listed_users[user_name], user_def) # Check that the system (ignored) users are not included in the output. system_users = self.get_system_users() self.assert_false( any(name in listed_users for name in system_users), "System users should not be included in the 'user-list' output.") # Test list pagination. 
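        # A caller pages through the listing by passing 'limit' and then
        # feeding the returned 'next' marker back in, e.g. (sketch):
        #
        #     page = client.users.list(instance_id, limit=2)
        #     if page.next:
        #         page2 = client.users.list(instance_id, marker=page.next)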
list_page = client.users.list(instance_id, limit=limit) self.assert_client_code(client, expected_http_code) self.assert_true(len(list_page) <= limit) if len(full_list) > limit: self.assert_is_not_none(list_page.next, "List page is missing.") else: self.assert_is_none(list_page.next, "An extra page in the list.") marker = list_page.next self.assert_pagination_match(list_page, full_list, 0, limit) if marker: last_user = list_page[-1] expected_marker = self.as_pagination_marker(last_user) self.assert_equal(expected_marker, marker, "Pagination marker should be the last element " "in the page.") list_page = client.users.list(instance_id, marker=marker) self.assert_client_code(client, expected_http_code) self.assert_pagination_match( list_page, full_list, limit, len(full_list)) def as_pagination_marker(self, user): return urllib_parse.quote(user.name) def run_user_access_show(self, expected_http_code=200): for user_def in self.user_defs: self.assert_user_access_show( self.instance_info.id, user_def, expected_http_code) def assert_user_access_show(self, instance_id, user_def, expected_http_code): user_name, user_host = self._get_user_name_host_pair(user_def) client = self.auth_client user_dbs = client.users.list_access( instance_id, user_name, hostname=user_host) self.assert_client_code(client, expected_http_code) expected_dbs = {db_def['name'] for db_def in user_def['databases']} listed_dbs = [db.name for db in user_dbs] self.assert_equal(len(expected_dbs), len(listed_dbs), "Unexpected number of databases on the user access " "list.") for database in expected_dbs: self.assert_true( database in listed_dbs, "Database not found in the user access list: %s" % database) def run_user_access_revoke(self, expected_http_code=202): self._apply_on_all_databases( self.instance_info.id, self.assert_user_access_revoke, expected_http_code) def _apply_on_all_databases(self, instance_id, action, expected_http_code): if any(user_def['databases'] for user_def in self.user_defs): for user_def in self.user_defs: user_name, user_host = self._get_user_name_host_pair(user_def) db_defs = user_def['databases'] for db_def in db_defs: db_name = db_def['name'] action(instance_id, user_name, user_host, db_name, expected_http_code) else: raise SkipTest("No user databases defined.") def assert_user_access_revoke(self, instance_id, user_name, user_host, database, expected_http_code): client = self.auth_client client.users.revoke( instance_id, user_name, database, hostname=user_host) self.assert_client_code(client, expected_http_code) user_dbs = client.users.list_access( instance_id, user_name, hostname=user_host) self.assert_false(any(db.name == database for db in user_dbs), "Database should no longer be included in the user " "access list after revoke: %s" % database) def run_user_access_grant(self, expected_http_code=202): self._apply_on_all_databases( self.instance_info.id, self.assert_user_access_grant, expected_http_code) def assert_user_access_grant(self, instance_id, user_name, user_host, database, expected_http_code): client = self.auth_client client.users.grant( instance_id, user_name, [database], hostname=user_host) self.assert_client_code(client, expected_http_code) user_dbs = client.users.list_access( instance_id, user_name, hostname=user_host) self.assert_true(any(db.name == database for db in user_dbs), "Database should be included in the user " "access list after granting access: %s" % database) def run_user_create_with_no_attributes( self, expected_exception=exceptions.BadRequest, expected_http_code=400): 
self.assert_users_create_failure( self.instance_info.id, {}, expected_exception, expected_http_code) def run_user_create_with_blank_name( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # Test with missing user name attribute. no_name_usr_def = self.copy_dict(self.non_existing_user_def, ignored_keys=['name']) self.assert_users_create_failure( self.instance_info.id, no_name_usr_def, expected_exception, expected_http_code) # Test with empty user name attribute. blank_name_usr_def = self.copy_dict(self.non_existing_user_def) blank_name_usr_def.update({'name': ''}) self.assert_users_create_failure( self.instance_info.id, blank_name_usr_def, expected_exception, expected_http_code) def run_user_create_with_blank_password( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # Test with missing password attribute. no_pass_usr_def = self.copy_dict(self.non_existing_user_def, ignored_keys=['password']) self.assert_users_create_failure( self.instance_info.id, no_pass_usr_def, expected_exception, expected_http_code) # Test with missing databases attribute. no_db_usr_def = self.copy_dict(self.non_existing_user_def, ignored_keys=['databases']) self.assert_users_create_failure( self.instance_info.id, no_db_usr_def, expected_exception, expected_http_code) def run_existing_user_create( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_users_create_failure( self.instance_info.id, self.first_user_def, expected_exception, expected_http_code) def run_system_user_create( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # TODO(pmalik): Actions on system users and databases should probably # return Forbidden 403 instead. The current error messages are # confusing (talking about a malformed request). system_users = self.get_system_users() if system_users: user_defs = [{'name': name, 'password': 'password1', 'databases': []} for name in system_users] self.assert_users_create_failure( self.instance_info.id, user_defs, expected_exception, expected_http_code) def assert_users_create_failure( self, instance_id, serial_users_def, expected_exception, expected_http_code): client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.users.create, instance_id, serial_users_def) def run_user_update_with_blank_name( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_user_attribute_update_failure( self.instance_info.id, self.first_user_def, {'name': ''}, expected_exception, expected_http_code) def run_user_update_with_existing_name( self, expected_exception=exceptions.BadRequest, expected_http_code=400): self.assert_user_attribute_update_failure( self.instance_info.id, self.first_user_def, {'name': self.first_user_def['name']}, expected_exception, expected_http_code) def assert_user_attribute_update_failure( self, instance_id, user_def, update_attribites, expected_exception, expected_http_code): user_name, user_host = self._get_user_name_host_pair(user_def) client = self.auth_client self.assert_raises( expected_exception, expected_http_code, client, client.users.update_attributes, instance_id, user_name, update_attribites, user_host) def _get_user_name_host_pair(self, user_def): return user_def['name'], user_def.get('host') def run_system_user_attribute_update( self, expected_exception=exceptions.BadRequest, expected_http_code=400): # TODO(pmalik): Actions on system users and databases should probably # return Forbidden 403 instead. 
        # The current error messages are confusing (talking about a
        # malformed request).
        system_users = self.get_system_users()
        if system_users:
            for name in system_users:
                user_def = {'name': name, 'password': 'password2'}
                self.assert_user_attribute_update_failure(
                    self.instance_info.id, user_def, user_def,
                    expected_exception, expected_http_code)

    def run_user_attribute_update(self, expected_http_code=202):
        updated_def = self.first_user_def
        # Update the name by appending a fixed suffix to it.
        updated_name = ''.join([updated_def['name'], 'upd'])
        update_attributes = {'name': updated_name,
                             'password': 'password2'}
        self.assert_user_attribute_update(
            self.instance_info.id, updated_def,
            update_attributes, expected_http_code)

    def assert_user_attribute_update(self, instance_id, user_def,
                                     update_attributes, expected_http_code):
        user_name, user_host = self._get_user_name_host_pair(user_def)
        client = self.auth_client
        client.users.update_attributes(
            instance_id, user_name, update_attributes, user_host)
        self.assert_client_code(client, expected_http_code)

        # Update the stored definitions with the new values.
        expected_def = None
        for user_def in self.user_defs:
            if user_def['name'] == user_name:
                self.renamed_user_orig_def = dict(user_def)
                user_def.update(update_attributes)
                expected_def = user_def

        self.wait_for_user_create(client, instance_id, self.user_defs)

        # Verify using 'user-show' and 'user-list'.
        self.assert_user_show(instance_id, expected_def, 200)
        self.assert_users_list(instance_id, self.user_defs, 200)

    def run_user_recreate_with_no_access(self, expected_http_code=202):
        if (self.renamed_user_orig_def and
                self.renamed_user_orig_def['databases']):
            self.assert_user_recreate_with_no_access(
                self.instance_info.id, self.renamed_user_orig_def,
                expected_http_code)
        else:
            raise SkipTest("No renamed users with databases.")

    def assert_user_recreate_with_no_access(self, instance_id, original_def,
                                            expected_http_code=202):
        # Recreate a previously renamed user without assigning any access
        # rights to it.
        recreated_user_def = dict(original_def)
        recreated_user_def.update({'databases': []})
        user_def = self.assert_users_create(
            instance_id, [recreated_user_def], expected_http_code)

        # Append the new user to the defs for cleanup.
        self.user_defs.extend(user_def)

        # Assert empty user access.
        self.assert_user_access_show(instance_id, recreated_user_def, 200)

    def run_user_delete(self, expected_http_code=202):
        for user_def in self.user_defs:
            self.assert_user_delete(
                self.instance_info.id, user_def, expected_http_code)

    def assert_user_delete(self, instance_id, user_def, expected_http_code):
        user_name, user_host = self._get_user_name_host_pair(user_def)
        client = self.auth_client
        client.users.delete(instance_id, user_name, user_host)
        self.assert_client_code(client, expected_http_code)
        self._wait_for_user_delete(client, instance_id, user_name)

    def _wait_for_user_delete(self, client, instance_id, deleted_user_name):
        self.report.log("Waiting for deleted user to disappear from the "
                        "listing: %s" % deleted_user_name)

        def _user_is_gone():
            all_users = self.get_user_names(client, instance_id)
            return deleted_user_name not in all_users

        try:
            poll_until(_user_is_gone,
                       time_out=self.GUEST_CAST_WAIT_TIMEOUT_SEC)
            self.report.log("User is now gone from the instance.")
        except exception.PollTimeOut:
            self.fail("User still listed after the poll timeout: %ds" %
                      self.GUEST_CAST_WAIT_TIMEOUT_SEC)

    def run_nonexisting_user_show(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_user_show_failure(
            self.instance_info.id,
            {'name': self.non_existing_user_def['name']},
            expected_exception, expected_http_code)

    def assert_user_show_failure(self, instance_id, user_def,
                                 expected_exception, expected_http_code):
        user_name, user_host = self._get_user_name_host_pair(user_def)
        client = self.auth_client
        self.assert_raises(
            expected_exception, expected_http_code, client,
            client.users.get, instance_id, user_name, user_host)

    def run_system_user_show(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        # TODO(pmalik): Actions on system users and databases should probably
        # return Forbidden 403 instead. The current error messages are
        # confusing (talking about a malformed request).
        system_users = self.get_system_users()
        if system_users:
            for name in system_users:
                self.assert_user_show_failure(
                    self.instance_info.id, {'name': name},
                    expected_exception, expected_http_code)

    def run_nonexisting_user_update(self, expected_http_code=404):
        # Test a valid update on a non-existing user.
        update_def = {'name': self.non_existing_user_def['name']}
        self.assert_user_attribute_update_failure(
            self.instance_info.id, update_def, update_def,
            exceptions.NotFound, expected_http_code)

    def run_nonexisting_user_delete(
            self, expected_exception=exceptions.NotFound,
            expected_http_code=404):
        self.assert_user_delete_failure(
            self.instance_info.id,
            {'name': self.non_existing_user_def['name']},
            expected_exception, expected_http_code)

    def assert_user_delete_failure(
            self, instance_id, user_def,
            expected_exception, expected_http_code):
        user_name, user_host = self._get_user_name_host_pair(user_def)
        client = self.auth_client
        self.assert_raises(expected_exception, expected_http_code,
                           client, client.users.delete,
                           instance_id, user_name, user_host)

    def run_system_user_delete(
            self, expected_exception=exceptions.BadRequest,
            expected_http_code=400):
        # TODO(pmalik): Actions on system users and databases should probably
        # return Forbidden 403 instead. The current error messages are
        # confusing (talking about a malformed request).
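        # The system users come from the datastore's 'ignore_users'
        # configuration property (see get_system_users() below); for a
        # MySQL-family datastore these are typically built-in accounts
        # such as 'root' or 'os_admin' -- illustrative values, not read
        # from any particular config.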
        system_users = self.get_system_users()
        if system_users:
            for name in system_users:
                self.assert_user_delete_failure(
                    self.instance_info.id, {'name': name},
                    expected_exception, expected_http_code)

    def get_system_users(self):
        return self.get_datastore_config_property('ignore_users')


class MysqlUserActionsRunner(UserActionsRunner):

    def as_pagination_marker(self, user):
        return urllib_parse.quote('%s@%s' % (user.name, user.host))


class MariadbUserActionsRunner(MysqlUserActionsRunner):

    def __init__(self):
        super(MariadbUserActionsRunner, self).__init__()


class PerconaUserActionsRunner(MysqlUserActionsRunner):

    def __init__(self):
        super(PerconaUserActionsRunner, self).__init__()


class PxcUserActionsRunner(MysqlUserActionsRunner):

    def __init__(self):
        super(PxcUserActionsRunner, self).__init__()


class PostgresqlUserActionsRunner(UserActionsRunner):

    def run_user_update_with_existing_name(self):
        raise SkipKnownBug(runners.BUG_WRONG_API_VALIDATION)

    def run_system_user_show(self):
        raise SkipKnownBug(runners.BUG_WRONG_API_VALIDATION)

    def run_system_user_attribute_update(self):
        raise SkipKnownBug(runners.BUG_WRONG_API_VALIDATION)

    def run_system_user_delete(self):
        raise SkipKnownBug(runners.BUG_WRONG_API_VALIDATION)

trove-12.1.0.dev92/trove/tests/unittests/api/common/test_extensions.py

# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
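# A rough sketch of how these tests drive extension loading (paraphrasing
# EP_TEXT below, not an additional fixture): pkg_resources parses entry
# point lines such as
#   mgmt = trove.extensions.routes.mgmt:Mgmt
# into EntryPoint objects, which ExtensionManager then instantiates and
# type-checks against extensions.ExtensionDescriptor.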
import mock
import os
import pkg_resources
from six.moves import configparser as config_parser

import trove
from trove.common import extensions
from trove.extensions.routes.mgmt import Mgmt
from trove.extensions.routes.mysql import Mysql
from trove.tests.unittests import trove_testtools

DEFAULT_EXTENSION_MAP = {
    'Mgmt': [Mgmt, extensions.ExtensionDescriptor],
    'MYSQL': [Mysql, extensions.ExtensionDescriptor]
}

EP_TEXT = '''
mgmt = trove.extensions.routes.mgmt:Mgmt
mysql = trove.extensions.routes.mysql:Mysql
invalid = trove.tests.unittests.api.common.test_extensions:InvalidExtension
'''


class InvalidExtension(object):

    def get_name(self):
        return "Invalid"

    def get_description(self):
        return "Invalid Extension"

    def get_alias(self):
        return "Invalid"

    def get_namespace(self):
        return "http://TBD"

    def get_updated(self):
        return "2014-08-14T13:25:27-06:00"

    def get_resources(self):
        return []


class TestExtensionLoading(trove_testtools.TestCase):

    def setUp(self):
        super(TestExtensionLoading, self).setUp()

    def tearDown(self):
        super(TestExtensionLoading, self).tearDown()

    def _assert_default_extensions(self, ext_list):
        for alias, ext in ext_list.items():
            for clazz in DEFAULT_EXTENSION_MAP[alias]:
                self.assertIsInstance(ext, clazz,
                                      "Improper extension class")

    @mock.patch("pkg_resources.iter_entry_points")
    def test_default_extensions(self, mock_iter_eps):
        trove_base = os.path.abspath(os.path.join(
            os.path.dirname(trove.__file__), ".."))
        setup_path = "%s/setup.cfg" % trove_base
        # Check if we are running as a unit test without the module
        # installed.
        if os.path.isfile(setup_path):
            parser = config_parser.ConfigParser()
            parser.read(setup_path)
            entry_points = parser.get(
                'entry_points', extensions.ExtensionManager.EXT_NAMESPACE)
            eps = pkg_resources.EntryPoint.parse_group('plugins',
                                                       entry_points)
            mock_iter_eps.return_value = eps.values()
        extension_mgr = extensions.ExtensionManager()
        self.assertEqual(sorted(DEFAULT_EXTENSION_MAP.keys()),
                         sorted(extension_mgr.extensions.keys()),
                         "Invalid extension names")
        self._assert_default_extensions(extension_mgr.extensions)

    @mock.patch("pkg_resources.iter_entry_points")
    def test_invalid_extension(self, mock_iter_eps):
        eps = pkg_resources.EntryPoint.parse_group('mock', EP_TEXT)
        mock_iter_eps.return_value = eps.values()
        extension_mgr = extensions.ExtensionManager()
        self.assertEqual(len(DEFAULT_EXTENSION_MAP.keys()),
                         len(extension_mgr.extensions),
                         "Loaded invalid extensions")
        self._assert_default_extensions(extension_mgr.extensions)

trove-12.1.0.dev92/trove/tests/unittests/api/common/test_limits.py

# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Tests dealing with HTTP rate-limiting.
"""
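# The rate-limit rule format exercised throughout this module is
# "(verb, URI, regex, value, unit)"; for example, the first TEST_LIMITS
# entry below corresponds to the rule string
# "(GET, /delayed, ^/delayed, 1, MINUTE)" -- one GET per minute on URIs
# matching ^/delayed.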
""" from mock import Mock, MagicMock, patch from oslo_serialization import jsonutils import six from six.moves import http_client import webob from trove.common import limits from trove.common.limits import Limit from trove.limits.service import LimitsController from trove.limits import views from trove.quota.models import Quota from trove.quota.quota import QUOTAS from trove.tests.unittests import trove_testtools TEST_LIMITS = [ Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), Limit("POST", "*", ".*", 7, limits.PER_MINUTE), Limit("POST", "/mgmt", "^/mgmt", 3, limits.PER_MINUTE), Limit("PUT", "*", "", 10, limits.PER_MINUTE), ] class BaseLimitTestSuite(trove_testtools.TestCase): """Base test suite which provides relevant stubs and time abstraction.""" def setUp(self): super(BaseLimitTestSuite, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.absolute_limits = {"max_instances": 55, "max_volumes": 100, "max_backups": 40} class LimitsControllerTest(BaseLimitTestSuite): def setUp(self): super(LimitsControllerTest, self).setUp() @patch.object(QUOTAS, 'get_all_quotas_by_tenant', return_value={}) def test_limit_index_empty(self, quotas_mock): limit_controller = LimitsController() req = MagicMock() req.environ = {'trove.context': self.context} view = limit_controller.index(req, "test_tenant_id") expected = {'limits': [{'verb': 'ABSOLUTE'}]} self.assertEqual(expected, view._data) def test_limit_index(self): tenant_id = "test_tenant_id" limit_controller = LimitsController() limits = [ { "URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "PUT", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "DELETE", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "GET", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 } ] abs_limits = {"instances": Quota(tenant_id=tenant_id, resource="instances", hard_limit=100), "backups": Quota(tenant_id=tenant_id, resource="backups", hard_limit=40), "volumes": Quota(tenant_id=tenant_id, resource="volumes", hard_limit=55)} req = MagicMock() req.environ = {"trove.limits": limits, 'trove.context': self.context} with patch.object(QUOTAS, 'get_all_quotas_by_tenant', return_value=abs_limits): view = limit_controller.index(req, tenant_id) expected = { 'limits': [ { 'max_instances': 100, 'max_backups': 40, 'verb': 'ABSOLUTE', 'max_volumes': 55 }, { 'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'POST', 'remaining': 2, 'unit': 'MINUTE' }, { 'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'PUT', 'remaining': 2, 'unit': 'MINUTE' }, { 'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'DELETE', 'remaining': 2, 'unit': 'MINUTE' }, { 'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'GET', 'remaining': 2, 'unit': 'MINUTE' } ] } self.assertEqual(expected, view._data) class TestLimiter(limits.Limiter): """Note: This was taken from Nova.""" pass class LimitMiddlewareTest(BaseLimitTestSuite): """ Tests for the `limits.RateLimitingMiddleware` class. 
""" @webob.dec.wsgify def _empty_app(self, request): """Do-nothing WSGI app.""" pass def setUp(self): """Prepare middleware for use through fake WSGI app.""" super(LimitMiddlewareTest, self).setUp() _limits = '(GET, *, .*, 1, MINUTE)' self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, "%s.TestLimiter" % self.__class__.__module__) def test_limit_class(self): # Test that middleware selected correct limiter class. assert isinstance(self.app._limiter, TestLimiter) def test_good_request(self): # Test successful GET request through middleware. request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) def test_limited_request_json(self): # Test a rate-limited (413) GET request through middleware. request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(413, response.status_int) self.assertIn('Retry-After', response.headers) retry_after = int(response.headers['Retry-After']) self.assertAlmostEqual(retry_after, 60, 1) body = jsonutils.loads(response.body) expected = "Only 1 GET request(s) can be made to * every minute." value = body["overLimit"]["details"].strip() self.assertEqual(expected, value) self.assertIn("retryAfter", body["overLimit"]) retryAfter = body["overLimit"]["retryAfter"] self.assertEqual("60", retryAfter) class LimitTest(BaseLimitTestSuite): """ Tests for the `limits.Limit` class. """ def test_GET_no_delay(self): # Test a limit handles 1 GET per second. limit = Limit("GET", "*", ".*", 1, 1) limit._get_time = MagicMock(return_value=0.0) delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(0, limit.next_request) self.assertEqual(0, limit.last_request) def test_GET_delay(self): # Test two calls to 1 GET per second limit. limit = Limit("GET", "*", ".*", 1, 1) limit._get_time = MagicMock(return_value=0.0) delay = limit("GET", "/anything") self.assertIsNone(delay) delay = limit("GET", "/anything") self.assertEqual(1, delay) self.assertEqual(1, limit.next_request) self.assertEqual(0, limit.last_request) limit._get_time = MagicMock(return_value=4.0) delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(4, limit.next_request) self.assertEqual(4, limit.last_request) class ParseLimitsTest(BaseLimitTestSuite): """ Tests for the default limits parser in the in-memory `limits.Limiter` class. """ def test_invalid(self): # Test that parse_limits() handles invalid input correctly. self.assertRaises(ValueError, limits.Limiter.parse_limits, ';;;;;') def test_bad_rule(self): # Test that parse_limits() handles bad rules correctly. self.assertRaises(ValueError, limits.Limiter.parse_limits, 'GET, *, .*, 20, minute') def test_missing_arg(self): # Test that parse_limits() handles missing args correctly. self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20)') def test_bad_value(self): # Test that parse_limits() handles bad values correctly. self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, foo, minute)') def test_bad_unit(self): # Test that parse_limits() handles bad units correctly. self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20, lightyears)') def test_multiple_rules(self): # Test that parse_limits() handles multiple rules correctly. 
        try:
            result = limits.Limiter.parse_limits(
                '(get, *, .*, 20, minute);'
                '(PUT, /foo*, /foo.*, 10, hour);'
                '(POST, /bar*, /bar.*, 5, second);'
                '(Say, /derp*, /derp.*, 1, day)'
            )
        except ValueError as e:
            assert False, str(e)

        # Make sure the number of returned limits is correct.
        self.assertEqual(4, len(result))

        # Check all the verbs...
        expected = ['GET', 'PUT', 'POST', 'SAY']
        self.assertEqual(expected, [t.verb for t in result])

        # ...the URIs...
        expected = ['*', '/foo*', '/bar*', '/derp*']
        self.assertEqual(expected, [t.uri for t in result])

        # ...the regexes...
        expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
        self.assertEqual(expected, [t.regex for t in result])

        # ...the values...
        expected = [20, 10, 5, 1]
        self.assertEqual(expected, [t.value for t in result])

        # ...and the units...
        expected = [limits.PER_MINUTE, limits.PER_HOUR,
                    limits.PER_SECOND, limits.PER_DAY]
        self.assertEqual(expected, [t.unit for t in result])


class LimiterTest(BaseLimitTestSuite):
    """
    Tests for the in-memory `limits.Limiter` class.
    """

    def update_limits(self, delay, limit_list):
        for ln in limit_list:
            ln._get_time = Mock(return_value=delay)

    def setUp(self):
        """Run before each test."""
        super(LimiterTest, self).setUp()
        userlimits = {'user:user3': ''}
        self.update_limits(0.0, TEST_LIMITS)
        self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)

    def _check(self, num, verb, url, username=None):
        """Check and yield results from checks."""
        for x in range(num):
            yield self.limiter.check_for_delay(verb, url, username)[0]

    def _check_sum(self, num, verb, url, username=None):
        """Check and sum results from checks."""
        results = self._check(num, verb, url, username)
        return sum(item for item in results if item)

    def test_no_delay_GET(self):
        """
        Simple test to ensure no delay on a single call for a limit verb we
        didn't set.
        """
        delay = self.limiter.check_for_delay("GET", "/anything")
        self.assertEqual((None, None), delay)

    def test_no_delay_PUT(self):
        # Simple test to ensure no delay on a single call for a known limit.
        delay = self.limiter.check_for_delay("PUT", "/anything")
        self.assertEqual((None, None), delay)

    def test_delay_PUT(self):
        """
        Ensure the 11th PUT will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_POST(self):
        """
        Ensure the 8th POST will result in a delay of 60/7 seconds until
        the next request will be granted.
        """
        expected = [None] * 7
        results = list(self._check(7, "POST", "/anything"))
        self.assertEqual(expected, results)

        expected = 60.0 / 7.0
        results = self._check_sum(1, "POST", "/anything")
        self.assertAlmostEqual(expected, results, 8)

    def test_delay_POST_mgmt(self):
        """
        Ensure the 4th mgmt POST will result in a delay of 60/3 seconds
        until the next request will be granted.
        """
        expected = [None] * 3
        results = list(self._check(3, "POST", "/mgmt"))
        self.assertEqual(expected, results)

        expected = 60.0 / 3.0
        results = self._check_sum(1, "POST", "/mgmt")
        self.assertAlmostEqual(expected, results, 4)

    def test_delay_GET(self):
        # Ensure the 11th GET will result in NO delay.
        expected = [None] * 11
        results = list(self._check(11, "GET", "/mgmt"))
        self.assertEqual(expected, results)

    def test_delay_PUT_wait(self):
        """
        Ensure that after hitting the limit and then waiting for the
        correct amount of time, the limit will be lifted.
        """
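        # The 6.0 below is the refresh interval of the PUT rule: 10
        # requests per minute works out to 60.0 / 10 = 6.0 seconds between
        # granted requests.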
""" expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) # Advance time self.update_limits(6.0, self.limiter.levels[None]) expected = [None, 6.0] results = list(self._check(2, "PUT", "/anything")) self.assertEqual(expected, results) def test_multiple_delays(self): # Ensure multiple requests still get a delay. expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything")) self.assertEqual(expected, results) self.update_limits(1.0, self.limiter.levels[None]) expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything")) self.assertEqual(expected, results) def test_user_limit(self): # Test user-specific limits. self.assertEqual([], self.limiter.levels['user3']) def test_multiple_users(self): # Tests involving multiple users. # User1 self.update_limits(0.0, self.limiter.levels["user1"]) expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything", "user1")) self.assertEqual(expected, results) # User2 expected = [None] * 10 + [6.0] * 5 results = list(self._check(15, "PUT", "/anything", "user2")) self.assertEqual(expected, results) # User3 expected = [None] * 20 results = list(self._check(20, "PUT", "/anything", "user3")) self.assertEqual(expected, results) # User1 again self.update_limits(1.0, self.limiter.levels["user1"]) expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything", "user1")) self.assertEqual(expected, results) # User2 again self.update_limits(2.0, self.limiter.levels["user2"]) expected = [4.0] * 5 results = list(self._check(5, "PUT", "/anything", "user2")) self.assertEqual(expected, results) class WsgiLimiterTest(BaseLimitTestSuite): """ Tests for `limits.WsgiLimiter` class. """ def setUp(self): """Run before each test.""" super(WsgiLimiterTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) def _request_data(self, verb, path): """Get data describing a limit request verb/path.""" return jsonutils.dump_as_bytes({"verb": verb, "path": path}) def _request(self, verb, url, username=None): """Make sure that POSTing to the given url causes the given username to perform the given action. Make the internal rate limiter return delay and make sure that the WSGI app returns the correct response. """ if username: request = webob.Request.blank("/%s" % username) else: request = webob.Request.blank("/") request.method = "POST" request.body = self._request_data(verb, url) response = request.get_response(self.app) if "X-Wait-Seconds" in response.headers: self.assertEqual(403, response.status_int) return response.headers["X-Wait-Seconds"] self.assertEqual(204, response.status_int) def test_invalid_methods(self): # Only POSTs should work. 
        for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
            request = webob.Request.blank("/", method=method)
            response = request.get_response(self.app)
            self.assertEqual(405, response.status_int)

    def test_good_url(self):
        delay = self._request("GET", "/something")
        self.assertIsNone(delay)

    def test_escaping(self):
        delay = self._request("GET", "/something/jump%20up")
        self.assertIsNone(delay)

    def test_response_to_delays(self):
        delay = self._request("GET", "/delayed")
        self.assertIsNone(delay)

        delay = self._request("GET", "/delayed")
        self.assertAlmostEqual(float(delay), 60, 1)

    def test_response_to_delays_usernames(self):
        delay = self._request("GET", "/delayed", "user1")
        self.assertIsNone(delay)

        delay = self._request("GET", "/delayed", "user2")
        self.assertIsNone(delay)

        delay = self._request("GET", "/delayed", "user1")
        self.assertAlmostEqual(float(delay), 60, 1)

        delay = self._request("GET", "/delayed", "user2")
        self.assertAlmostEqual(float(delay), 60, 1)


class FakeHttplibSocket(object):
    """
    Fake `http_client.HTTPResponse` replacement.
    """

    def __init__(self, response_string):
        """Initialize a new `FakeHttplibSocket`."""
        self._buffer = six.BytesIO(response_string)

    def makefile(self, _mode, *args):
        """Return the socket's internal buffer."""
        return self._buffer


class FakeHttplibConnection(object):
    """
    Fake `http_client.HTTPConnection`.
    """

    def __init__(self, app, host):
        """Initialize a `FakeHttplibConnection`."""
        self.app = app
        self.host = host

    def request(self, method, path, body=b"", headers=None):
        """
        Requests made via this connection actually get translated and
        routed into our WSGI app; we then wait for the response and turn
        it back into an `http_client.HTTPResponse`.
        """
        if not headers:
            headers = {}

        req = webob.Request.blank(path)
        req.method = method
        req.headers = headers
        req.host = self.host
        req.body = body

        resp = str(req.get_response(self.app))
        resp = "HTTP/1.0 %s" % resp
        if six.PY3:
            resp = resp.encode("utf-8")
        sock = FakeHttplibSocket(resp)
        self.http_response = http_client.HTTPResponse(sock)
        self.http_response.begin()

    def getresponse(self):
        """Return our generated response from the request."""
        return self.http_response


def wire_HTTPConnection_to_WSGI(host, app):
    """Monkeypatch HTTPConnection so that attempts to connect to host are
    instead routed straight to the given WSGI app.

    After calling this method, when any code calls
    http_client.HTTPConnection(host), the connection object will be a
    fake. Its requests will be sent directly to the given WSGI app rather
    than through a socket.

    Code connecting to hosts other than host will not be affected.

    This method may be called multiple times to map different hosts to
    different apps.

    This method returns the original HTTPConnection object, so that the
    caller can restore the default HTTPConnection interface (for all
    hosts).
    """
    class HTTPConnectionDecorator(object):
        """Wraps the real HTTPConnection class so that when you
        instantiate the class you might instead get a fake instance.
        """

        def __init__(self, wrapped):
            self.wrapped = wrapped

        def __call__(self, connection_host, *args, **kwargs):
            if connection_host == host:
                return FakeHttplibConnection(app, host)
            else:
                return self.wrapped(connection_host, *args, **kwargs)

    oldHTTPConnection = http_client.HTTPConnection
    http_client.HTTPConnection = HTTPConnectionDecorator(
        http_client.HTTPConnection)
    return oldHTTPConnection


class WsgiLimiterProxyTest(BaseLimitTestSuite):
    """
    Tests for the `limits.WsgiLimiterProxy` class.
    """
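    # These tests rely on wire_HTTPConnection_to_WSGI() above returning
    # the original HTTPConnection class, so tearDown below can undo the
    # monkeypatch; 169.254.0.1 is a link-local address that is safe to
    # "connect" to here because no real socket is ever opened.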
""" def setUp(self): """ Do some nifty HTTP/WSGI magic which allows for WSGI to be called directly by something like the `http_client` library. """ super(WsgiLimiterProxyTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) self.oldHTTPConnection = ( wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") def test_200(self): # Successful request test. delay = self.proxy.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_403(self): # Forbidden request test. delay = self.proxy.check_for_delay("GET", "/delayed") self.assertEqual((None, None), delay) delay, error = self.proxy.check_for_delay("GET", "/delayed") error = error.strip() self.assertAlmostEqual(float(delay), 60, 1) self.assertEqual(b"403 Forbidden\n\nOnly 1 GET request(s) can be" b" made to /delayed every minute.", error) def tearDown(self): # restore original HTTPConnection object http_client.HTTPConnection = self.oldHTTPConnection super(WsgiLimiterProxyTest, self).tearDown() class LimitsViewTest(trove_testtools.TestCase): def setUp(self): super(LimitsViewTest, self).setUp() def test_empty_data(self): """ Test the default returned results if an empty dictionary is given """ rate_limit = {} view = views.LimitView(rate_limit) self.assertIsNotNone(view) data = view.data() expected = {'limit': {'regex': '', 'nextAvailable': '1970-01-01T00:00:00Z', 'uri': '', 'value': '', 'verb': '', 'remaining': 0, 'unit': ''}} self.assertEqual(expected, data) def test_data(self): """ Test the returned results for a fully populated dictionary """ rate_limit = { "URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 } view = views.LimitView(rate_limit) self.assertIsNotNone(view) data = view.data() expected = {'limit': {'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'POST', 'remaining': 2, 'unit': 'MINUTE'}} self.assertEqual(expected, data) class LimitsViewsTest(trove_testtools.TestCase): def setUp(self): super(LimitsViewsTest, self).setUp() def test_empty_data(self): rate_limits = [] abs_view = dict() view_data = views.LimitViews(abs_view, rate_limits) self.assertIsNotNone(view_data) data = view_data.data() expected = {'limits': [{'verb': 'ABSOLUTE'}]} self.assertEqual(expected, data) def test_data(self): rate_limits = [ { "URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "PUT", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "DELETE", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 }, { "URI": "*", "regex": ".*", "value": 10, "verb": "GET", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226 } ] abs_view = {"instances": 55, "volumes": 100, "backups": 40} view_data = views.LimitViews(abs_view, rate_limits) self.assertIsNotNone(view_data) data = view_data.data() expected = {'limits': [{'max_instances': 55, 'max_backups': 40, 'verb': 'ABSOLUTE', 'max_volumes': 100}, {'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'POST', 'remaining': 2, 'unit': 'MINUTE'}, {'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'PUT', 'remaining': 2, 'unit': 'MINUTE'}, {'regex': '.*', 'nextAvailable': '2011-07-21T18:17:06Z', 'uri': '*', 'value': 10, 'verb': 'DELETE', 'remaining': 2, 'unit': 'MINUTE'}, {'regex': 
                               {'regex': '.*',
                                'nextAvailable': '2011-07-21T18:17:06Z',
                                'uri': '*',
                                'value': 10,
                                'verb': 'GET',
                                'remaining': 2,
                                'unit': 'MINUTE'}]}
        self.assertEqual(expected, data)

trove-12.1.0.dev92/trove/tests/unittests/api/test_versions.py

# Copyright 2013 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import Mock
import webob

from trove.common import cfg
from trove.tests.unittests import trove_testtools
from trove.versions import BaseVersion
from trove.versions import Version
from trove.versions import VersionDataView
from trove.versions import VERSIONS
from trove.versions import VersionsAPI
from trove.versions import VersionsController
from trove.versions import VersionsDataView

BASE_URL = 'http://localhost'

id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']


class VersionsControllerTest(trove_testtools.TestCase):

    def setUp(self):
        super(VersionsControllerTest, self).setUp()
        self.controller = VersionsController()
        self.assertIsNotNone(self.controller,
                             "VersionsController instance was None")

    def tearDown(self):
        super(VersionsControllerTest, self).tearDown()
        cfg.CONF.clear_override('public_endpoint')

    def test_index_json(self):
        request = Mock()
        result = self.controller.index(request)
        self.assertIsNotNone(result, 'Result was None')
        result._data = Mock()
        result._data.data_for_json = \
            lambda: {'status': 'CURRENT',
                     'updated': '2012-08-01T00:00:00Z',
                     'id': 'v1.0',
                     'links': [{'href': 'http://localhost/v1.0/',
                                'rel': 'self'}]}

        # Can be anything but xml.
        json_data = result.data("application/json")
        self.assertIsNotNone(json_data, 'Result json_data was None')
        self.assertEqual('v1.0', json_data['id'], 'Version id is incorrect')
        self.assertEqual('CURRENT', json_data['status'],
                         'Version status is incorrect')
        self.assertEqual('2012-08-01T00:00:00Z', json_data['updated'],
                         'Version updated value is incorrect')

    def test_index_json_with_public_endpoint(self):
        cfg.CONF.set_override('public_endpoint', "https://example.com:8779")
        req = webob.Request.blank('/')
        resp = self.controller.index(req)
        result = resp.data('application/json')['versions']
        expected = [
            {
                'status': 'CURRENT',
                'updated': '2012-08-01T00:00:00Z',
                'id': 'v1.0',
                'links': [{
                    'href': 'https://example.com:8779/v1.0/',
                    'rel': 'self'}]
            }
        ]
        self.assertEqual(expected, result)

    def test_show_json(self):
        request = Mock()
        request.url_version = '1.0'
        result = self.controller.show(request)
        self.assertIsNotNone(result, 'Result was None')
        json_data = result.data("application/json")
        self.assertIsNotNone(json_data, "JSON data was None")
        version = json_data.get('version', None)
        self.assertIsNotNone(version, "Version was None")
        self.assertEqual('CURRENT', version['status'],
                         "Version status was not 'CURRENT'")
        self.assertEqual('2012-08-01T00:00:00Z', version['updated'],
                         "Version updated was not '2012-08-01T00:00:00Z'")
        self.assertEqual('v1.0', version['id'], "Version id was not 'v1.0'")

    def test_show_json_with_public_endpoint(self):
        cfg.CONF.set_override('public_endpoint', "https://example.com:8779")
        req = webob.Request.blank('/')
        req.url_version = '1.0'
        resp = self.controller.show(req)
        result = resp.data('application/json')['version']
        expected = {
            'status': 'CURRENT',
            'updated': '2012-08-01T00:00:00Z',
            'id': 'v1.0',
            'links': [{
                'href': 'https://example.com:8779/',
                'rel': 'self'}]
        }
        self.assertEqual(expected, result)


class BaseVersionTestCase(trove_testtools.TestCase):

    def setUp(self):
        super(BaseVersionTestCase, self).setUp()
        self.base_version = BaseVersion(id, status, base_url, updated)
        self.assertIsNotNone(self.base_version,
                             'BaseVersion instance was None')

    def test_data(self):
        data = self.base_version.data()
        self.assertIsNotNone(data, 'Base Version data was None')
        self.assertTrue(type(data) is dict,
                        "Base Version data is not a dict")
        self.assertEqual('CURRENT', data['status'],
                         "Data status was not 'CURRENT'")
        self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
                         "Data updated was not '2012-08-01T00:00:00Z'")
        self.assertEqual('v1.0', data['id'],
                         "Data id was not 'v1.0'")

    def test_url(self):
        url = self.base_version.url()
        self.assertIsNotNone(url, 'Url was None')
        self.assertEqual('http://localhost/v1.0/', url,
                         "Base Version url is incorrect")


class VersionTestCase(trove_testtools.TestCase):

    def setUp(self):
        super(VersionTestCase, self).setUp()
        self.version = Version(id, status, base_url, updated)
        self.assertIsNotNone(self.version, 'Version instance was None')

    def test_url_no_trailing_slash(self):
        url = self.version.url()
        self.assertIsNotNone(url, 'Version url was None')
        self.assertEqual(BASE_URL + '/', url, 'Base url value was incorrect')

    def test_url_with_trailing_slash(self):
        self.version.base_url = 'http://localhost/'
        url = self.version.url()
        self.assertEqual(BASE_URL + '/', url, 'Base url value was incorrect')


class VersionDataViewTestCase(trove_testtools.TestCase):

    def setUp(self):
        super(VersionDataViewTestCase, self).setUp()
        # Get a version object first.
        self.version = Version(id, status, base_url, updated)
        self.assertIsNotNone(self.version, 'Version instance was None')
        # Then create an instance of VersionDataView.
        self.version_data_view = VersionDataView(self.version)
        self.assertIsNotNone(self.version_data_view,
                             'Version Data view instance was None')

    def test_data_for_json(self):
        json_data = self.version_data_view.data_for_json()
        self.assertIsNotNone(json_data, "JSON data was None")
        self.assertTrue(type(json_data) is dict,
                        "JSON version data is not a dict")
        self.assertIsNotNone(json_data.get('version'),
                             "Dict json_data has no key 'version'")
        data = json_data['version']
        self.assertIsNotNone(data, "JSON data version was None")
        self.assertEqual('CURRENT', data['status'],
                         "Data status was not 'CURRENT'")
        self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
                         "Data updated was not '2012-08-01T00:00:00Z'")
        self.assertEqual('v1.0', data['id'],
                         "Data id was not 'v1.0'")


class VersionsDataViewTestCase(trove_testtools.TestCase):

    def setUp(self):
        super(VersionsDataViewTestCase, self).setUp()
        # Get a version object and put it in a list.
        self.versions = []
        self.version = Version(id, status, base_url, updated)
        self.assertIsNotNone(self.version, 'Version instance was None')
        self.versions.append(self.version)
        # Then create an instance of VersionsDataView.
        self.versions_data_view = VersionsDataView(self.versions)
        self.assertIsNotNone(self.versions_data_view,
                             'Versions Data view instance was None')

    def test_data_for_json(self):
        json_data = self.versions_data_view.data_for_json()
        self.assertIsNotNone(json_data, "JSON data was None")
        self.assertTrue(type(json_data) is dict,
                        "JSON versions data is not a dict")
        self.assertIsNotNone(json_data.get('versions', None),
                             "Dict json_data has no key 'versions'")
        versions = json_data['versions']
        self.assertIsNotNone(versions, "Versions was None")
        self.assertEqual(1, len(versions), "Versions length != 1")

        # Explode the version object.
        versions_data = [v.data() for v in self.versions]
        d1 = versions_data.pop()
        d2 = versions.pop()
        self.assertEqual(d1['id'], d2['id'], "Version ids are not equal")


class VersionAPITestCase(trove_testtools.TestCase):

    def setUp(self):
        super(VersionAPITestCase, self).setUp()

    def test_instance(self):
        self.versions_api = VersionsAPI()
        self.assertIsNotNone(self.versions_api,
                             "VersionsAPI instance was None")

trove-12.1.0.dev92/trove/tests/unittests/backup/test_backup_controller.py

# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
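# These tests validate request bodies against the controller's JSON
# schema directly; a minimal valid payload (shape taken from the tests
# below) looks like
#   {"backup": {"instance": "<uuid>", "name": "testback-backup"}}
# and both 'instance' and 'parent_id' must match apischema's uuid
# pattern.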
import jsonschema
from testtools.matchers import Equals

from trove.backup.service import BackupController
from trove.common import apischema
from trove.tests.unittests import trove_testtools


class TestBackupController(trove_testtools.TestCase):

    def setUp(self):
        super(TestBackupController, self).setUp()
        self.uuid = "d6338c9c-3cc8-4313-b98f-13cc0684cf15"
        self.invalid_uuid = "ead-edsa-e23-sdf-23"
        self.controller = BackupController()

    def test_validate_create_complete(self):
        body = {"backup": {"instance": self.uuid,
                           "name": "testback-backup"}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_validate_create_with_blankname(self):
        body = {"backup": {"instance": self.uuid,
                           "name": ' '}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertEqual(1, len(errors))
        self.assertIn("' ' does not match '^.*[0-9a-zA-Z]+.*$'",
                      errors[0].message)

    def test_validate_create_with_invalidname(self):
        body = {"backup": {"instance": self.uuid,
                           "name": '$#@&?'}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertEqual(1, len(errors))
        self.assertIn("'$#@&?' does not match '^.*[0-9a-zA-Z]+.*$'",
                      errors[0].message)

    def test_validate_create_invalid_uuid(self):
        body = {"backup": {"instance": self.invalid_uuid,
                           "name": "testback-backup"}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(errors[0].message,
                        Equals("'%s' does not match '%s'" %
                               (self.invalid_uuid,
                                apischema.uuid['pattern'])))

    def test_validate_create_incremental(self):
        body = {"backup": {"instance": self.uuid,
                           "name": "testback-backup",
                           "parent_id": self.uuid}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertTrue(validator.is_valid(body))

    def test_invalid_parent_id(self):
        body = {"backup": {"instance": self.uuid,
                           "name": "testback-backup",
                           "parent_id": self.invalid_uuid}}
        schema = self.controller.get_schema('create', body)
        validator = jsonschema.Draft4Validator(schema)
        self.assertFalse(validator.is_valid(body))
        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
        self.assertThat(errors[0].message,
                        Equals("'%s' does not match '%s'" %
                               (self.invalid_uuid,
                                apischema.uuid['pattern'])))

trove-12.1.0.dev92/trove/tests/unittests/backup/test_backup_models.py

# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

from mock import DEFAULT
from mock import MagicMock
from mock import patch
from swiftclient.client import ClientException

from trove.backup import models
from trove.backup import state
from trove.common import context
from trove.common import exception
from trove.common import timeutils
from trove.common import utils
from trove.db.models import DatabaseModelBase
from trove.instance import models as instance_models
from trove.taskmanager import api
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util


def _prep_conf(current_time):
    current_time = str(current_time)
    _context = context.TroveContext(tenant='TENANT-' + current_time)
    instance_id = 'INSTANCE-' + current_time
    return _context, instance_id


BACKUP_NAME = 'WORKS'
BACKUP_NAME_2 = 'IT-WORKS'
BACKUP_NAME_3 = 'SECOND-LAST-ONE'
BACKUP_NAME_4 = 'LAST-ONE-FULL'
BACKUP_NAME_5 = 'LAST-ONE-INCREMENTAL'
BACKUP_NAME_6 = 'LAST-ONE-DELETED'
BACKUP_STATE = state.BackupState.NEW
BACKUP_STATE_COMPLETED = state.BackupState.COMPLETED
BACKUP_DESC = 'Backup test'
BACKUP_FILENAME = '45a3d8cb-ade8-484c-a8a5-0c3c7286fb2f.xbstream.gz'
BACKUP_LOCATION = ('https://hpcs.com/tenant/database_backups/' +
                   BACKUP_FILENAME)


class BackupCreateTest(trove_testtools.TestCase):

    def setUp(self):
        super(BackupCreateTest, self).setUp()
        util.init_db()
        self.context, self.instance_id = _prep_conf(timeutils.utcnow())
        self.created = False

    def tearDown(self):
        super(BackupCreateTest, self).tearDown()
        if self.created:
            models.DBBackup.find_by(
                tenant_id=self.context.project_id).delete()

    @patch.object(api.API, 'get_client', MagicMock(return_value=MagicMock()))
    def test_create(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            instance.datastore_version = MagicMock()
            instance.datastore_version.id = 'datastore-id-999'
            instance.cluster_id = None
            with patch.multiple(models.Backup,
                                validate_can_perform_action=DEFAULT,
                                verify_swift_auth_token=DEFAULT):
                with patch.object(api.API, 'create_backup',
                                  MagicMock(return_value=None)):
                    bu = models.Backup.create(self.context, self.instance_id,
                                              BACKUP_NAME, BACKUP_DESC)
                    self.created = True

                    self.assertEqual(BACKUP_NAME, bu.name)
                    self.assertEqual(BACKUP_DESC, bu.description)
                    self.assertEqual(self.instance_id, bu.instance_id)
                    self.assertEqual(state.BackupState.NEW, bu.state)

                    db_record = models.DBBackup.find_by(id=bu.id)
                    self.assertEqual(bu.id, db_record['id'])
                    self.assertEqual(BACKUP_NAME, db_record['name'])
                    self.assertEqual(BACKUP_DESC, db_record['description'])
                    self.assertEqual(self.instance_id,
                                     db_record['instance_id'])
                    self.assertEqual(state.BackupState.NEW,
                                     db_record['state'])
                    self.assertEqual(instance.datastore_version.id,
                                     db_record['datastore_version_id'])

    @patch.object(api.API, 'get_client', MagicMock(return_value=MagicMock()))
    def test_create_incremental(self):
        instance = MagicMock()
        parent = MagicMock(spec=models.DBBackup)
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            instance.datastore_version = MagicMock()
            instance.datastore_version.id = 'datastore-id-999'
            instance.cluster_id = None
            with patch.multiple(models.Backup,
                                validate_can_perform_action=DEFAULT,
                                verify_swift_auth_token=DEFAULT,
                                get_by_id=MagicMock(return_value=parent)):
                with patch.object(api.API, 'create_backup',
                                  MagicMock(return_value=None)):
                    incremental = models.Backup.create(
                        self.context, self.instance_id,
                        BACKUP_NAME, BACKUP_DESC, parent_id='parent_uuid')
                    self.created = True

                    db_record = models.DBBackup.find_by(id=incremental.id)
                    self.assertEqual(incremental.id, db_record['id'])
                    self.assertEqual(BACKUP_NAME, db_record['name'])
                    self.assertEqual(BACKUP_DESC, db_record['description'])
                    self.assertEqual(self.instance_id,
                                     db_record['instance_id'])
                    self.assertEqual(state.BackupState.NEW,
                                     db_record['state'])
                    self.assertEqual('parent_uuid', db_record['parent_id'])
                    self.assertEqual(instance.datastore_version.id,
                                     db_record['datastore_version_id'])

    def test_create_instance_not_found(self):
        self.assertRaises(exception.NotFound, models.Backup.create,
                          self.context, self.instance_id,
                          BACKUP_NAME, BACKUP_DESC)

    def test_create_incremental_not_found(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            instance.cluster_id = None
            with patch.object(models.Backup, 'validate_can_perform_action',
                              return_value=None):
                with patch.object(models.Backup, 'verify_swift_auth_token',
                                  return_value=None):
                    self.assertRaises(exception.NotFound,
                                      models.Backup.create,
                                      self.context, self.instance_id,
                                      BACKUP_NAME, BACKUP_DESC,
                                      parent_id='BAD')

    def test_create_instance_not_active(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                side_effect=exception.UnprocessableEntity)
            self.assertRaises(exception.UnprocessableEntity,
                              models.Backup.create,
                              self.context, self.instance_id,
                              BACKUP_NAME, BACKUP_DESC)

    def test_create_backup_swift_token_invalid(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            instance.validate_can_perform_action = MagicMock(
                return_value=None)
            with patch.object(models.Backup, 'validate_can_perform_action',
                              return_value=None):
                with patch.object(models.Backup, 'verify_swift_auth_token',
                                  side_effect=exception.SwiftAuthError):
                    self.assertRaises(exception.SwiftAuthError,
                                      models.Backup.create,
                                      self.context, self.instance_id,
                                      BACKUP_NAME, BACKUP_DESC)

    def test_create_backup_datastore_operation_not_supported(self):
        instance = MagicMock()
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance):
            with patch.object(
                models.Backup, 'validate_can_perform_action',
                side_effect=exception.DatastoreOperationNotSupported
            ):
                self.assertRaises(exception.DatastoreOperationNotSupported,
                                  models.Backup.create,
                                  self.context, self.instance_id,
                                  BACKUP_NAME, BACKUP_DESC)

    def test_create_backup_cluster_instance_operation_not_supported(self):
        instance = MagicMock()
        instance.cluster_id = 'bad_id'
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance),\
                patch.object(models.Backup, 'validate_can_perform_action',
                             return_value=None),\
                patch.object(models.Backup, 'verify_swift_auth_token',
                             return_value=None):
            self.assertRaises(exception.ClusterInstanceOperationNotSupported,
                              models.Backup.create,
                              self.context, self.instance_id,
                              BACKUP_NAME, BACKUP_DESC)

    @patch('trove.backup.models.LOG')
    def test_create_backup_creation_error(self, mock_logging):
        instance = MagicMock()
        instance.cluster_id = None
        with patch.object(instance_models.BuiltInstance, 'load',
                          return_value=instance),\
                patch.object(models.Backup, 'validate_can_perform_action',
                             return_value=None),\
                patch.object(models.Backup, 'verify_swift_auth_token',
                             return_value=None),\
                patch.object(DatabaseModelBase, 'is_valid',
                             return_value=False),\
                patch('trove.quota.quota.QuotaEngine.reserve',
                      return_value=[]):
            DatabaseModelBase.errors = {}
            self.assertRaises(exception.BackupCreationError,
                              models.Backup.create,
                              self.context, self.instance_id,
                              BACKUP_NAME, BACKUP_DESC)


class BackupDeleteTest(trove_testtools.TestCase):

    def setUp(self):
        super(BackupDeleteTest, self).setUp()
        util.init_db()
        self.context, self.instance_id = _prep_conf(timeutils.utcnow())

    def tearDown(self):
        super(BackupDeleteTest, self).tearDown()

    def test_delete_backup_not_found(self):
        self.assertRaises(exception.NotFound, models.Backup.delete,
                          self.context, 'backup-id')

    def test_delete_backup_is_running(self):
        backup = MagicMock()
        backup.is_running = True
        with patch.object(models.Backup, 'get_by_id', return_value=backup):
            self.assertRaises(exception.UnprocessableEntity,
                              models.Backup.delete,
                              self.context, 'backup_id')

    def test_delete_backup_swift_token_invalid(self):
        backup = MagicMock()
        backup.is_running = False
        with patch.object(models.Backup, 'get_by_id', return_value=backup):
            with patch.object(models.Backup, 'verify_swift_auth_token',
                              side_effect=exception.SwiftAuthError):
                self.assertRaises(exception.SwiftAuthError,
                                  models.Backup.delete,
                                  self.context, 'backup_id')


class BackupORMTest(trove_testtools.TestCase):

    def setUp(self):
        super(BackupORMTest, self).setUp()
        util.init_db()
        self.context, self.instance_id = _prep_conf(timeutils.utcnow())
        self.backup = models.DBBackup.create(
            tenant_id=self.context.project_id,
            name=BACKUP_NAME,
            state=BACKUP_STATE,
            instance_id=self.instance_id,
            deleted=False,
            size=2.0,
            location=BACKUP_LOCATION)
        self.deleted = False

    def tearDown(self):
        super(BackupORMTest, self).tearDown()
        if not self.deleted:
            models.DBBackup.find_by(
                tenant_id=self.context.project_id).delete()

    def test_list(self):
        backups, marker = models.Backup.list(self.context)
        self.assertIsNone(marker)
        self.assertEqual(1, len(backups))

    def test_list_for_instance(self):
        models.DBBackup.create(tenant_id=self.context.project_id,
                               name=BACKUP_NAME_2,
                               state=BACKUP_STATE,
                               instance_id=self.instance_id,
                               size=2.0,
                               deleted=False)
        backups, marker = models.Backup.list_for_instance(self.context,
                                                          self.instance_id)
        self.assertIsNone(marker)
        self.assertEqual(2, len(backups))

    def test_get_last_completed(self):
        models.DBBackup.create(tenant_id=self.context.project_id,
                               name=BACKUP_NAME_3,
                               state=BACKUP_STATE_COMPLETED,
                               instance_id=self.instance_id,
                               size=2.0,
                               deleted=False)
        models.DBBackup.create(tenant_id=self.context.project_id,
                               name=BACKUP_NAME_4,
                               state=BACKUP_STATE_COMPLETED,
                               instance_id=self.instance_id,
                               size=2.0,
                               deleted=False)
        models.DBBackup.create(tenant_id=self.context.project_id,
                               name=BACKUP_NAME_5,
                               state=BACKUP_STATE_COMPLETED,
                               instance_id=self.instance_id,
                               parent_id='parent_uuid',
                               size=2.0,
                               deleted=False)
        models.DBBackup.create(tenant_id=self.context.project_id,
                               name=BACKUP_NAME_6,
                               state=BACKUP_STATE_COMPLETED,
                               instance_id=self.instance_id,
                               size=2.0,
                               deleted=True)

        backup = models.Backup.get_last_completed(
            self.context, self.instance_id, include_incremental=True)
        self.assertEqual(BACKUP_NAME_5, backup.name)

        backup = models.Backup.get_last_completed(
            self.context, self.instance_id, include_incremental=False)
        self.assertEqual(BACKUP_NAME_4, backup.name)

    def test_running(self):
        running = models.Backup.running(instance_id=self.instance_id)
        self.assertTrue(running)

    def test_not_running(self):
        not_running = models.Backup.running(instance_id='non-existent')
        self.assertFalse(not_running)

    def test_running_exclude(self):
        not_running = models.Backup.running(instance_id=self.instance_id,
                                            exclude=self.backup.id)
        self.assertFalse(not_running)

    def test_is_running(self):
        self.assertTrue(self.backup.is_running)

    def test_is_done(self):
        self.backup.state = state.BackupState.COMPLETED
        self.backup.save()
        self.assertTrue(self.backup.is_done)

    def test_not_is_running(self):
        self.backup.state = state.BackupState.COMPLETED
        self.backup.save()
        self.assertFalse(self.backup.is_running)

    def test_not_is_done(self):
        self.assertFalse(self.backup.is_done)

    def test_backup_size(self):
        db_record = models.DBBackup.find_by(id=self.backup.id)
        self.assertEqual(self.backup.size, db_record.size)

    def test_backup_delete(self):
        backup = models.DBBackup.find_by(id=self.backup.id)
        backup.delete()
        backups, marker = models.Backup.list_for_instance(self.context,
                                                          self.instance_id)
        self.assertIsNone(marker)
        self.assertEqual(0, len(backups))

    def test_delete(self):
        self.backup.delete()
        db_record = models.DBBackup.find_by(id=self.backup.id, deleted=True)
        self.assertEqual(self.instance_id, db_record['instance_id'])

    def test_deleted_not_running(self):
        self.backup.delete()
        self.assertFalse(models.Backup.running(self.instance_id))

    def test_filename(self):
        self.assertEqual(BACKUP_FILENAME, self.backup.filename)

    def test_filename_bad(self):
        def _set_bad_filename():
            self.backup.location = 'bad'
            self.backup.filename

        self.assertRaises(ValueError, _set_bad_filename)

    @patch('trove.common.clients.create_swift_client')
    def test_check_swift_object_exist_integrity_error(self,
                                                      mock_swift_client):
        mock_swift_client.return_value.head_object.return_value = {
            'etag': ''}
        self.assertRaises(exception.RestoreBackupIntegrityError,
                          self.backup.check_swift_object_exist,
                          self.context, True)

    @patch('trove.common.clients.create_swift_client')
    def test_check_swift_object_exist_client_exception(self,
                                                       mock_swift_client):
        mock_swift_client.side_effect = ClientException(
            self.context.project_id
        )
        self.assertRaises(exception.SwiftAuthError,
                          self.backup.check_swift_object_exist,
                          self.context)

    @patch('trove.common.clients.create_swift_client')
    def test_check_swift_object_exist_client_exception_404(
            self, mock_swift_client):
        e = ClientException(self.context.project_id)
        e.http_status = 404
        mock_swift_client.side_effect = e
        self.assertFalse(self.backup.check_swift_object_exist(self.context))

    @patch('trove.common.clients.create_swift_client')
    def test_swift_auth_token_client_exception(self, mock_swift_client):
        mock_swift_client.side_effect = ClientException(
            self.context.project_id
        )
        self.assertRaises(exception.SwiftAuthError,
                          models.Backup.verify_swift_auth_token,
                          self.context)

    @patch('trove.common.clients.create_swift_client')
    def test_swift_auth_token_no_service_endpoint(self, mock_swift_client):
        mock_swift_client.side_effect = exception.NoServiceEndpoint
        self.assertRaises(exception.SwiftNotFound,
                          models.Backup.verify_swift_auth_token,
                          self.context)


class PaginationTests(trove_testtools.TestCase):

    def setUp(self):
        super(PaginationTests, self).setUp()
        util.init_db()
        self.context, self.instance_id = _prep_conf(timeutils.utcnow())
        # Create a bunch of backups.
        bkup_info = {
            'tenant_id': self.context.project_id,
            'state': BACKUP_STATE,
            'instance_id': self.instance_id,
            'size': 2.0,
            'deleted': False
        }
        for backup in range(50):
            bkup_info.update({'name': 'Backup-%s' % backup})
            models.DBBackup.create(**bkup_info)
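    # The expected markers below assume Backup.list() pages through the
    # records with a fixed page size of 20 and returns the next offset as
    # the marker, so 50 backups yield markers of 20, 40, and finally None.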
models.DBBackup.query() query.filter_by(instance_id=self.instance_id).delete() def test_pagination_list(self): # page one backups, marker = models.Backup.list(self.context) self.assertEqual(20, marker) self.assertEqual(20, len(backups)) # page two self.context.marker = 20 backups, marker = models.Backup.list(self.context) self.assertEqual(40, marker) self.assertEqual(20, len(backups)) # page three self.context.marker = 40 backups, marker = models.Backup.list(self.context) self.assertIsNone(marker) self.assertEqual(10, len(backups)) def test_pagination_list_for_instance(self): # page one backups, marker = models.Backup.list_for_instance(self.context, self.instance_id) self.assertEqual(20, marker) self.assertEqual(20, len(backups)) # page two self.context.marker = 20 backups, marker = models.Backup.list(self.context) self.assertEqual(40, marker) self.assertEqual(20, len(backups)) # page three self.context.marker = 40 backups, marker = models.Backup.list_for_instance(self.context, self.instance_id) self.assertIsNone(marker) self.assertEqual(10, len(backups)) class OrderingTests(trove_testtools.TestCase): def setUp(self): super(OrderingTests, self).setUp() util.init_db() now = timeutils.utcnow() self.context, self.instance_id = _prep_conf(now) info = { 'tenant_id': self.context.project_id, 'state': BACKUP_STATE, 'instance_id': self.instance_id, 'size': 2.0, 'deleted': False } four = now - datetime.timedelta(days=4) one = now - datetime.timedelta(days=1) three = now - datetime.timedelta(days=3) two = now - datetime.timedelta(days=2) # Create backups out of order, save/create set the 'updated' field, # so we need to use the db_api directly. models.DBBackup().db_api.save( models.DBBackup(name='four', updated=four, id=utils.generate_uuid(), **info)) models.DBBackup().db_api.save( models.DBBackup(name='one', updated=one, id=utils.generate_uuid(), **info)) models.DBBackup().db_api.save( models.DBBackup(name='three', updated=three, id=utils.generate_uuid(), **info)) models.DBBackup().db_api.save( models.DBBackup(name='two', updated=two, id=utils.generate_uuid(), **info)) def tearDown(self): super(OrderingTests, self).tearDown() query = models.DBBackup.query() query.filter_by(instance_id=self.instance_id).delete() def test_list(self): backups, marker = models.Backup.list(self.context) self.assertIsNone(marker) actual = [b.name for b in backups] expected = [u'one', u'two', u'three', u'four'] self.assertEqual(expected, actual) def test_list_for_instance(self): backups, marker = models.Backup.list_for_instance(self.context, self.instance_id) self.assertIsNone(marker) actual = [b.name for b in backups] expected = [u'one', u'two', u'three', u'four'] self.assertEqual(expected, actual) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/unittests/backup/test_backupagent.py0000644000175000017500000005522700000000000025667 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import hashlib import mock import os from mock import Mock, MagicMock, patch, ANY, DEFAULT, call from oslo_utils import netutils from webob.exc import HTTPNotFound from trove.backup.state import BackupState from trove.common.context import TroveContext from trove.common.strategies.storage.base import Storage from trove.common import utils from trove.conductor import api as conductor_api from trove.guestagent.backup import backupagent from trove.guestagent.common import configuration from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.datastore.experimental.redis.service import RedisApp from trove.guestagent.strategies.backup.base import BackupRunner from trove.guestagent.strategies.backup.base import UnknownBackupType from trove.guestagent.strategies.backup.experimental import couchbase_impl from trove.guestagent.strategies.backup.experimental import db2_impl from trove.guestagent.strategies.backup.experimental import mongo_impl from trove.guestagent.strategies.backup.experimental import redis_impl from trove.guestagent.strategies.backup import mysql_impl from trove.guestagent.strategies.backup.mysql_impl import MySqlApp from trove.guestagent.strategies.restore.base import RestoreRunner from trove.tests.unittests import trove_testtools def create_fake_data(): from random import choice from string import ascii_letters return ''.join([choice(ascii_letters) for _ in range(1024)]) class MockBackup(BackupRunner): """Create a large temporary file to 'backup' with subprocess.""" backup_type = 'mock_backup' def __init__(self, *args, **kwargs): self.data = create_fake_data() self.cmd = 'echo %s' % self.data super(MockBackup, self).__init__(*args, **kwargs) def cmd(self): return self.cmd class MockCheckProcessBackup(MockBackup): """Backup runner that fails confirming the process.""" def check_process(self): return False class MockLossyBackup(MockBackup): """Fake Incomplete writes to swift.""" def read(self, *args): results = super(MockLossyBackup, self).read(*args) if results: # strip a few chars from the stream return results[20:] class MockSwift(object): """Store files in String.""" def __init__(self, *args, **kwargs): self.store = '' self.containers = [] self.container = "database_backups" self.url = 'http://mockswift/v1' self.etag = hashlib.md5() def put_container(self, container): if container not in self.containers: self.containers.append(container) return None def put_object(self, container, obj, contents, **kwargs): if container not in self.containers: raise HTTPNotFound while True: if not hasattr(contents, 'read'): break content = contents.read(2 ** 16) if not content: break self.store += content self.etag.update(self.store) return self.etag.hexdigest() def save(self, filename, stream, metadata=None): location = '%s/%s/%s' % (self.url, self.container, filename) return True, 'w00t', 'fake-checksum', location def load(self, context, storage_url, container, filename, backup_checksum): pass def load_metadata(self, location, checksum): return {} def save_metadata(self, location, metadata): pass class MockStorage(Storage): def __call__(self, *args, **kwargs): return self def load(self, location, backup_checksum): pass def save(self, filename, stream, metadata=None): pass def load_metadata(self, location, checksum): return {} def save_metadata(self, location, metadata={}): pass def is_enabled(self): return True class MockRestoreRunner(RestoreRunner): def 
__init__(self, storage, **kwargs): pass def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def restore(self): pass def is_zipped(self): return False class MockStats(object): f_blocks = 1024 ** 2 f_bsize = 4096 f_bfree = 512 * 1024 class BackupAgentTest(trove_testtools.TestCase): def setUp(self): super(BackupAgentTest, self).setUp() self.patch_ope = patch.multiple('os.path', exists=DEFAULT) self.mock_ope = self.patch_ope.start() self.addCleanup(self.patch_ope.stop) self.patch_pc = patch('trove.guestagent.datastore.service.' 'BaseDbStatus.prepare_completed') self.mock_pc = self.patch_pc.start() self.mock_pc.__get__ = Mock(return_value=True) self.addCleanup(self.patch_pc.stop) self.get_auth_pwd_patch = patch.object( MySqlApp, 'get_auth_password', MagicMock(return_value='123')) self.get_auth_pwd_mock = self.get_auth_pwd_patch.start() self.addCleanup(self.get_auth_pwd_patch.stop) self.get_ss_patch = patch.object( backupagent, 'get_storage_strategy', MagicMock(return_value=MockSwift)) self.get_ss_mock = self.get_ss_patch.start() self.addCleanup(self.get_ss_patch.stop) self.statvfs_patch = patch.object( os, 'statvfs', MagicMock(return_value=MockStats)) self.statvfs_mock = self.statvfs_patch.start() self.addCleanup(self.statvfs_patch.stop) self.orig_utils_execute_with_timeout = utils.execute_with_timeout self.orig_os_get_ip_address = netutils.get_my_ipv4 def tearDown(self): super(BackupAgentTest, self).tearDown() utils.execute_with_timeout = self.orig_utils_execute_with_timeout netutils.get_my_ipv4 = self.orig_os_get_ip_address def test_backup_impl_MySQLDump(self): """This test is for guestagent/strategies/backup/mysql_impl """ mysql_dump = mysql_impl.MySQLDump( 'abc', extra_opts='') self.assertIsNotNone(mysql_dump.cmd) str_mysql_dump_cmd = ('mysqldump' ' --all-databases' ' %(extra_opts)s' ' --opt' ' --password=123' ' -u os_admin' ' 2>/tmp/mysqldump.log' ' | gzip |' ' openssl enc -aes-256-cbc -salt ' '-pass pass:default_aes_cbc_key') self.assertEqual(str_mysql_dump_cmd, mysql_dump.cmd) self.assertIsNotNone(mysql_dump.manifest) self.assertEqual('abc.gz.enc', mysql_dump.manifest) @mock.patch.object( MySqlApp, 'get_data_dir', return_value='/var/lib/mysql/data') def test_backup_impl_InnoBackupEx(self, mock_datadir): """This test is for guestagent/strategies/backup/mysql_impl """ inno_backup_ex = mysql_impl.InnoBackupEx('innobackupex', extra_opts='') self.assertIsNotNone(inno_backup_ex.cmd) str_innobackup_cmd = ('sudo innobackupex' ' --stream=xbstream' ' %(extra_opts)s' ' --user=os_admin --password=123' ' --host=localhost' ' --socket=/var/run/mysqld/mysqld.sock' ' /var/lib/mysql/data 2>/tmp/innobackupex.log' ' | gzip |' ' openssl enc -aes-256-cbc -salt ' '-pass pass:default_aes_cbc_key') self.assertEqual(str_innobackup_cmd, inno_backup_ex.cmd) self.assertIsNotNone(inno_backup_ex.manifest) str_innobackup_manifest = 'innobackupex.xbstream.gz.enc' self.assertEqual(str_innobackup_manifest, inno_backup_ex.manifest) def test_backup_impl_CbBackup(self): netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") utils.execute_with_timeout = Mock(return_value=None) cbbackup = couchbase_impl.CbBackup('cbbackup', extra_opts='') self.assertIsNotNone(cbbackup) str_cbbackup_cmd = ("tar cpPf - /tmp/backups | " "gzip | openssl enc -aes-256-cbc -salt -pass " "pass:default_aes_cbc_key") self.assertEqual(str_cbbackup_cmd, cbbackup.cmd) self.assertIsNotNone(cbbackup.manifest) self.assertIn('gz.enc', cbbackup.manifest) @mock.patch.object(db2_impl.DB2Backup, 'list_dbnames', 
return_value=['testdb1', 'testdb2']) def test_backup_impl_DB2Backup(self, _): netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") db2_backup = db2_impl.DB2Backup('db2backup', extra_opts='') self.assertIsNotNone(db2_backup) str_db2_backup_cmd = ("sudo tar cPf - /home/db2inst1/db2inst1/backup " "| gzip | openssl enc -aes-256-cbc -salt -pass " "pass:default_aes_cbc_key") self.assertEqual(str_db2_backup_cmd, db2_backup.cmd) self.assertIsNotNone(db2_backup.manifest) self.assertIn('gz.enc', db2_backup.manifest) @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') def test_backup_impl_MongoDump(self, _): netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") utils.execute_with_timeout = Mock(return_value=None) mongodump = mongo_impl.MongoDump('mongodump', extra_opts='') self.assertIsNotNone(mongodump) str_mongodump_cmd = ("sudo tar cPf - /var/lib/mongodb/dump | " "gzip | openssl enc -aes-256-cbc -salt -pass " "pass:default_aes_cbc_key") self.assertEqual(str_mongodump_cmd, mongodump.cmd) self.assertIsNotNone(mongodump.manifest) self.assertIn('gz.enc', mongodump.manifest) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) @patch.object(configuration.ConfigurationManager, 'parse_configuration', Mock(return_value={'dir': '/var/lib/redis', 'dbfilename': 'dump.rdb'})) @patch.object(RedisApp, 'get_config_command_name', Mock(return_value='fakeconfig')) def test_backup_impl_RedisBackup(self, *mocks): netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") redis_backup = redis_impl.RedisBackup('redisbackup', extra_opts='') self.assertIsNotNone(redis_backup) str_redis_backup_cmd = ("sudo cat /var/lib/redis/dump.rdb | " "gzip | openssl enc -aes-256-cbc -salt -pass " "pass:default_aes_cbc_key") self.assertEqual(str_redis_backup_cmd, redis_backup.cmd) self.assertIsNotNone(redis_backup.manifest) self.assertIn('gz.enc', redis_backup.manifest) def test_backup_base(self): """This test is for guestagent/strategies/backup/base """ BackupRunner.cmd = "%s" backup_runner = BackupRunner('sample', cmd='echo command') if backup_runner.is_zipped: self.assertEqual('.gz', backup_runner.zip_manifest) self.assertIsNotNone(backup_runner.zip_manifest) self.assertIsNotNone(backup_runner.zip_cmd) self.assertEqual(' | gzip', backup_runner.zip_cmd) else: self.assertIsNone(backup_runner.zip_manifest) self.assertIsNone(backup_runner.zip_cmd) self.assertEqual('BackupRunner', backup_runner.backup_type) @patch('os.killpg') def test_backup_runner_exits_with_exception(self, mock_kill_pg): """This test is for guestagent/strategies/backup/base, ensures that when backup runner exits with an exception, all child processes are also killed. 
""" BackupRunner.cmd = "%s" backup_runner = BackupRunner('sample', cmd='echo command') def test_backup_runner_reraise_exception(): mock_func = mock.Mock(side_effect=RuntimeError) with backup_runner: mock_func() self.assertRaises(RuntimeError, test_backup_runner_reraise_exception) self.assertTrue(mock_kill_pg.called) @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) @patch.object(conductor_api.API, 'update_backup', Mock(return_value=Mock())) def test_execute_backup(self): """This test should ensure backup agent ensures that backup and storage is not running resolves backup instance starts backup starts storage reports status """ agent = backupagent.BackupAgent() backup_info = {'id': '123', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', 'datastore': 'mysql', 'datastore_version': '5.5' } agent.execute_backup(context=None, backup_info=backup_info, runner=MockBackup) conductor_api.API.update_backup.assert_has_calls([ call( ANY, backup_id=backup_info['id'], sent=ANY, size=ANY, state=BackupState.BUILDING ), call( ANY, backup_id=backup_info['id'], checksum='fake-checksum', location=ANY, note='w00t', sent=ANY, size=ANY, backup_type=MockBackup.backup_type, state=BackupState.COMPLETED, success=True ) ]) @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) @patch.object(conductor_api.API, 'update_backup', Mock(return_value=Mock())) @patch('trove.guestagent.backup.backupagent.LOG') def test_execute_bad_process_backup(self, mock_logging): agent = backupagent.BackupAgent() backup_info = {'id': '123', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', 'datastore': 'mysql', 'datastore_version': '5.5' } self.assertRaises(backupagent.BackupError, agent.execute_backup, context=None, backup_info=backup_info, runner=MockCheckProcessBackup) conductor_api.API.update_backup.assert_has_calls([ call( ANY, backup_id=backup_info['id'], sent=ANY, size=ANY, state=BackupState.BUILDING ), call( ANY, backup_id=backup_info['id'], checksum='fake-checksum', location=ANY, note='w00t', sent=ANY, size=ANY, backup_type=MockCheckProcessBackup.backup_type, state=BackupState.FAILED, success=True ) ]) @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) @patch.object(conductor_api.API, 'update_backup', Mock(return_value=Mock())) @patch('trove.guestagent.backup.backupagent.LOG') def test_execute_lossy_backup(self, mock_logging): """This test verifies that incomplete writes to swift will fail.""" with patch.object(MockSwift, 'save', return_value=(False, 'Error', 'y', 'z')): agent = backupagent.BackupAgent() backup_info = {'id': '123', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', 'datastore': 'mysql', 'datastore_version': '5.5' } self.assertRaises(backupagent.BackupError, agent.execute_backup, context=None, backup_info=backup_info, runner=MockLossyBackup) conductor_api.API.update_backup.assert_has_calls([ call(ANY, backup_id=backup_info['id'], sent=ANY, size=ANY, state=BackupState.BUILDING ), call( ANY, backup_id=backup_info['id'], checksum='y', location='z', note='Error', sent=ANY, size=ANY, backup_type=MockLossyBackup.backup_type, state=BackupState.FAILED, success=False )] ) def test_execute_restore(self): """This test should ensure backup agent resolves backup instance determines backup/restore type transfers/downloads data and invokes the restore module reports status """ with patch.object(backupagent, 'get_storage_strategy', return_value=MockStorage): with 
patch.object(backupagent, 'get_restore_strategy', return_value=MockRestoreRunner): agent = backupagent.BackupAgent() bkup_info = {'id': '123', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', } agent.execute_restore(TroveContext(), bkup_info, '/var/lib/mysql/data') @patch('trove.guestagent.backup.backupagent.LOG') def test_restore_unknown(self, mock_logging): with patch.object(backupagent, 'get_restore_strategy', side_effect=ImportError): agent = backupagent.BackupAgent() bkup_info = {'id': '123', 'location': 'fake-location', 'type': 'foo', 'checksum': 'fake-checksum', } self.assertRaises(UnknownBackupType, agent.execute_restore, context=None, backup_info=bkup_info, restore_location='/var/lib/mysql/data') @patch.object(MySqlApp, 'get_data_dir', return_value='/var/lib/mysql/data') @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) @patch.object(MockSwift, 'load_metadata', return_value={'lsn': '54321'}) @patch.object(MockSwift, 'save') @patch.object(backupagent, 'get_storage_strategy', return_value=MockSwift) @patch('trove.guestagent.backup.backupagent.LOG') def test_backup_incremental_metadata(self, mock_logging, get_storage_strategy_mock, save_mock, load_metadata_mock, get_datadir_mock): meta = { 'lsn': '12345', 'parent_location': 'fake', 'parent_checksum': 'md5', } with patch.multiple(mysql_impl.InnoBackupExIncremental, metadata=MagicMock(return_value=meta), _run=MagicMock(return_value=True), __exit__=MagicMock(return_value=True)): agent = backupagent.BackupAgent() expected_metadata = {'datastore': 'mysql', 'datastore_version': 'bo.gus'} bkup_info = {'id': '123', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', 'parent': {'location': 'fake', 'checksum': 'md5'}} bkup_info.update(expected_metadata) agent.execute_backup(TroveContext(), bkup_info, '/var/lib/mysql/data') save_mock.assert_called_once_with( ANY, ANY, metadata=expected_metadata) @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) @patch('trove.guestagent.backup.backupagent.LOG') def test_backup_incremental_bad_metadata(self, mock_logging): with patch.object(backupagent, 'get_storage_strategy', return_value=MockSwift): agent = backupagent.BackupAgent() bkup_info = {'id': '123', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', 'parent': {'location': 'fake', 'checksum': 'md5'} } self.assertRaises( AttributeError, agent.execute_backup, TroveContext(), bkup_info, 'location') def test_backup_mysqldump_check_process(self): mysql_dump = mysql_impl.MySQLDump( 'abc', extra_opts='') str_will_be_true = 'Warning: Using a password ' \ 'on the command line interface can be insecure.' str_will_be_false = 'ERROR: mysqldump command did not succeed.' with mock.patch('trove.guestagent.strategies.backup.mysql_impl.open', mock.mock_open(read_data='')): self.assertTrue(mysql_dump.check_process()) with mock.patch('trove.guestagent.strategies.backup.mysql_impl.open', mock.mock_open(read_data=str_will_be_true)): self.assertTrue(mysql_dump.check_process()) with mock.patch('trove.guestagent.strategies.backup.mysql_impl.open', mock.mock_open(read_data=str_will_be_false)): self.assertFalse(mysql_dump.check_process()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/backup/test_storage.py0000644000175000017500000003453200000000000025043 0ustar00coreycorey00000000000000# Copyright 2013 Rackspace Development Company, L.P. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib from mock import Mock, MagicMock, patch from trove.common.strategies.storage import swift from trove.common.strategies.storage.swift import StreamReader from trove.common.strategies.storage.swift \ import SwiftDownloadIntegrityError from trove.common.strategies.storage.swift import SwiftStorage from trove.tests.fakes.swift import FakeSwiftConnection from trove.tests.unittests.backup.test_backupagent \ import MockBackup as MockBackupRunner from trove.tests.unittests import trove_testtools class SwiftStorageSaveChecksumTests(trove_testtools.TestCase): """SwiftStorage.save is used to save a backup to Swift.""" def setUp(self): super(SwiftStorageSaveChecksumTests, self).setUp() self.max_file_size = swift.MAX_FILE_SIZE swift.MAX_FILE_SIZE = 128 def tearDown(self): swift.MAX_FILE_SIZE = self.max_file_size super(SwiftStorageSaveChecksumTests, self).tearDown() def test_swift_small_file_checksum_save(self): """This tests that SwiftStorage.save returns the swift checksum for small files. """ context = trove_testtools.TroveTestContext(self) backup_id = '123' user = 'user' password = 'password' swift.MAX_FILE_SIZE = 2 * (1024 ** 3) swift_client = FakeSwiftConnection() with patch.object(swift, 'create_swift_client', return_value=swift_client): storage_strategy = SwiftStorage(context) with MockBackupRunner(filename=backup_id, user=user, password=password) as runner: (success, note, checksum, location) = storage_strategy.save(runner.manifest, runner) self.assertTrue(success, "The backup should have been successful.") self.assertIsNotNone(note, "A note should have been returned.") self.assertEqual('http://mockswift/v1/database_backups/123.gz.enc', location, "Incorrect swift location was returned.") def test_swift_checksum_save(self): """This tests that SwiftStorage.save returns the swift checksum for large files. 
""" context = trove_testtools.TroveTestContext(self) backup_id = '123' user = 'user' password = 'password' swift_client = FakeSwiftConnection() with patch.object(swift, 'create_swift_client', return_value=swift_client): storage_strategy = SwiftStorage(context) with MockBackupRunner(filename=backup_id, user=user, password=password) as runner: (success, note, checksum, location) = storage_strategy.save(runner.manifest, runner) self.assertTrue(success, "The backup should have been successful.") self.assertIsNotNone(note, "A note should have been returned.") self.assertEqual('http://mockswift/v1/database_backups/123.gz.enc', location, "Incorrect swift location was returned.") @patch('trove.common.strategies.storage.swift.LOG') def test_swift_segment_checksum_etag_mismatch(self, mock_logging): """This tests that when etag doesn't match segment uploaded checksum False is returned and None for checksum and location """ context = trove_testtools.TroveTestContext(self) # this backup_id will trigger fake swift client with calculate_etag # enabled to spit out a bad etag when a segment object is uploaded backup_id = 'bad_segment_etag_123' user = 'user' password = 'password' swift_client = FakeSwiftConnection() with patch.object(swift, 'create_swift_client', return_value=swift_client): storage_strategy = SwiftStorage(context) with MockBackupRunner(filename=backup_id, user=user, password=password) as runner: (success, note, checksum, location) = storage_strategy.save(runner.manifest, runner) self.assertFalse(success, "The backup should have failed!") self.assertTrue(note.startswith("Error saving data to Swift!")) self.assertIsNone(checksum, "Swift checksum should be None for failed backup.") self.assertEqual('http://mockswift/v1/database_backups/' 'bad_segment_etag_123.gz.enc', location, "Incorrect swift location was returned.") @patch('trove.common.strategies.storage.swift.LOG') def test_swift_checksum_etag_mismatch(self, mock_logging): """This tests that when etag doesn't match swift checksum False is returned and None for checksum and location """ context = trove_testtools.TroveTestContext(self) # this backup_id will trigger fake swift client with calculate_etag # enabled to spit out a bad etag when a segment object is uploaded backup_id = 'bad_manifest_etag_123' user = 'user' password = 'password' swift_client = FakeSwiftConnection() with patch.object(swift, 'create_swift_client', return_value=swift_client): storage_strategy = SwiftStorage(context) with MockBackupRunner(filename=backup_id, user=user, password=password) as runner: (success, note, checksum, location) = storage_strategy.save(runner.manifest, runner) self.assertFalse(success, "The backup should have failed!") self.assertTrue(note.startswith("Error saving data to Swift!")) self.assertIsNone(checksum, "Swift checksum should be None for failed backup.") self.assertEqual('http://mockswift/v1/database_backups/' 'bad_manifest_etag_123.gz.enc', location, "Incorrect swift location was returned.") class SwiftStorageUtils(trove_testtools.TestCase): def setUp(self): super(SwiftStorageUtils, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.swift_client = FakeSwiftConnection() self.create_swift_client_patch = patch.object( swift, 'create_swift_client', MagicMock(return_value=self.swift_client)) self.create_swift_client_mock = self.create_swift_client_patch.start() self.addCleanup(self.create_swift_client_patch.stop) self.swift = SwiftStorage(self.context) def tearDown(self): super(SwiftStorageUtils, self).tearDown() def 
test_explode_location(self): location = 'http://mockswift.com/v1/545433/backups/mybackup.tar' url, container, filename = self.swift._explodeLocation(location) self.assertEqual('http://mockswift.com/v1/545433', url) self.assertEqual('backups', container) self.assertEqual('mybackup.tar', filename) def test_validate_checksum_good(self): match = self.swift._verify_checksum('"my-good-etag"', 'my-good-etag') self.assertTrue(match) @patch('trove.common.strategies.storage.swift.LOG') def test_verify_checksum_bad(self, mock_logging): self.assertRaises(SwiftDownloadIntegrityError, self.swift._verify_checksum, '"THE-GOOD-THE-BAD"', 'AND-THE-UGLY') class SwiftStorageLoad(trove_testtools.TestCase): """SwiftStorage.load is used to return SwiftDownloadStream which is used to download a backup object from Swift """ def setUp(self): super(SwiftStorageLoad, self).setUp() def tearDown(self): super(SwiftStorageLoad, self).tearDown() def test_run_verify_checksum(self): """This tests that swift download cmd runs if original backup checksum matches swift object etag """ context = trove_testtools.TroveTestContext(self) location = "/backup/location/123" backup_checksum = "fake-md5-sum" swift_client = FakeSwiftConnection() with patch.object(swift, 'create_swift_client', return_value=swift_client): storage_strategy = SwiftStorage(context) download_stream = storage_strategy.load(location, backup_checksum) self.assertIsNotNone(download_stream) @patch('trove.common.strategies.storage.swift.LOG') def test_run_verify_checksum_mismatch(self, mock_logging): """This tests that SwiftDownloadIntegrityError is raised and swift download cmd does not run when original backup checksum does not match swift object etag """ context = trove_testtools.TroveTestContext(self) location = "/backup/location/123" backup_checksum = "checksum_different_then_fake_swift_etag" swift_client = FakeSwiftConnection() with patch.object(swift, 'create_swift_client', return_value=swift_client): storage_strategy = SwiftStorage(context) self.assertRaises(SwiftDownloadIntegrityError, storage_strategy.load, location, backup_checksum) class MockBackupStream(MockBackupRunner): def read(self, chunk_size): return b'X' * chunk_size class StreamReaderTests(trove_testtools.TestCase): def setUp(self): super(StreamReaderTests, self).setUp() self.runner = MockBackupStream(filename='123.xbstream.enc.gz', user='user', password='password') self.stream = StreamReader(self.runner, self.runner.manifest, max_file_size=100) def test_base_filename(self): self.assertEqual('123', self.stream.base_filename) def test_base_filename_no_extension(self): stream_reader = StreamReader(self.runner, 'foo') self.assertEqual('foo', stream_reader.base_filename) def test_segment(self): self.assertEqual('123_00000000', self.stream.segment) def test_end_of_file(self): self.assertFalse(self.stream.end_of_file) def test_end_of_segment(self): self.assertFalse(self.stream.end_of_segment) def test_segment_almost_complete(self): self.stream.segment_length = 98 results = self.stream.read(2) self.assertEqual(b'XX', results) self.assertEqual('123_00000000', self.stream.segment, "The Segment should still be the same") self.assertEqual(100, self.stream.segment_length) checksum = hashlib.md5(b'XX') checksum = checksum.hexdigest() segment_checksum = self.stream.segment_checksum.hexdigest() self.assertEqual(checksum, segment_checksum, "Segment checksum did not match") def test_segment_complete(self): self.stream.segment_length = 99 results = self.stream.read(2) self.assertEqual('', results, "Results should 
be empty.") self.assertEqual('123_00000001', self.stream.segment) def test_stream_complete(self): results = self.stream.read(0) self.assertEqual('', results, "Results should be empty.") self.assertTrue(self.stream.end_of_file) class SwiftMetadataTests(trove_testtools.TestCase): def setUp(self): super(SwiftMetadataTests, self).setUp() self.swift_client = FakeSwiftConnection() self.context = trove_testtools.TroveTestContext(self) self.create_swift_client_patch = patch.object( swift, 'create_swift_client', MagicMock(return_value=self.swift_client)) self.create_swift_client_mock = self.create_swift_client_patch.start() self.addCleanup(self.create_swift_client_patch.stop) self.swift = SwiftStorage(self.context) def tearDown(self): super(SwiftMetadataTests, self).tearDown() def test__get_attr(self): normal_header = self.swift._get_attr('content-type') self.assertEqual('content_type', normal_header) meta_header = self.swift._get_attr('x-object-meta-foo') self.assertEqual('foo', meta_header) meta_header_two = self.swift._get_attr('x-object-meta-foo-bar') self.assertEqual('foo_bar', meta_header_two) def test__set_attr(self): meta_header = self.swift._set_attr('foo') self.assertEqual('X-Object-Meta-foo', meta_header) meta_header_two = self.swift._set_attr('foo_bar') self.assertEqual('X-Object-Meta-foo-bar', meta_header_two) def test_load_metadata(self): location = 'http://mockswift.com/v1/545433/backups/mybackup.tar' headers = { 'etag': '"fake-md5-sum"', 'x-object-meta-lsn': '1234567' } with patch.object(self.swift_client, 'head_object', return_value=headers): metadata = self.swift.load_metadata(location, 'fake-md5-sum') self.assertEqual({'lsn': '1234567'}, metadata) def test_save_metadata(self): location = 'http://mockswift.com/v1/545433/backups/mybackup.tar' metadata = {'lsn': '1234567'} self.swift_client.post_object = Mock() self.swift.save_metadata(location, metadata=metadata) headers = { 'X-Object-Meta-lsn': '1234567' } self.swift_client.post_object.assert_called_with( 'backups', 'mybackup.tar', headers=headers) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7921112 trove-12.1.0.dev92/trove/tests/unittests/cluster/0000755000175000017500000000000000000000000022173 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/cluster/__init__.py0000644000175000017500000000000000000000000024272 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/cluster/test_cassandra_cluster.py0000644000175000017500000001026400000000000027307 0ustar00coreycorey00000000000000# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
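# ---------------------------------------------------------------------------
# Editor's note: a minimal standalone sketch (not part of the original
# module) of the seed-selection rule that test_choose_seed_nodes below
# asserts -- one seed per distinct (data center, rack) pair. The real logic
# lives in CassandraClusterTasks.choose_seed_nodes; this toy version only
# assumes nodes shaped like the dicts _build_mock_nodes creates.

def _choose_seed_ips(nodes):
    """Return one node IP per distinct (dc, rack) combination."""
    seeds = {}
    for node in nodes:
        # The first node seen for a (dc, rack) pair becomes its seed.
        seeds.setdefault((node['dc'], node['rack']), node['ip'])
    return set(seeds.values())

# Example: three nodes all in ('dc1', 'rack1') yield one seed; spreading
# them over two racks yields two, and over two racks plus a second data
# center yields three, matching the assertions in the tests below.
# ---------------------------------------------------------------------------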
from mock import ANY
from mock import MagicMock
from mock import Mock
from mock import patch

from trove.cluster import models
from trove.common.strategies.cluster.experimental.cassandra.api \
    import CassandraCluster
from trove.common.strategies.cluster.experimental.cassandra.taskmanager \
    import CassandraClusterTasks
from trove.instance import models as inst_models
from trove.quota import quota
from trove.tests.unittests import trove_testtools


class ClusterTest(trove_testtools.TestCase):

    def setUp(self):
        super(ClusterTest, self).setUp()
        self.context = trove_testtools.TroveTestContext(self)

    def tearDown(self):
        super(ClusterTest, self).tearDown()

    @patch.object(inst_models.Instance, 'create')
    @patch.object(quota.QUOTAS, 'check_quotas')
    @patch.object(models, 'assert_homogeneous_cluster')
    @patch.object(models, 'validate_instance_nics')
    @patch.object(models, 'validate_instance_flavors')
    @patch.object(models, 'get_required_volume_size', return_value=3)
    def test_create_cluster_instances(self, get_vol_size, _,
                                      mock_validate_nics,
                                      mock_homogeneous_cluster,
                                      check_quotas, inst_create):
        test_instances = [MagicMock(), MagicMock()]
        num_instances = len(test_instances)
        datastore = Mock(manager='cassandra')
        datastore_version = Mock(manager='cassandra')

        with patch.object(CassandraClusterTasks, 'find_cluster_node_ids',
                          return_value=[inst.id for inst in test_instances]):
            CassandraCluster._create_cluster_instances(
                self.context, 'test_cluster_id', 'test_cluster',
                datastore, datastore_version,
                test_instances, None, None, None)

        check_quotas.assert_called_once_with(
            ANY, instances=num_instances,
            volumes=get_vol_size.return_value)
        self.assertEqual(num_instances, inst_create.call_count,
                         "Unexpected number of instances created.")

    def test_choose_seed_nodes(self):
        nodes = self._build_mock_nodes(3)

        seeds = CassandraClusterTasks.choose_seed_nodes(nodes)
        self.assertEqual(1, len(seeds),
                         "Only one seed node should be selected for a "
                         "single-rack-single-dc cluster.")

        nodes = self._build_mock_nodes(3)
        nodes[0]['rack'] = 'rack1'
        nodes[1]['rack'] = 'rack2'
        seeds = CassandraClusterTasks.choose_seed_nodes(nodes)
        self.assertEqual(2, len(seeds),
                         "There should be exactly two seed nodes. "
                         "One from each rack.")

        nodes = self._build_mock_nodes(3)
        nodes[0]['rack'] = 'rack1'
        nodes[1]['rack'] = 'rack2'
        nodes[2]['dc'] = 'dc2'
        seeds = CassandraClusterTasks.choose_seed_nodes(nodes)
        self.assertEqual(3, len(seeds),
                         "There should be exactly three seed nodes. "
                         "One from each rack and data center.")

    def _build_mock_nodes(self, num_nodes):
        nodes = []
        for _ in range(num_nodes):
            mock_instance = MagicMock()
            nodes.append({'instance': mock_instance,
                          'guest': MagicMock(),
                          'id': mock_instance.id,
                          'ip': '%s_IP' % mock_instance.id,
                          'dc': 'dc1',
                          'rack': 'rack1'
                          })
        return nodes

trove-12.1.0.dev92/trove/tests/unittests/cluster/test_cluster.py

# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import uuid from mock import Mock from mock import patch from trove.cluster.models import Cluster from trove.cluster.models import ClusterTasks from trove.cluster.models import DBCluster from trove.common import cfg from trove.common import clients from trove.common import exception from trove.common.strategies.cluster.experimental.mongodb import ( api as mongodb_api) from trove.common import utils from trove.datastore import models as datastore_models from trove.instance import models as inst_models from trove.instance.models import DBInstance from trove.instance.tasks import InstanceTasks from trove.quota.quota import QUOTAS from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools CONF = cfg.CONF class ClusterTest(trove_testtools.TestCase): def setUp(self): super(ClusterTest, self).setUp() self.get_client_patch = patch.object(task_api.API, 'get_client') self.get_client_mock = self.get_client_patch.start() self.addCleanup(self.get_client_patch.stop) self.cluster_id = str(uuid.uuid4()) self.cluster_name = "Cluster" + self.cluster_id self.tenant_id = "23423432" self.dv_id = "1" self.db_info = DBCluster(ClusterTasks.NONE, id=self.cluster_id, name=self.cluster_name, tenant_id=self.tenant_id, datastore_version_id=self.dv_id, task_id=ClusterTasks.NONE._code) self.context = trove_testtools.TroveTestContext(self) self.datastore = Mock() self.dv = Mock() self.dv.manager = "mongodb" self.datastore_version = self.dv self.cluster = mongodb_api.MongoDbCluster(self.context, self.db_info, self.datastore, self.datastore_version) self.cluster._server_group_loaded = True self.instances = [{'volume_size': 1, 'flavor_id': '1234'}, {'volume_size': 1, 'flavor_id': '1234'}, {'volume_size': 1, 'flavor_id': '1234'}] self.volume_support = CONF.get(self.dv.manager).volume_support self.remote_nova = clients.create_nova_client def tearDown(self): super(ClusterTest, self).tearDown() CONF.get(self.dv.manager).volume_support = self.volume_support clients.create_nova_client = self.remote_nova def test_create_empty_instances(self): self.assertRaises(exception.ClusterNumInstancesNotSupported, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, [], {}, None, None) @patch.object(clients, 'create_nova_client') def test_create_unequal_flavors(self, mock_client): instances = self.instances instances[0]['flavor_id'] = '4567' self.assertRaises(exception.ClusterFlavorsNotEqual, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(clients, 'create_nova_client') def test_create_unequal_volumes(self, mock_client): instances = self.instances instances[0]['volume_size'] = 2 flavors = Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.ClusterVolumeSizesNotEqual, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(clients, 'create_nova_client') def test_create_storage_not_specified(self, mock_client): class FakeFlavor(object): def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor.id @property def ephemeral(self): return 0 instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] CONF.get(self.dv.manager).volume_support = False (mock_client.return_value. 
flavors.get.return_value) = FakeFlavor('1234') self.assertRaises(exception.LocalStorageNotSpecified, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch('trove.cluster.models.LOG') def test_delete_bad_task_status(self, mock_logging): self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL self.assertRaises(exception.UnprocessableEntity, self.cluster.delete) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_none(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.NONE self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_deleting(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.DELETING self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING) @patch('trove.common.strategies.cluster.experimental.mongodb.api.LOG') def test_add_shard_bad_task_status(self, mock_logging): task_status = ClusterTasks.BUILDING_INITIAL self.cluster.db_info.task_status = task_status self.assertRaises(exception.UnprocessableEntity, self.cluster.add_shard) @patch.object(utils, 'generate_uuid', Mock(return_value='new-shard-id')) @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch.object(task_api, 'load') @patch.object(Cluster, 'update_db') @patch.object(inst_models.Instance, 'create') @patch.object(QUOTAS, 'check_quotas') @patch.object(inst_models, 'load_any_instance') @patch.object(inst_models.DBInstance, 'find_all') def test_add_shard(self, mock_find_all, mock_load_any_instance, mock_check_quotas, mock_instance_create, mock_update_db, mock_task_api_load, mock_load_by_uuid): self.cluster.db_info.task_status = ClusterTasks.NONE (mock_find_all.return_value .all.return_value) = [DBInstance(InstanceTasks.NONE, name="TestInstance1", shard_id="1", id='1', datastore_version_id='1'), DBInstance(InstanceTasks.NONE, name="TestInstance2", shard_id="1", id='2', datastore_version_id='1'), DBInstance(InstanceTasks.NONE, name="TestInstance3", shard_id="1", id='3', datastore_version_id='1')] mock_datastore_version = Mock() mock_datastore_version.manager = 'mongodb' mock_load_by_uuid.return_value = mock_datastore_version mock_task_api = Mock() mock_task_api.mongodb_add_shard_cluster.return_value = None mock_task_api_load.return_value = mock_task_api self.cluster.add_shard() mock_update_db.assert_called_with( task_status=ClusterTasks.ADDING_SHARD) mock_task_api.mongodb_add_shard_cluster.assert_called_with( self.cluster.id, 'new-shard-id', 'rs2') @patch('trove.cluster.models.LOG') def test_upgrade_not_implemented(self, mock_logging): self.assertRaises(exception.BadRequest, self.cluster.upgrade, "foo") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/cluster/test_cluster_controller.py0000644000175000017500000004206500000000000027537 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import jsonschema from mock import MagicMock from mock import Mock from mock import patch from testtools.matchers import Is, Equals from trove.cluster import models from trove.cluster.models import Cluster, DBCluster from trove.cluster.service import ClusterController from trove.cluster.tasks import ClusterTasks from trove.cluster import views import trove.common.cfg as cfg from trove.common import exception from trove.common.strategies.cluster import strategy from trove.common import utils from trove.datastore import models as datastore_models from trove.tests.unittests import trove_testtools class TestClusterController(trove_testtools.TestCase): def setUp(self): super(TestClusterController, self).setUp() self.controller = ClusterController() self.locality = 'anti-affinity' instances = [ { "flavorRef": "7", "volume": { "size": 1 }, "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] } ] * 5 self.cluster = { "cluster": { "name": "products", "datastore": { "type": "mongodb", "version": "2.4.10" }, "instances": instances, "locality": self.locality, } } self.add_shard = { "add_shard": {} } self.grow = { "grow": [ {"flavorRef": "7"}, ] } self.shrink = { "shrink": [ {"id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}, ] } self.upgrade = { "upgrade": { "datastore_version": "2.4.10" } } def test_get_schema_create(self): schema = self.controller.get_schema('create', self.cluster) self.assertIsNotNone(schema) self.assertIn('cluster', schema['properties']) def test_get_schema_action_add_shard(self): schema = self.controller.get_schema('action', self.add_shard) self.assertIsNotNone(schema) self.assertIn('add_shard', schema['properties']) def test_get_schema_action_grow(self): schema = self.controller.get_schema('action', self.grow) self.assertIsNotNone(schema) self.assertIn('grow', schema['properties']) def test_get_schema_action_shrink(self): schema = self.controller.get_schema('action', self.shrink) self.assertIsNotNone(schema) self.assertIn('shrink', schema['properties']) def test_get_schema_action_upgrade(self): schema = self.controller.get_schema('action', self.upgrade) self.assertIsNotNone(schema) self.assertIn('upgrade', schema['properties']) def test_get_schema_action_invalid(self): schema = self.controller.get_schema('action', {'wow': {}}) self.assertIsNotNone(schema) self.assertThat(len(schema.keys()), Is(0)) def test_validate_create(self): body = self.cluster schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_add_shard(self): body = self.add_shard schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_grow(self): body = self.grow schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_shrink(self): body = self.shrink schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) 
self.assertTrue(validator.is_valid(body)) def test_validate_upgrade(self): body = self.upgrade schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_blankname(self): body = self.cluster body['cluster']['name'] = " " schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'")) def test_validate_create_blank_datastore(self): body = self.cluster body['cluster']['datastore']['type'] = "" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("type", error_paths) def test_validate_create_bad_locality(self): body = self.cluster body['cluster']['locality'] = "$%^&" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(1)) self.assertIn("'$%^&' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("locality", error_paths) @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters_disabled(self, mock_get_datastore_version): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mysql' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaises(exception.ClusterDatastoreNotSupported, self.controller.create, req, body, tenant_id) @patch.object(Cluster, 'create') @patch.object(utils, 'get_id_from_href') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters(self, mock_get_datastore_version, mock_id_from_href, mock_cluster_create): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mongodb' datastore = Mock() mock_get_datastore_version.return_value = (datastore, datastore_version) instances = [ { 'volume_size': 1, 'volume_type': None, 'flavor_id': '1234', 'availability_zone': 'az', 'modules': None, 'region_name': None, 'nics': [ {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'} ] } ] * 5 mock_id_from_href.return_value = '1234' mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'mongodb' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) mock_cluster_create.assert_called_with(context, 'products', datastore, datastore_version, instances, {}, self.locality, 
None) @patch.object(Cluster, 'load') def test_show_cluster(self, mock_cluster_load): tenant_id = Mock() id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'mongodb' mock_cluster_load.return_value = mock_cluster mock_cluster.locality = self.locality self.controller.show(req, tenant_id, id) mock_cluster_load.assert_called_with(context, id) @patch.object(Cluster, 'load') @patch.object(Cluster, 'load_instance') def test_show_cluster_instance(self, mock_cluster_load_instance, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() instance_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) cluster = Mock() mock_cluster_load.return_value = cluster cluster.id = cluster_id self.controller.show_instance(req, tenant_id, cluster_id, instance_id) mock_cluster_load_instance.assert_called_with(context, cluster.id, instance_id) @patch.object(Cluster, 'load') def test_delete_cluster(self, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() req = MagicMock() cluster = Mock() trove_testtools.patch_notifier(self) mock_cluster_load.return_value = cluster self.controller.delete(req, tenant_id, cluster_id) cluster.delete.assert_called_with() class TestClusterControllerWithStrategy(trove_testtools.TestCase): def setUp(self): super(TestClusterControllerWithStrategy, self).setUp() self.controller = ClusterController() self.cluster = { "cluster": { "name": "products", "datastore": { "type": "mongodb", "version": "2.4.10" }, "instances": [ { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, } ] } } def tearDown(self): super(TestClusterControllerWithStrategy, self).tearDown() cfg.CONF.clear_override('cluster_support', group='mongodb') cfg.CONF.clear_override('api_strategy', group='mongodb') @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_disabled(self, mock_cluster_create, mock_get_datastore_version): cfg.CONF.set_override('cluster_support', False, group='mongodb') body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mongodb' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaises(exception.TroveError, self.controller.create, req, body, tenant_id) @patch.object(views.ClusterView, 'data', return_value={}) @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_enabled(self, mock_cluster_create, mock_get_datastore_version, mock_cluster_view_data): cfg.CONF.set_override('cluster_support', True, group='mongodb') body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mongodb' mock_get_datastore_version.return_value = (Mock(), datastore_version) mock_cluster = Mock() 
        mock_cluster.datastore_version.manager = 'mongodb'
        mock_cluster_create.return_value = mock_cluster
        self.controller.create(req, body, tenant_id)

    @patch.object(models.Cluster, 'load')
    def test_controller_action_multi_action(self, mock_cluster_load):
        body = {'do_stuff': {}, 'do_stuff2': {}}
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        cluster_id = Mock()
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        cluster = Mock()
        cluster.instances_without_server = [Mock()]
        cluster.datastore_version.manager = 'test_dsv'
        mock_cluster_load.return_value = cluster
        self.assertRaisesRegex(exception.TroveError,
                               'should have exactly one action specified',
                               self.controller.action, req, body,
                               tenant_id, cluster_id)

    @patch.object(models.Cluster, 'load')
    def test_controller_action_no_strategy(self, mock_cluster_load):
        body = {'do_stuff2': {}}
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        cluster_id = Mock()
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        db_info = DBCluster(ClusterTasks.NONE, id=cluster_id,
                            tenant_id=tenant_id)
        cluster = Cluster(context, db_info, datastore='test_ds',
                          datastore_version='test_dsv')
        mock_cluster_load.return_value = cluster
        self.assertRaisesRegex(exception.TroveError,
                               'Action do_stuff2 not supported',
                               self.controller.action, req, body,
                               tenant_id, cluster_id)

    @patch.object(strategy, 'load_api_strategy')
    @patch.object(models.Cluster, 'load')
    def test_controller_action_found(self, mock_cluster_load,
                                     mock_cluster_api_strategy):
        body = {'grow': {}}
        tenant_id = Mock()
        context = trove_testtools.TroveTestContext(self)
        cluster_id = 'test_uuid'
        req = Mock()
        req.environ = MagicMock()
        req.environ.get = Mock(return_value=context)
        cluster = Mock()
        cluster.instances_without_server = [Mock()]
        cluster.datastore_version.manager = 'test_dsv'
        mock_cluster_load.return_value = cluster
        self.controller.action(req, body, tenant_id, cluster_id)
        self.assertEqual(1, cluster.action.call_count)

trove-12.1.0.dev92/trove/tests/unittests/cluster/test_cluster_models.py

# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
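# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of the original module).
# The controller and model tests in this package all hinge on one dispatch
# idea -- a datastore manager name (e.g. 'mongodb') selects a cluster
# strategy class, and an unknown manager means the action is unsupported.
# The real lookup lives in trove.common.strategies.cluster.strategy; the
# names below (_BaseCluster, _load_cluster, ...) are hypothetical.

class _BaseCluster(object):
    def __init__(self, manager):
        self.manager = manager

class _MongoDbCluster(_BaseCluster):
    pass

_CLUSTER_STRATEGIES = {'mongodb': _MongoDbCluster}

def _load_cluster(manager):
    """Return a cluster object of the class registered for `manager`."""
    try:
        cls = _CLUSTER_STRATEGIES[manager]
    except KeyError:
        raise ValueError('Action not supported for %s' % manager)
    return cls(manager)

# Example: _load_cluster('mongodb') returns a _MongoDbCluster, while
# _load_cluster('test_dsv') raises -- the same shape as
# TestClusterModel.test_load below and test_controller_action_no_strategy
# above.
# ---------------------------------------------------------------------------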
# ===== trove-12.1.0.dev92/trove/tests/unittests/cluster/test_cluster_models.py =====
# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import Mock, patch

from trove.cluster import models
from trove.common.strategies.cluster.experimental.mongodb.api import (
    MongoDbCluster)
from trove.datastore import models as datastore_models
from trove.instance import models as instance_models
from trove.tests.unittests import trove_testtools


class TestClusterModel(trove_testtools.TestCase):

    @patch.object(datastore_models.Datastore, 'load')
    @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
    @patch.object(models.DBCluster, 'find_by')
    @patch.object(instance_models.Instances, 'load_all_by_cluster_id')
    def test_load(self, mock_inst_load, mock_find_by, mock_load_dsv_by_uuid,
                  mock_ds_load):
        context = trove_testtools.TroveTestContext(self)
        id = Mock()
        inst_mock = Mock()
        server_group = Mock()
        inst_mock.server_group = server_group
        mock_inst_load.return_value = [inst_mock]
        dsv = Mock()
        dsv.manager = 'mongodb'
        mock_load_dsv_by_uuid.return_value = dsv

        cluster = models.Cluster.load(context, id)
        self.assertIsInstance(cluster, MongoDbCluster)
        self.assertEqual(server_group, cluster.server_group,
                         "Unexpected server group")

# ===== trove-12.1.0.dev92/trove/tests/unittests/cluster/test_cluster_pxc_controller.py =====
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
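# NOTE (editor's sketch): the PXC controller tests below, like the other
# controller tests in this package, validate request bodies through
# jsonschema's Draft4Validator. The essential pattern, shown against a
# hypothetical minimal schema rather than the controller's real one:
import jsonschema

_schema = {
    'type': 'object',
    'properties': {'cluster': {'type': 'object'}},
    'required': ['cluster'],
}
_validator = jsonschema.Draft4Validator(_schema)
assert _validator.is_valid({'cluster': {'name': 'products'}})
# Invalid bodies are inspected error-by-error, as the tests do:
_errors = list(_validator.iter_errors({}))
assert _errors[0].message == "'cluster' is a required property"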
import jsonschema from mock import MagicMock from mock import Mock from mock import patch from testtools.matchers import Is, Equals from trove.cluster import models from trove.cluster.models import Cluster from trove.cluster.service import ClusterController from trove.cluster import views import trove.common.cfg as cfg from trove.common import exception from trove.common import utils from trove.datastore import models as datastore_models from trove.tests.unittests import trove_testtools class TestClusterController(trove_testtools.TestCase): def setUp(self): super(TestClusterController, self).setUp() self.controller = ClusterController() instances = [ { "flavorRef": "7", "volume": { "size": 1 }, "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] } ] * 3 self.cluster = { "cluster": { "name": "products", "datastore": { "type": "pxc", "version": "5.5" }, "instances": instances } } def test_get_schema_create(self): schema = self.controller.get_schema('create', self.cluster) self.assertIsNotNone(schema) self.assertIn('cluster', schema['properties']) self.assertIsNotNone(schema['properties']['cluster']) def test_validate_create(self): body = self.cluster schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_blankname(self): body = self.cluster body['cluster']['name'] = " " schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'")) def test_validate_create_blank_datastore(self): body = self.cluster body['cluster']['datastore']['type'] = "" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("type", error_paths) @patch.object(Cluster, 'create') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters_disabled(self, mock_get_datastore_version, mock_cluster_create): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mysql' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaises(exception.ClusterDatastoreNotSupported, self.controller.create, req, body, tenant_id) @patch.object(Cluster, 'create') @patch.object(utils, 'get_id_from_href') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters(self, mock_get_datastore_version, mock_id_from_href, mock_cluster_create): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'pxc' datastore = Mock() mock_get_datastore_version.return_value = (datastore, datastore_version) instances = [ { 
'volume_size': 1, 'volume_type': None, 'flavor_id': '1234', 'availability_zone': 'az', 'modules': None, 'region_name': None, 'nics': [ {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'} ] } ] * 3 mock_id_from_href.return_value = '1234' mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'pxc' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) mock_cluster_create.assert_called_with(context, 'products', datastore, datastore_version, instances, {}, None, None) @patch.object(Cluster, 'load') def test_show_cluster(self, mock_cluster_load): tenant_id = Mock() id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'pxc' mock_cluster_load.return_value = mock_cluster self.controller.show(req, tenant_id, id) mock_cluster_load.assert_called_with(context, id) @patch.object(Cluster, 'load') @patch.object(Cluster, 'load_instance') def test_show_cluster_instance(self, mock_cluster_load_instance, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() instance_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) cluster = Mock() mock_cluster_load.return_value = cluster cluster.id = cluster_id self.controller.show_instance(req, tenant_id, cluster_id, instance_id) mock_cluster_load_instance.assert_called_with(context, cluster.id, instance_id) @patch.object(Cluster, 'load') def test_delete_cluster(self, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() req = MagicMock() cluster = Mock() trove_testtools.patch_notifier(self) mock_cluster_load.return_value = cluster self.controller.delete(req, tenant_id, cluster_id) cluster.delete.assert_called_with() class TestClusterControllerWithStrategy(trove_testtools.TestCase): def setUp(self): super(TestClusterControllerWithStrategy, self).setUp() self.controller = ClusterController() self.cluster = { "cluster": { "name": "products", "datastore": { "type": "pxc", "version": "5.5" }, "instances": [ { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, ] } } def tearDown(self): super(TestClusterControllerWithStrategy, self).tearDown() cfg.CONF.clear_override('cluster_support', group='pxc') cfg.CONF.clear_override('api_strategy', group='pxc') @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_disabled(self, mock_cluster_create, mock_get_datastore_version): cfg.CONF.set_override('cluster_support', False, group='pxc') body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'pxc' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaises(exception.TroveError, self.controller.create, req, body, tenant_id) @patch.object(views.ClusterView, 'data', return_value={}) @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_enabled(self, mock_cluster_create, mock_get_datastore_version, mock_cluster_view_data): 
cfg.CONF.set_override('cluster_support', True, group='pxc') body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'pxc' mock_get_datastore_version.return_value = (Mock(), datastore_version) mock_cluster = Mock() mock_cluster.datastore_version.manager = 'pxc' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/cluster/test_cluster_redis_controller.py0000644000175000017500000003065500000000000030727 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import jsonschema from mock import MagicMock from mock import Mock from mock import patch from testtools.matchers import Is, Equals from trove.cluster import models from trove.cluster.models import Cluster from trove.cluster.service import ClusterController from trove.cluster import views import trove.common.cfg as cfg from trove.common import exception from trove.common import utils from trove.datastore import models as datastore_models from trove.tests.unittests import trove_testtools class TestClusterController(trove_testtools.TestCase): def setUp(self): super(TestClusterController, self).setUp() self.controller = ClusterController() instances = [ { "volume_size": None, "flavorRef": "7", "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] }, { "volume_size": None, "flavorRef": "8", "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] }, { "volume_size": None, "flavorRef": "7", "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] } ] self.cluster = { "cluster": { "name": "products", "datastore": { "type": "redis", "version": "3.0" }, "instances": instances } } def test_get_schema_create(self): schema = self.controller.get_schema('create', self.cluster) self.assertIsNotNone(schema) self.assertIn('cluster', schema['properties']) self.assertIsNotNone(schema['properties']['cluster']) def test_validate_create(self): body = self.cluster schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_blankname(self): body = self.cluster body['cluster']['name'] = " " schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'")) def test_validate_create_blank_datastore(self): body = self.cluster body['cluster']['datastore']['type'] = "" schema = 
self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("type", error_paths) @patch.object(Cluster, 'create') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters_disabled(self, mock_get_datastore_version, mock_cluster_create): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mysql' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaisesRegex(exception.ClusterDatastoreNotSupported, "Clusters not supported for", self.controller.create, req, body, tenant_id) @patch.object(Cluster, 'create') @patch.object(utils, 'get_id_from_href') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters(self, mock_get_datastore_version, mock_id_from_href, mock_cluster_create): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'redis' datastore = Mock() mock_get_datastore_version.return_value = (datastore, datastore_version) instances = [ { "volume_size": None, 'volume_type': None, "flavor_id": "1234", "availability_zone": "az", 'modules': None, 'region_name': None, "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] }, { "volume_size": None, 'volume_type': None, "flavor_id": "1234", "availability_zone": "az", 'modules': None, 'region_name': None, "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] }, { "volume_size": None, 'volume_type': None, "flavor_id": "1234", "availability_zone": "az", 'modules': None, 'region_name': None, "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] } ] mock_id_from_href.return_value = '1234' mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'redis' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) mock_cluster_create.assert_called_with(context, 'products', datastore, datastore_version, instances, {}, None, None) @patch.object(Cluster, 'load') def test_show_cluster(self, mock_cluster_load): tenant_id = Mock() id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'redis' mock_cluster_load.return_value = mock_cluster self.controller.show(req, tenant_id, id) mock_cluster_load.assert_called_with(context, id) @patch.object(Cluster, 'load') @patch.object(Cluster, 'load_instance') def test_show_cluster_instance(self, mock_cluster_load_instance, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() instance_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) 
cluster = Mock() mock_cluster_load.return_value = cluster cluster.id = cluster_id self.controller.show_instance(req, tenant_id, cluster_id, instance_id) mock_cluster_load_instance.assert_called_with(context, cluster.id, instance_id) @patch.object(Cluster, 'load') def test_delete_cluster(self, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() req = MagicMock() cluster = Mock() trove_testtools.patch_notifier(self) mock_cluster_load.return_value = cluster self.controller.delete(req, tenant_id, cluster_id) cluster.delete.assert_called_with() class TestClusterControllerWithStrategy(trove_testtools.TestCase): def setUp(self): super(TestClusterControllerWithStrategy, self).setUp() self.controller = ClusterController() self.cluster = { "cluster": { "name": "products", "datastore": { "type": "redis", "version": "3.0" }, "instances": [ { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, ] } } def tearDown(self): super(TestClusterControllerWithStrategy, self).tearDown() cfg.CONF.clear_override('cluster_support', group='redis') cfg.CONF.clear_override('api_strategy', group='redis') @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_disabled(self, mock_cluster_create, mock_get_datastore_version): cfg.CONF.set_override('cluster_support', False, group='redis') body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'redis' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaisesRegex(exception.TroveError, "Clusters not supported for", self.controller.create, req, body, tenant_id) @patch.object(views.ClusterView, 'data', return_value={}) @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_enabled(self, mock_cluster_create, mock_get_datastore_version, mock_cluster_view_data): cfg.CONF.set_override('cluster_support', True, group='redis') body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'redis' mock_get_datastore_version.return_value = (Mock(), datastore_version) mock_cluster = Mock() mock_cluster.datastore_version.manager = 'redis' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/cluster/test_cluster_vertica_controller.py0000644000175000017500000002602300000000000031250 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
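# NOTE (editor's sketch): the strategy tests above flip per-datastore
# cluster support with oslo.config overrides and undo them in tearDown so
# state never leaks between tests. A minimal, self-contained version of
# that override round-trip, using a fresh ConfigOpts instead of Trove's
# global CONF:
from oslo_config import cfg as oslo_cfg

_conf = oslo_cfg.ConfigOpts()
_conf.register_group(oslo_cfg.OptGroup('redis'))
_conf.register_opts([oslo_cfg.BoolOpt('cluster_support', default=True)],
                    group='redis')
_conf([])  # parse no CLI args so the options become readable
_conf.set_override('cluster_support', False, group='redis')
assert _conf.redis.cluster_support is False
_conf.clear_override('cluster_support', group='redis')
assert _conf.redis.cluster_support is True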
import jsonschema from mock import MagicMock from mock import Mock from mock import patch from testtools.matchers import Is, Equals from trove.cluster import models from trove.cluster.models import Cluster from trove.cluster.service import ClusterController from trove.cluster import views import trove.common.cfg as cfg from trove.common import exception from trove.common import utils from trove.datastore import models as datastore_models from trove.tests.unittests import trove_testtools class TestClusterController(trove_testtools.TestCase): def setUp(self): super(TestClusterController, self).setUp() self.controller = ClusterController() instances = [ { "flavorRef": "7", "volume": { "size": 1 }, "availability_zone": "az", "nics": [ {"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"} ] } ] * 3 self.cluster = { "cluster": { "name": "products", "datastore": { "type": "vertica", "version": "7.1" }, "instances": instances } } def test_get_schema_create(self): schema = self.controller.get_schema('create', self.cluster) self.assertIsNotNone(schema) self.assertIn('cluster', schema['properties']) self.assertIsNotNone(schema['properties']['cluster']) def test_validate_create(self): body = self.cluster schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_blankname(self): body = self.cluster body['cluster']['name'] = " " schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'")) def test_validate_create_blank_datastore(self): body = self.cluster body['cluster']['datastore']['type'] = "" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("type", error_paths) @patch.object(Cluster, 'create') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters_disabled(self, mock_get_datastore_version, mock_cluster_create): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'mysql' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaises(exception.ClusterDatastoreNotSupported, self.controller.create, req, body, tenant_id) @patch.object(Cluster, 'create') @patch.object(utils, 'get_id_from_href') @patch.object(datastore_models, 'get_datastore_version') def test_create_clusters(self, mock_get_datastore_version, mock_id_from_href, mock_cluster_create): body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'vertica' datastore = Mock() mock_get_datastore_version.return_value = (datastore, datastore_version) instances = 
[ { 'volume_size': 1, 'volume_type': None, 'flavor_id': '1234', 'availability_zone': 'az', 'modules': None, 'region_name': None, 'nics': [ {'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'} ] } ] * 3 mock_id_from_href.return_value = '1234' mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'vertica' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) mock_cluster_create.assert_called_with(context, 'products', datastore, datastore_version, instances, {}, None, None) @patch.object(Cluster, 'load') def test_show_cluster(self, mock_cluster_load): tenant_id = Mock() id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) mock_cluster = Mock() mock_cluster.instances = [] mock_cluster.instances_without_server = [] mock_cluster.datastore_version.manager = 'vertica' mock_cluster_load.return_value = mock_cluster self.controller.show(req, tenant_id, id) mock_cluster_load.assert_called_with(context, id) @patch.object(Cluster, 'load') @patch.object(Cluster, 'load_instance') def test_show_cluster_instance(self, mock_cluster_load_instance, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() instance_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) cluster = Mock() mock_cluster_load.return_value = cluster cluster.id = cluster_id self.controller.show_instance(req, tenant_id, cluster_id, instance_id) mock_cluster_load_instance.assert_called_with(context, cluster.id, instance_id) @patch.object(Cluster, 'load') def test_delete_cluster(self, mock_cluster_load): tenant_id = Mock() cluster_id = Mock() req = MagicMock() cluster = Mock() trove_testtools.patch_notifier(self) mock_cluster_load.return_value = cluster self.controller.delete(req, tenant_id, cluster_id) cluster.delete.assert_called_with() class TestClusterControllerWithStrategy(trove_testtools.TestCase): def setUp(self): super(TestClusterControllerWithStrategy, self).setUp() self.controller = ClusterController() self.cluster = { "cluster": { "name": "products", "datastore": { "type": "vertica", "version": "7.1" }, "instances": [ { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, { "flavorRef": "7", "volume": { "size": 1 }, }, ] } } def tearDown(self): super(TestClusterControllerWithStrategy, self).tearDown() cfg.CONF.clear_override('cluster_support', group='vertica') cfg.CONF.clear_override('api_strategy', group='vertica') @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_disabled(self, mock_cluster_create, mock_get_datastore_version): cfg.CONF.set_override('cluster_support', False, group='vertica') body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'vertica' mock_get_datastore_version.return_value = (Mock(), datastore_version) self.assertRaises(exception.TroveError, self.controller.create, req, body, tenant_id) @patch.object(views.ClusterView, 'data', return_value={}) @patch.object(datastore_models, 'get_datastore_version') @patch.object(models.Cluster, 'create') def test_create_clusters_enabled(self, mock_cluster_create, mock_get_datastore_version, 
mock_cluster_view_data): cfg.CONF.set_override('cluster_support', True, group='vertica') body = self.cluster tenant_id = Mock() context = trove_testtools.TroveTestContext(self) req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) datastore_version = Mock() datastore_version.manager = 'vertica' mock_get_datastore_version.return_value = (Mock(), datastore_version) mock_cluster = Mock() mock_cluster.datastore_version.manager = 'vertica' mock_cluster_create.return_value = mock_cluster self.controller.create(req, body, tenant_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/cluster/test_cluster_views.py0000644000175000017500000001647200000000000026514 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from mock import MagicMock from mock import Mock from mock import patch from trove.cluster.views import ClusterInstanceDetailView from trove.cluster.views import ClusterView from trove.cluster.views import load_view from trove.common.strategies.cluster.experimental.mongodb.api import ( MongoDbClusterView) from trove.tests.unittests import trove_testtools class ClusterViewTest(trove_testtools.TestCase): def setUp(self): super(ClusterViewTest, self).setUp() self.locality = 'anti-affinity' self.cluster = Mock() self.cluster.created = 'Yesterday' self.cluster.updated = 'Now' self.cluster.name = 'cluster1' self.cluster.datastore_version = Mock() self.cluster.datastore_version.name = 'mysql_test_version' self.cluster.instances = [] self.cluster.instances.append(Mock()) self.cluster.instances[0].flavor_id = '123' self.cluster.instances[0].volume = Mock() self.cluster.instances[0].volume.size = 1 self.cluster.instances[0].slave_of_id = None self.cluster.instances[0].slaves = None self.cluster.locality = self.locality def tearDown(self): super(ClusterViewTest, self).tearDown() @patch.object(ClusterView, 'build_instances', return_value=('10.0.0.1', [])) @patch.object(ClusterView, '_build_flavor_info') @patch.object(ClusterView, '_build_links') def test_data(self, mock_build_links, mock_build_flavor_info, mock_build_instances): mock_build_instances.return_value = Mock(), Mock() view = ClusterView(self.cluster, Mock()) result = view.data() self.assertEqual(self.cluster.created, result['cluster']['created']) self.assertEqual(self.cluster.updated, result['cluster']['updated']) self.assertEqual(self.cluster.name, result['cluster']['name']) self.assertEqual(self.cluster.datastore_version.name, result['cluster']['datastore']['version']) self.assertEqual(self.locality, result['cluster']['locality']) @patch.object(ClusterView, 'build_instances', return_value=('10.0.0.1', [])) @patch.object(ClusterView, '_build_flavor_info') @patch.object(ClusterView, '_build_links') def test_load_view(self, *args): cluster = Mock() cluster.datastore_version.manager = 'mongodb' view = load_view(cluster, Mock()) 
self.assertIsInstance(view, MongoDbClusterView) def test__build_instances(self, *args): cluster = Mock() cluster.instances = [] cluster.instances.append(Mock()) cluster.instances.append(Mock()) cluster.instances.append(Mock()) cluster.instances[0].type = 'configsvr' cluster.instances[0].get_visible_ip_addresses = lambda: ['1.2.3.4'] cluster.instances[0].datastore_version.manager = 'mongodb' cluster.instances[1].type = 'query_router' cluster.instances[1].get_visible_ip_addresses = lambda: ['1.2.3.4'] cluster.instances[1].datastore_version.manager = 'mongodb' cluster.instances[2].type = 'member' cluster.instances[2].get_visible_ip_addresses = lambda: ['1.2.3.4'] cluster.instances[2].datastore_version.manager = 'mongodb' def test_case(ip_to_be_published_for, instance_dict_to_be_published_for, number_of_ip_published, number_of_instance_dict_published): view = ClusterView(cluster, MagicMock()) instances, ip_list = view._build_instances( ip_to_be_published_for, instance_dict_to_be_published_for) self.assertEqual(number_of_ip_published, len(ip_list)) self.assertEqual(number_of_instance_dict_published, len(instances)) test_case([], [], 0, 0) test_case(['abc'], ['def'], 0, 0) test_case(['query_router'], ['member'], 1, 1) test_case(['query_router'], ['query_router', 'configsvr', 'member'], 1, 3) test_case(['query_router', 'member'], ['member'], 2, 1) class ClusterInstanceDetailViewTest(trove_testtools.TestCase): def setUp(self): super(ClusterInstanceDetailViewTest, self).setUp() self.instance = Mock() self.instance.created = 'Yesterday' self.instance.updated = 'Now' self.instance.datastore_version = Mock() self.instance.datastore_version.name = 'mysql_test_version' self.instance.hostname = 'test.trove.com' self.ip = "1.2.3.4" self.instance.addresses = {"private": [{"addr": self.ip}]} self.instance.volume_used = '3' self.instance.root_password = 'iloveyou' self.instance.get_visible_ip_addresses = lambda: ["1.2.3.4"] self.instance.slave_of_id = None self.instance.slaves = None self.context = trove_testtools.TroveTestContext(self) self.req = Mock() self.req.environ = Mock() self.req.environ.__getitem__ = Mock(return_value=self.context) def tearDown(self): super(ClusterInstanceDetailViewTest, self).tearDown() @patch.object(ClusterInstanceDetailView, '_build_links') @patch.object(ClusterInstanceDetailView, '_build_flavor_links') @patch.object(ClusterInstanceDetailView, '_build_configuration_info') def test_data(self, *args): view = ClusterInstanceDetailView(self.instance, self.req) result = view.data() self.assertEqual(self.instance.created, result['instance']['created']) self.assertEqual(self.instance.updated, result['instance']['updated']) self.assertEqual(self.instance.datastore_version.name, result['instance']['datastore']['version']) self.assertEqual(self.instance.hostname, result['instance']['hostname']) self.assertNotIn('ip', result['instance']) @patch.object(ClusterInstanceDetailView, '_build_links') @patch.object(ClusterInstanceDetailView, '_build_flavor_links') @patch.object(ClusterInstanceDetailView, '_build_configuration_info') def test_data_ip(self, *args): self.instance.hostname = None view = ClusterInstanceDetailView(self.instance, self.req) result = view.data() self.assertEqual(self.instance.created, result['instance']['created']) self.assertEqual(self.instance.updated, result['instance']['updated']) self.assertEqual(self.instance.datastore_version.name, result['instance']['datastore']['version']) self.assertNotIn('hostname', result['instance']) self.assertEqual([self.ip], 
result['instance']['ip']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/cluster/test_galera_cluster.py0000644000175000017500000004214300000000000026604 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid from mock import Mock from mock import patch from novaclient import exceptions as nova_exceptions from trove.cluster.models import Cluster from trove.cluster.models import ClusterTasks from trove.cluster.models import DBCluster from trove.common import clients from trove.common import exception from trove.common.strategies.cluster.experimental.galera_common import ( api as galera_api) from trove.common.strategies.cluster.experimental.galera_common import ( taskmanager as galera_task) from trove.instance import models as inst_models from trove.quota.quota import QUOTAS from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools class FakeOptGroup(object): def __init__(self, min_cluster_member_count=3, volume_support=True, device_path='/dev/vdb'): self.min_cluster_member_count = min_cluster_member_count self.volume_support = volume_support self.device_path = device_path class ClusterTest(trove_testtools.TestCase): def setUp(self): super(ClusterTest, self).setUp() self.get_client_patch = patch.object(task_api.API, 'get_client') self.get_client_mock = self.get_client_patch.start() self.addCleanup(self.get_client_patch.stop) self.cluster_id = str(uuid.uuid4()) self.cluster_name = "Cluster" + self.cluster_id self.tenant_id = "23423432" self.dv_id = "1" self.db_info = DBCluster(ClusterTasks.NONE, id=self.cluster_id, name=self.cluster_name, tenant_id=self.tenant_id, datastore_version_id=self.dv_id, task_id=ClusterTasks.NONE._code) self.context = trove_testtools.TroveTestContext(self) self.datastore = Mock() self.dv = Mock() self.dv.manager = "pxc" self.datastore_version = self.dv self.cluster = galera_api.GaleraCommonCluster( self.context, self.db_info, self.datastore, self.datastore_version) self.cluster._server_group_loaded = True self.instances = [ {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}]}, {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}]}, {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}]}] def tearDown(self): super(ClusterTest, self).tearDown() def test_create_empty_instances(self): self.assertRaises(exception.ClusterNumInstancesNotLargeEnough, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, [], {}, None, None) @patch.object(clients, 'create_nova_client') def test_create_flavor_not_specified(self, mock_client): instances = self.instances instances[0]['flavor_id'] = None self.assertRaises(exception.ClusterFlavorsNotEqual, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(clients, 
'create_nova_client') def test_create_invalid_flavor_specified(self, mock_client): instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] (mock_client.return_value.flavors.get) = Mock( side_effect=nova_exceptions.NotFound( 404, "Flavor id not found %s" % id)) self.assertRaises(exception.FlavorNotFound, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(clients, 'create_nova_client') def test_create_volume_no_specified(self, mock_client): instances = self.instances instances[0]['volume_size'] = None flavors = Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.ClusterVolumeSizeRequired, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(clients, 'create_nova_client') @patch.object(galera_api, 'CONF') def test_create_storage_specified_with_no_volume_support(self, mock_conf, mock_client): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) instances = self.instances instances[0]['volume_size'] = None flavors = Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.VolumeNotSupported, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(clients, 'create_nova_client') @patch.object(galera_api, 'CONF') def test_create_storage_not_specified_and_no_ephemeral_flavor(self, mock_conf, mock_client): class FakeFlavor(object): def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor.id @property def ephemeral(self): return 0 instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. 
flavors.get.return_value) = FakeFlavor('1234') self.assertRaises(exception.LocalStorageNotSpecified, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(clients, 'create_nova_client') def test_create_volume_not_equal(self, mock_client): instances = self.instances instances[0]['volume_size'] = 2 flavors = Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.ClusterVolumeSizesNotEqual, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(inst_models.DBInstance, 'find_all') @patch.object(inst_models.Instance, 'create') @patch.object(DBCluster, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') @patch.object(clients, 'create_neutron_client') def test_create(self, mock_neutron_client, mock_nova_client, mock_check_quotas, mock_task_api, mock_db_create, mock_ins_create, mock_find_all): instances = self.instances flavors = Mock() networks = Mock() mock_nova_client.return_value.flavors = flavors mock_neutron_client.return_value.find_resource = networks self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(3, mock_ins_create.call_count) @patch.object(inst_models.Instance, 'create') @patch.object(DBCluster, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') def test_create_over_limit(self, mock_client, mock_check_quotas, mock_task_api, mock_db_create, mock_ins_create): instances = [{'volume_size': 1, 'flavor_id': '1234'}, {'volume_size': 1, 'flavor_id': '1234'}, {'volume_size': 1, 'flavor_id': '1234'}, {'volume_size': 1, 'flavor_id': '1234'}] flavors = Mock() mock_client.return_value.flavors = flavors self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(4, mock_ins_create.call_count) @patch.object(inst_models.DBInstance, 'find_all') @patch.object(galera_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(DBCluster, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') def test_create_with_ephemeral_flavor(self, mock_client, mock_check_quotas, mock_task_api, mock_db_create, mock_ins_create, mock_conf, mock_find_all): class FakeFlavor(object): def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor.id @property def ephemeral(self): return 1 instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. 
flavors.get.return_value) = FakeFlavor('1234') self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(3, mock_ins_create.call_count) @patch('trove.cluster.models.LOG') def test_delete_bad_task_status(self, mock_logging): self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL self.assertRaises(exception.UnprocessableEntity, self.cluster.delete) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_none(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.NONE self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_deleting(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.DELETING self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING) @patch.object(DBCluster, 'update') @patch.object(galera_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') @patch.object(clients, 'create_neutron_client') def test_grow(self, mock_neutron_client, mock_nova_client, mock_check_quotas, mock_task_api, mock_inst_create, mock_conf, mock_update): mock_nova_client.return_value.flavors = Mock() mock_neutron_client.return_value.find_resource = Mock() self.cluster.grow(self.instances) mock_update.assert_called_with( task_status=ClusterTasks.GROWING_CLUSTER) mock_task_api.return_value.grow_cluster.assert_called_with( self.db_info.id, [mock_inst_create.return_value.id] * 3) self.assertEqual(3, mock_inst_create.call_count) @patch.object(DBCluster, 'update') @patch.object(galera_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') @patch.object(clients, 'create_neutron_client') def test_grow_exception(self, mock_neutron_client, mock_nova_client, mock_check_quotas, mock_inst_create, mock_conf, mock_update): mock_nova_client.return_value.flavors = Mock() mock_neutron_client.return_value.find_resource = Mock() with patch.object(task_api, 'load') as mock_load: mock_load.return_value.grow_cluster = Mock( side_effect=exception.BadRequest) self.assertRaises(exception.BadRequest, self.cluster.grow, self.instances) @patch.object(inst_models.DBInstance, 'find_all') @patch.object(inst_models.Instance, 'load') @patch.object(Cluster, 'validate_cluster_available') def test_shrink_empty(self, mock_validate, mock_load, mock_find_all): instance = Mock() self.assertRaises( exception.ClusterShrinkMustNotLeaveClusterEmpty, self.cluster.shrink, [instance]) @patch.object(galera_api.GaleraCommonCluster, '__init__') @patch.object(task_api, 'load') @patch.object(DBCluster, 'update') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(inst_models.Instance, 'load') @patch.object(Cluster, 'validate_cluster_available') def test_shrink(self, mock_validate, mock_load, mock_find_all, mock_update, mock_task_api, mock_init): mock_init.return_value = None existing_instances = [Mock(), Mock()] 
        mock_find_all.return_value.all.return_value = existing_instances
        instance = Mock()
        self.cluster.shrink([instance])
        mock_validate.assert_called_with()
        mock_update.assert_called_with(
            task_status=ClusterTasks.SHRINKING_CLUSTER)
        mock_task_api.return_value.shrink_cluster.assert_called_with(
            self.db_info.id, [mock_load.return_value.id])
        mock_init.assert_called_with(self.context, self.db_info,
                                     self.datastore, self.datastore_version)

    @patch.object(galera_task.GaleraCommonClusterTasks, 'shrink_cluster')
    @patch.object(galera_api.GaleraCommonCluster, '__init__')
    @patch.object(DBCluster, 'update')
    @patch.object(inst_models.DBInstance, 'find_all')
    @patch.object(inst_models.Instance, 'load')
    @patch.object(Cluster, 'validate_cluster_available')
    def test_shrink_exception(self, mock_validate, mock_load, mock_find_all,
                              mock_update, mock_init, mock_shrink):
        mock_init.return_value = None
        existing_instances = [Mock(), Mock()]
        mock_find_all.return_value.all.return_value = existing_instances
        instance = Mock()
        with patch.object(task_api, 'load') as mock_load:
            mock_load.return_value.shrink_cluster = Mock(
                side_effect=exception.BadRequest)
            self.assertRaises(exception.BadRequest,
                              self.cluster.shrink, [instance])

# ===== trove-12.1.0.dev92/trove/tests/unittests/cluster/test_models.py =====
# Copyright 2016 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
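# NOTE (editor's sketch, simplified signature): the module below tests the
# cluster-model helpers that enforce homogeneous clusters. The flavor
# check they exercise reduces to roughly this (Trove raises
# ClusterFlavorsNotEqual; ValueError keeps the sketch dependency-free):
def _assert_same_instance_flavors(instances, required_flavor=None):
    """Raise if the instances' flavors differ, or differ from the
    required flavor when one is given."""
    flavors = {inst.get('flavor_id') for inst in instances}
    if len(flavors) != 1 or (required_flavor is not None
                             and required_flavor not in flavors):
        raise ValueError("All cluster instances must use the same flavor")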
from mock import ANY from mock import call from mock import DEFAULT from mock import MagicMock from mock import Mock from mock import patch from mock import PropertyMock from neutronclient.common import exceptions as neutron_exceptions from trove.cluster import models from trove.common import clients from trove.common import exception from trove.tests.unittests import trove_testtools class TestModels(trove_testtools.TestCase): @patch.object(clients, 'create_nova_client', return_value=MagicMock()) def test_validate_instance_flavors(self, create_nova_cli_mock): patch.object( create_nova_cli_mock.return_value, 'flavors', new_callable=PropertyMock(return_value=Mock())) mock_flv = create_nova_cli_mock.return_value.flavors.get.return_value mock_flv.ephemeral = 0 test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 1.5, 'region_name': 'home'}, {'flavor_id': 2, 'volume_size': 3, 'region_name': 'work'}] models.validate_instance_flavors(Mock(), test_instances, True, True) create_nova_cli_mock.assert_has_calls([call(ANY, None), call(ANY, 'home'), call(ANY, 'work')]) self.assertRaises(exception.LocalStorageNotSpecified, models.validate_instance_flavors, Mock(), test_instances, False, True) mock_flv.ephemeral = 1 models.validate_instance_flavors(Mock(), test_instances, False, True) def test_validate_volume_size(self): self.patch_conf_property('max_accepted_volume_size', 10) models.validate_volume_size(9) models.validate_volume_size(10) self.assertRaises(exception.VolumeQuotaExceeded, models.validate_volume_size, 11) self.assertRaises(exception.VolumeSizeNotSpecified, models.validate_volume_size, None) @patch.object(models, 'validate_volume_size') def test_get_required_volume_size(self, vol_size_validator_mock): test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 1.5}, {'flavor_id': 1, 'volume_size': 3}] total_size = models.get_required_volume_size(test_instances, True) self.assertEqual(14.5, total_size) vol_size_validator_mock.assert_has_calls([call(10), call(1.5), call(3)], any_order=True) test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 1.5}, {'flavor_id': 1, 'volume_size': None}] self.assertRaises(exception.ClusterVolumeSizeRequired, models.get_required_volume_size, test_instances, True) test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 1.5}, {'flavor_id': 1}] self.assertRaises(exception.ClusterVolumeSizeRequired, models.get_required_volume_size, test_instances, True) test_instances = [{'flavor_id': 1}, {'flavor_id': 1}, {'flavor_id': 1}] total_size = models.get_required_volume_size(test_instances, False) self.assertIsNone(total_size) test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 1.5}] self.assertRaises(exception.VolumeNotSupported, models.get_required_volume_size, test_instances, False) def test_assert_same_instance_volumes(self): test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}] models.assert_same_instance_volumes(test_instances) test_instances = [{'flavor_id': 1, 'volume_size': 5}, {'flavor_id': 1, 'volume_size': 5}, {'flavor_id': 1, 'volume_size': 5}] models.assert_same_instance_volumes(test_instances, required_size=5) test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 1.5}, {'flavor_id': 1, 'volume_size': 10}] self.assertRaises(exception.ClusterVolumeSizesNotEqual, 
models.assert_same_instance_volumes, test_instances) test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}] self.assertRaises(exception.ClusterVolumeSizesNotEqual, models.assert_same_instance_volumes, test_instances, required_size=5) def test_assert_same_instance_flavors(self): test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}] models.assert_same_instance_flavors(test_instances) test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}] models.assert_same_instance_flavors(test_instances, required_flavor=1) test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 2, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}] self.assertRaises(exception.ClusterFlavorsNotEqual, models.assert_same_instance_flavors, test_instances) test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}] self.assertRaises(exception.ClusterFlavorsNotEqual, models.assert_same_instance_flavors, test_instances, required_flavor=2) @patch.multiple(models, assert_same_instance_flavors=DEFAULT, assert_same_instance_volumes=DEFAULT) def test_assert_homogeneous_cluster(self, assert_same_instance_flavors, assert_same_instance_volumes): test_instances = [{'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}, {'flavor_id': 1, 'volume_size': 10}] required_flavor = Mock() required_volume_size = Mock() models.assert_homogeneous_cluster( test_instances, required_flavor=required_flavor, required_volume_size=required_volume_size) assert_same_instance_flavors.assert_called_once_with( test_instances, required_flavor=required_flavor) assert_same_instance_volumes.assert_called_once_with( test_instances, required_size=required_volume_size) @patch.object(clients, 'create_neutron_client', return_value=MagicMock()) def test_validate_instance_nics(self, create_neutron_cli_mock): test_instances = [ {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "surprise"}]}, {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}]}, {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}]}] self.assertRaises(exception.ClusterNetworksNotEqual, models.validate_instance_nics, Mock(), test_instances) test_instances = [ {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}]}, {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}]}, {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}]}] create_neutron_cli_mock.return_value.find_resource = Mock( side_effect=neutron_exceptions.NotFound( "Nic id not found %s" % id)) self.assertRaises(exception.NetworkNotFound, models.validate_instance_nics, Mock(), test_instances) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/cluster/test_mongodb_cluster.py0000644000175000017500000006040500000000000026777 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import uuid from novaclient import exceptions as nova_exceptions from trove.cluster import models from trove.cluster import tasks from trove.common import cfg from trove.common import clients from trove.common import exception from trove.common.strategies.cluster.experimental.mongodb import api from trove.instance import models as inst_models from trove.instance import tasks as inst_tasks from trove.quota.quota import QUOTAS from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools CONF = cfg.CONF class FakeOptGroup(object): def __init__(self, num_config_servers_per_cluster=3, num_query_routers_per_cluster=1, config_servers_volume_size=10, query_routers_volume_size=10, cluster_secure=True, volume_support=True, device_path='/dev/vdb'): self.num_config_servers_per_cluster = num_config_servers_per_cluster self.num_query_routers_per_cluster = num_query_routers_per_cluster self.config_servers_volume_size = config_servers_volume_size self.query_routers_volume_size = query_routers_volume_size self.cluster_secure = cluster_secure self.volume_support = volume_support self.device_path = device_path class MongoDBClusterTest(trove_testtools.TestCase): def setUp(self): super(MongoDBClusterTest, self).setUp() self.cluster_id = str(uuid.uuid4()) self.cluster_name = "Cluster" + self.cluster_id self.tenant_id = "23423432" self.dv_id = "1" self.db_info = models.DBCluster(models.ClusterTasks.NONE, id=self.cluster_id, name=self.cluster_name, tenant_id=self.tenant_id, datastore_version_id=self.dv_id, task_id=models.ClusterTasks.NONE._code) self.context = mock.Mock() self.datastore = mock.Mock() self.dv = mock.Mock() self.dv.manager = "mongodb" self.datastore_version = self.dv self.cluster = api.MongoDbCluster(self.context, self.db_info, self.datastore, self.datastore_version) self.cluster._server_group_loaded = True self.manager = mock.Mock() self.cluster.manager = self.manager self.volume_support = CONF.get('mongodb').volume_support self.remote_nova = clients.create_nova_client self.instances = [ {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}], 'region_name': "foo-region"}, {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}], 'region_name': "foo-region"}, {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}], 'region_name': "foo-region"}] def tearDown(self): super(MongoDBClusterTest, self).tearDown() def test_create_configuration_specified(self): configuration = "foo-config" self.assertRaises(exception.ConfigurationNotSupported, models.Cluster.create, mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances, {}, None, configuration) def test_create_invalid_instance_numbers_specified(self): instance = [ {'volume_size': 1, 'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}]} ] self.assertRaises(exception.ClusterNumInstancesNotSupported, models.Cluster.create, mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, instance, {}, None, None) @mock.patch.object(clients, 'create_nova_client') def test_create_invalid_flavor_specified(self, mock_client): 
(mock_client.return_value.flavors.get) = mock.Mock( side_effect=nova_exceptions.NotFound( 404, "Flavor id not found.")) self.assertRaises(exception.FlavorNotFound, models.Cluster.create, mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances, {}, None, None) @mock.patch.object(clients, 'create_nova_client') def test_create_flavor_not_equal(self, mock_client): instances = self.instances instances[0]['flavor_id'] = '4321' flavors = mock.Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.ClusterFlavorsNotEqual, models.Cluster.create, mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @mock.patch.object(clients, 'create_nova_client') def test_create_volume_not_equal(self, mock_client): instances = self.instances instances[0]['volume_size'] = 2 flavors = mock.Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.ClusterVolumeSizesNotEqual, models.Cluster.create, mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @mock.patch.object(clients, 'create_nova_client') def test_create_volume_not_specified(self, mock_client): instances = [ {'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}], 'region_name': "foo-region"}, {'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}], 'region_name': "foo-region"}, {'flavor_id': '1234', 'nics': [{"net-id": "foo-bar"}], 'region_name': "foo-region"}] flavors = mock.Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.ClusterVolumeSizeRequired, models.Cluster.create, mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @mock.patch.object(clients, 'create_nova_client') @mock.patch.object(api, 'CONF') def test_create_storage_specified_with_no_volume_support(self, mock_conf, mock_client): mock_conf.get = mock.Mock( return_value=FakeOptGroup(volume_support=False)) flavors = mock.Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.VolumeNotSupported, models.Cluster.create, mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances, {}, None, None) @mock.patch.object(task_api, 'load') @mock.patch.object(inst_models.Instance, 'create') @mock.patch.object(models.DBCluster, 'create') @mock.patch.object(clients, 'create_neutron_client') @mock.patch.object(clients, 'create_nova_client') @mock.patch.object(api, 'check_quotas') def test_create_validate_volumes_deltas(self, mock_check_quotas, *args): extended_properties = { "configsvr_volume_size": 5, "mongos_volume_size": 7} self.cluster.create(mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances, extended_properties, None, None) deltas = {'instances': 7, 'volumes': 25} # volumes=1*3+5*3+7*1 mock_check_quotas.assert_called_with(mock.ANY, deltas) @mock.patch.object(task_api, 'load') @mock.patch.object(inst_models.Instance, 'create') @mock.patch.object(models.DBCluster, 'create') @mock.patch.object(QUOTAS, 'check_quotas') @mock.patch.object(clients, 'create_nova_client') @mock.patch.object(clients, 'create_neutron_client') def test_create(self, mock_neutron_client, mock_nova_client, mock_check_quotas, mock_db_create, mock_ins_create, mock_task_api): instances = self.instances flavors = mock.Mock() networks = mock.Mock() mock_neutron_client.return_value.find_resource = networks mock_nova_client.return_value.flavors = flavors self.cluster.create(mock.Mock(), self.cluster_name, self.datastore, 
self.datastore_version, instances, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(7, mock_ins_create.call_count) @mock.patch.object(task_api, 'load') @mock.patch.object(models.DBCluster, 'create') @mock.patch.object(models, 'validate_instance_nics') @mock.patch.object(QUOTAS, 'check_quotas') @mock.patch.object(models, 'validate_instance_flavors') @mock.patch.object(inst_models.Instance, 'create') def test_create_with_correct_nics(self, mock_ins_create, *args): self.cluster.create(mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances, {}, None, None) nics = [{"net-id": "foo-bar"}] nics_count = [kw.get('nics') for _, kw in mock_ins_create.call_args_list].count(nics) self.assertEqual(7, nics_count) @mock.patch.object(task_api, 'load') @mock.patch.object(models.DBCluster, 'create') @mock.patch.object(models, 'validate_instance_nics') @mock.patch.object(QUOTAS, 'check_quotas') @mock.patch.object(models, 'validate_instance_flavors') @mock.patch.object(inst_models.Instance, 'create') def test_create_with_extended_properties(self, mock_ins_create, *args): extended_properties = { "num_configsvr": 5, "num_mongos": 7, "configsvr_volume_size": 8, "configsvr_volume_type": "foo_type", "mongos_volume_size": 9, "mongos_volume_type": "bar_type"} self.cluster.create(mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances, extended_properties, None, None) volume_args_list = [ (arg[8], kw['volume_type']) for arg, kw in mock_ins_create.call_args_list ] self.assertEqual(5, volume_args_list.count((8, "foo_type"))) self.assertEqual(7, volume_args_list.count((9, "bar_type"))) @mock.patch.object(task_api, 'load') @mock.patch.object(inst_models.Instance, 'create') @mock.patch.object(models.DBCluster, 'create') @mock.patch.object(QUOTAS, 'check_quotas') @mock.patch.object(clients, 'create_nova_client') @mock.patch.object(clients, 'create_neutron_client') @mock.patch.object(api, 'CONF') def test_create_with_lower_configsvr(self, mock_conf, mock_neutron_client, mock_nova_client, mock_check_quotas, mock_db_create, mock_ins_create, mock_task_api): mock_conf.get = mock.Mock( return_value=FakeOptGroup(num_config_servers_per_cluster=1)) instances = self.instances flavors = mock.Mock() networks = mock.Mock() mock_nova_client.return_value.flavors = flavors mock_neutron_client.return_value.find_resource = networks self.cluster.create(mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(5, mock_ins_create.call_count) @mock.patch.object(task_api, 'load') @mock.patch.object(inst_models.Instance, 'create') @mock.patch.object(models.DBCluster, 'create') @mock.patch.object(QUOTAS, 'check_quotas') @mock.patch.object(clients, 'create_nova_client') @mock.patch.object(clients, 'create_neutron_client') @mock.patch.object(api, 'CONF') def test_create_with_higher_configsvr(self, mock_conf, mock_neutron_client, mock_nova_client, mock_check_quotas, mock_db_create, mock_ins_create, mock_task_api): mock_conf.get = mock.Mock( return_value=FakeOptGroup(num_config_servers_per_cluster=5)) instances = self.instances flavors = mock.Mock() networks = mock.Mock() mock_nova_client.return_value.flavors = flavors mock_neutron_client.return_value.find_resource = networks self.cluster.create(mock.Mock(), self.cluster_name, self.datastore, self.datastore_version,
instances, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(9, mock_ins_create.call_count) @mock.patch.object(task_api, 'load') @mock.patch.object(inst_models.Instance, 'create') @mock.patch.object(models.DBCluster, 'create') @mock.patch.object(QUOTAS, 'check_quotas') @mock.patch.object(clients, 'create_nova_client') @mock.patch.object(clients, 'create_neutron_client') @mock.patch.object(api, 'CONF') def test_create_with_higher_mongos(self, mock_conf, mock_neutron_client, mock_nova_client, mock_check_quotas, mock_db_create, mock_ins_create, mock_task_api): mock_conf.get = mock.Mock( return_value=FakeOptGroup(num_query_routers_per_cluster=4)) instances = self.instances flavors = mock.Mock() networks = mock.Mock() mock_nova_client.return_value.flavors = flavors mock_neutron_client.return_value.find_resource = networks self.cluster.create(mock.Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(10, mock_ins_create.call_count) @mock.patch.object(api.MongoDbCluster, '_prep_resize') @mock.patch.object(api.MongoDbCluster, '_check_quotas') @mock.patch.object(api.MongoDbCluster, '_check_instances') @mock.patch.object(api.MongoDbCluster, '_create_shard_instances', return_value=['id1', 'id2', 'id3']) @mock.patch.object(api.MongoDbCluster, '_create_query_router_instances', return_value=['id4']) @mock.patch.object(api.MongoDbCluster, 'update_db') def test_grow(self, mock_update_db, mock_create_query_router_instances, mock_create_shard_instances, mock_check_instances, mock_check_quotas, mock_prep_resize): instance1 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaC'} instance2 = {'name': 'replicaB', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaA'} instance3 = {'name': 'replicaC', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaA'} instance4 = {'flavor_id': 1, 'volume_size': 5, 'instance_type': 'query_router'} self.cluster.grow([instance1, instance2, instance3, instance4]) self.assertTrue(mock_prep_resize.called) mock_create_shard_instances.assert_called_with([instance1, instance2, instance3], None) mock_create_query_router_instances.assert_called_with([instance4], None) mock_update_db.assert_called_with( task_status=tasks.ClusterTasks.GROWING_CLUSTER ) self.manager.grow_cluster.assert_called_with( self.cluster_id, ['id1', 'id2', 'id3', 'id4'] ) @mock.patch.object(api.MongoDbCluster, '_prep_resize') @mock.patch.object(api.MongoDbCluster, '_check_quotas') def test_grow_invalid_type(self, mock_check_quotas, mock_prep_resize): instance1 = {'flavor_id': 1, 'volume_size': 5, 'instance_type': 'config_server'} self.assertRaises(exception.TroveError, self.cluster.grow, [instance1]) @mock.patch.object(api.MongoDbCluster, '_prep_resize') @mock.patch.object(api.MongoDbCluster, '_check_quotas') def test_grow_invalid_shard_size(self, mock_check_quotas, mock_prep_resize): instance1 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaB'} instance2 = {'name': 'replicaB', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaA'} self.assertRaises(exception.TroveError, self.cluster.grow, [instance1, instance2]) @mock.patch.object(api.MongoDbCluster, '_prep_resize') 
@mock.patch.object(api.MongoDbCluster, '_check_quotas') def test_grow_no_name(self, mock_check_quotas, mock_prep_resize): instance1 = {'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaC'} self.assertRaises(exception.TroveError, self.cluster.grow, [instance1]) @mock.patch.object(api.MongoDbCluster, '_prep_resize') @mock.patch.object(api.MongoDbCluster, '_check_quotas') def test_grow_repeated_name(self, mock_check_quotas, mock_prep_resize): instance1 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaC'} instance2 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaC'} instance3 = {'name': 'replicaC', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaC'} self.assertRaises(exception.TroveError, self.cluster.grow, [instance1, instance2, instance3]) @mock.patch.object(api.MongoDbCluster, '_prep_resize') @mock.patch.object(api.MongoDbCluster, '_check_quotas') def test_grow_bad_relations(self, mock_check_quotas, mock_prep_resize): instance1 = {'name': 'replicaA', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaB'} instance2 = {'name': 'replicaB', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaC'} instance3 = {'name': 'replicaC', 'flavor_id': 1, 'volume_size': 5, 'instance_type': 'replica', 'related_to': 'replicaD'} self.assertRaises(exception.TroveError, self.cluster.grow, [instance1, instance2, instance3]) @mock.patch.object(api.MongoDbCluster, '_prep_resize') @mock.patch.object(api.MongoDbCluster, '_check_shard_status') @mock.patch.object(api.MongoDbCluster, 'update_db') @mock.patch.object(inst_models, 'load_any_instance') def test_shrink(self, mock_load_any_instance, mock_update_db, mock_check_shard_status, mock_prep_resize): self._mock_db_instances() self.cluster.query_routers.append( inst_models.DBInstance(inst_tasks.InstanceTasks.NONE, id='id6', cluster_id=self.cluster_id, type='query_router') ) self.cluster.shrink(['id1', 'id2', 'id3', 'id4']) self.assertTrue(mock_prep_resize.called) mock_check_shard_status.assert_called_with('id1') mock_update_db.assert_called_with( task_status=tasks.ClusterTasks.SHRINKING_CLUSTER ) self.assertEqual(4, mock_load_any_instance().delete.call_count) self.manager.shrink_cluster.assert_called_with( self.cluster_id, ['id1', 'id2', 'id3', 'id4'] ) @mock.patch.object(api.MongoDbCluster, '_prep_resize') def test_shrink_invalid_type(self, mock_prep_resize): self._mock_db_instances() self.assertRaises(exception.TroveError, self.cluster.shrink, ['id5']) @mock.patch.object(api.MongoDbCluster, '_prep_resize') def test_shrink_incomplete_shard(self, mock_prep_resize): self._mock_db_instances() self.assertRaises(exception.TroveError, self.cluster.shrink, ['id1', 'id2']) def _mock_db_instances(self): self.shard_id = uuid.uuid4() self.cluster.members = [ inst_models.DBInstance(inst_tasks.InstanceTasks.NONE, id='id1', cluster_id=self.cluster_id, shard_id=self.shard_id, type='member'), inst_models.DBInstance(inst_tasks.InstanceTasks.NONE, id='id2', cluster_id=self.cluster_id, shard_id=self.shard_id, type='member'), inst_models.DBInstance(inst_tasks.InstanceTasks.NONE, id='id3', cluster_id=self.cluster_id, shard_id=self.shard_id, type='member'), ] self.cluster.query_routers = [ inst_models.DBInstance(inst_tasks.InstanceTasks.NONE, id='id4', cluster_id=self.cluster_id, type='query_router') ] self.cluster.config_svrs = [ inst_models.DBInstance(inst_tasks.InstanceTasks.NONE, id='id5', cluster_id=self.cluster_id, type='config_server') ]
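# Illustrative sketch (hypothetical helper, not part of trove): the instance
# counts asserted by the create tests above (7, 5, 9, 10) follow from the
# MongoDB cluster topology -- the requested shard members plus the configured
# config servers and query routers.
def expected_mongodb_instances(num_members, num_configsvr=3, num_mongos=1):
    """Total instances a MongoDB cluster create is expected to spawn."""
    return num_members + num_configsvr + num_mongos

assert expected_mongodb_instances(3) == 7                    # default layout
assert expected_mongodb_instances(3, num_configsvr=1) == 5   # lower configsvr
assert expected_mongodb_instances(3, num_configsvr=5) == 9   # higher configsvr
assert expected_mongodb_instances(3, num_mongos=4) == 10     # higher mongos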
trove-12.1.0.dev92/trove/tests/unittests/cluster/test_redis_cluster.py
# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid from mock import Mock from mock import patch from novaclient import exceptions as nova_exceptions from trove.cluster.models import Cluster from trove.cluster.models import ClusterTasks from trove.cluster.models import DBCluster from trove.common import clients from trove.common import exception from trove.common.strategies.cluster.experimental.redis import api as redis_api from trove.instance import models as inst_models from trove.instance.models import DBInstance from trove.instance.models import InstanceTasks from trove.quota.quota import QUOTAS from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools class FakeOptGroup(object): def __init__(self, cluster_member_count=3, volume_support=True, device_path='/dev/vdb'): self.cluster_member_count = cluster_member_count self.volume_support = volume_support self.device_path = device_path class ClusterTest(trove_testtools.TestCase): def setUp(self): super(ClusterTest, self).setUp() self.cluster_id = str(uuid.uuid4()) self.cluster_name = "Cluster" + self.cluster_id self.tenant_id = "23423432" self.dv_id = "1" self.db_info = DBCluster(ClusterTasks.NONE, id=self.cluster_id, name=self.cluster_name, tenant_id=self.tenant_id, datastore_version_id=self.dv_id, task_id=ClusterTasks.NONE._code) self.get_client_patch = patch.object(task_api.API, 'get_client') self.get_client_mock = self.get_client_patch.start() self.addCleanup(self.get_client_patch.stop) self.dbcreate_patch = patch.object(DBCluster, 'create', return_value=self.db_info) self.dbcreate_mock = self.dbcreate_patch.start() self.addCleanup(self.dbcreate_patch.stop) self.context = trove_testtools.TroveTestContext(self) self.datastore = Mock() self.dv = Mock() self.dv.manager = "redis" self.datastore_version = self.dv self.cluster = redis_api.RedisCluster(self.context, self.db_info, self.datastore, self.datastore_version) self.cluster._server_group_loaded = True self.instances_w_volumes = [{'volume_size': 1, 'flavor_id': '1234'}] * 3 self.instances_no_volumes = [{'flavor_id': '1234'}] * 3 def tearDown(self): super(ClusterTest, self).tearDown() @patch.object(clients, 'create_nova_client') def test_create_invalid_flavor_specified(self, mock_client): (mock_client.return_value.flavors.get) = Mock( side_effect=nova_exceptions.NotFound( 404, "Flavor id not found.")) self.assertRaises(exception.FlavorNotFound, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_w_volumes, {}, None, None)
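# Self-contained sketch of the mocking idiom used above: assigning a
# side_effect exception to a Mock makes the call raise it, which is how the
# test drives the flavor-validation error path without a real nova client.
# FakeNotFound stands in for novaclient's NotFound; only the raise matters.
from unittest import mock

class FakeNotFound(Exception):
    pass

flavors = mock.Mock()
flavors.get.side_effect = FakeNotFound("Flavor id not found.")
try:
    flavors.get('1234')
except FakeNotFound:
    pass  # trove would translate this into exception.FlavorNotFound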
@patch.object(clients, 'create_nova_client') @patch.object(redis_api, 'CONF') def test_create_volume_no_specified(self, mock_conf, mock_client): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=True)) self.assertRaises(exception.ClusterVolumeSizeRequired, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_no_volumes, {}, None, None) @patch.object(clients, 'create_nova_client') @patch.object(redis_api, 'CONF') def test_create_storage_specified_with_no_volume_support(self, mock_conf, mock_client): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) mock_client.return_value.flavors = Mock() self.assertRaises(exception.VolumeNotSupported, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_w_volumes, {}, None, None) @patch.object(clients, 'create_nova_client') @patch.object(redis_api, 'CONF') def test_create_storage_not_specified_and_no_ephemeral_flavor(self, mock_conf, mock_client): class FakeFlavor(object): def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor_id @property def ephemeral(self): return 0 mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. flavors.get.return_value) = FakeFlavor('1234') self.assertRaises(exception.LocalStorageNotSpecified, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_no_volumes, {}, None, None) @patch.object(redis_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') def test_create(self, mock_client, mock_check_quotas, mock_task_api, mock_ins_create, mock_conf): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=True)) mock_client.return_value.flavors = Mock() self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_w_volumes, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( self.dbcreate_mock.return_value.id) self.assertEqual(3, mock_ins_create.call_count)
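# Sketch of the CONF-stubbing idiom these redis tests rely on: the strategy
# module reads its options through CONF.get(<datastore>), so patching CONF
# and returning a FakeOptGroup lets each test choose volume_support or
# cluster_member_count in isolation. The names mirror this file, but the
# snippet itself is illustrative only.
from unittest import mock

class FakeOptGroup(object):
    def __init__(self, cluster_member_count=3, volume_support=True):
        self.cluster_member_count = cluster_member_count
        self.volume_support = volume_support

conf = mock.Mock()
conf.get = mock.Mock(return_value=FakeOptGroup(volume_support=False))
assert conf.get('redis').volume_support is False
assert conf.get('redis').cluster_member_count == 3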
@patch.object(redis_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') def test_create_with_ephemeral_flavor(self, mock_client, mock_check_quotas, mock_task_api, mock_ins_create, mock_conf): class FakeFlavor(object): def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor_id @property def ephemeral(self): return 1 mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. flavors.get.return_value) = FakeFlavor('1234') self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, self.instances_no_volumes, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( self.dbcreate_mock.return_value.id) self.assertEqual(3, mock_ins_create.call_count) @patch.object(DBCluster, 'update') @patch.object(redis_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') def test_grow(self, mock_client, mock_check_quotas, mock_task_api, mock_ins_create, mock_conf, mock_update): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=True)) mock_client.return_value.flavors = Mock() self.cluster.grow(self.instances_w_volumes) mock_task_api.return_value.grow_cluster.assert_called_with( self.dbcreate_mock.return_value.id, [mock_ins_create.return_value.id] * 3) self.assertEqual(3, mock_ins_create.call_count) @patch.object(DBInstance, 'find_all') @patch.object(Cluster, 'get_guest') @patch.object(DBCluster, 'update') @patch.object(inst_models.Instance, 'load') @patch.object(inst_models.Instance, 'delete') def test_shrink(self, mock_ins_delete, mock_ins_load, mock_update, mock_guest, mock_find_all): mock_find_all.return_value.all.return_value = [ DBInstance(InstanceTasks.NONE, id="1", name="member1", compute_instance_id="compute-1", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-1", datastore_version_id="1", cluster_id=self.cluster_id, type="member")] self.cluster.shrink(['id1']) self.assertEqual(1, mock_ins_delete.call_count) @patch('trove.cluster.models.LOG') def test_delete_bad_task_status(self, mock_logging): self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL self.assertRaises(exception.UnprocessableEntity, self.cluster.delete) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_none(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.NONE self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_deleting(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.DELETING self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)
trove-12.1.0.dev92/trove/tests/unittests/cluster/test_vertica_cluster.py
# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
import uuid from mock import Mock from mock import patch from novaclient import exceptions as nova_exceptions from trove.cluster.models import Cluster from trove.cluster.models import ClusterTasks from trove.cluster.models import DBCluster from trove.common import clients from trove.common import exception from trove.common.strategies.cluster.experimental.vertica import ( api as vertica_api) from trove.instance import models as inst_models from trove.quota.quota import QUOTAS from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools class FakeOptGroup(object): def __init__(self, cluster_member_count=3, volume_support=True, device_path='/dev/vdb'): self.cluster_member_count = cluster_member_count self.volume_support = volume_support self.device_path = device_path class ClusterTest(trove_testtools.TestCase): def setUp(self): super(ClusterTest, self).setUp() self.get_client_patch = patch.object(task_api.API, 'get_client') self.get_client_mock = self.get_client_patch.start() self.addCleanup(self.get_client_patch.stop) self.cluster_id = str(uuid.uuid4()) self.cluster_name = "Cluster" + self.cluster_id self.tenant_id = "23423432" self.dv_id = "1" self.db_info = DBCluster(ClusterTasks.NONE, id=self.cluster_id, name=self.cluster_name, tenant_id=self.tenant_id, datastore_version_id=self.dv_id, task_id=ClusterTasks.NONE._code) self.context = trove_testtools.TroveTestContext(self) self.datastore = Mock() self.dv = Mock() self.dv.manager = "vertica" self.datastore_version = self.dv self.cluster = vertica_api.VerticaCluster(self.context, self.db_info, self.datastore, self.datastore_version) self.instances = [{'volume_size': 1, 'flavor_id': '1234', 'instance_type': 'master'}, {'volume_size': 1, 'flavor_id': '1234', 'instance_type': 'member'}, {'volume_size': 1, 'flavor_id': '1234', 'instance_type': 'member'}] self.db_instances = [1, 2, 3] def tearDown(self): super(ClusterTest, self).tearDown() @patch.object(inst_models.DBInstance, 'find_all') def test_create_empty_instances(self, *args): self.assertRaises(exception.ClusterNumInstancesNotSupported, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, [], {}, None, None) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(clients, 'create_nova_client') def test_create_flavor_not_specified(self, *args): instances = self.instances instances[0]['flavor_id'] = None self.assertRaises(exception.ClusterFlavorsNotEqual, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(clients, 'create_nova_client') def test_create_invalid_flavor_specified(self, mock_client, mock_find_all, mock_create): instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] (mock_client.return_value.flavors.get) = Mock( side_effect=nova_exceptions.NotFound( 404, "Flavor id not found %s" % id)) self.assertRaises(exception.FlavorNotFound, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(clients, 'create_nova_client') def test_create_volume_no_specified(self, mock_client, mock_find_all, mock_create): instances = self.instances instances[0]['volume_size'] = None flavors = Mock() mock_client.return_value.flavors = flavors 
self.assertRaises(exception.ClusterVolumeSizeRequired, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(clients, 'create_nova_client') @patch.object(vertica_api, 'CONF') def test_create_storage_specified_with_no_volume_support(self, mock_conf, mock_client, mock_find_all, mock_create): mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) instances = self.instances instances[0]['volume_size'] = None flavors = Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.VolumeNotSupported, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(clients, 'create_nova_client') @patch.object(vertica_api, 'CONF') def test_create_storage_not_specified_and_no_ephemeral_flavor(self, mock_conf, mock_client, m_find_all, mock_create): class FakeFlavor(object): def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor_id @property def ephemeral(self): return 0 instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. flavors.get.return_value) = FakeFlavor('1234') self.assertRaises(exception.LocalStorageNotSpecified, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(DBCluster, 'create') @patch.object(inst_models.DBInstance, 'find_all') @patch.object(clients, 'create_nova_client') def test_create_volume_not_equal(self, mock_client, mock_find_all, mock_create): instances = self.instances instances[0]['volume_size'] = 2 flavors = Mock() mock_client.return_value.flavors = flavors self.assertRaises(exception.ClusterVolumeSizesNotEqual, Cluster.create, Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) @patch.object(inst_models.DBInstance, 'find_all') @patch.object(inst_models.Instance, 'create') @patch.object(DBCluster, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') def test_create(self, mock_client, mock_check_quotas, mock_task_api, mock_db_create, mock_ins_create, mock_find_all): instances = self.instances flavors = Mock() mock_client.return_value.flavors = flavors self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(3, mock_ins_create.call_count)
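# Hedged sketch (inferred from the assertions above, not copied from trove's
# models) of the storage-validation decision these vertica tests exercise:
# with volume support a size is mandatory; without it, specifying a volume
# is an error and the flavor must offer ephemeral disk instead.
def storage_check(volume_support, volume_size, flavor_ephemeral):
    if volume_support:
        return 'ok' if volume_size else 'ClusterVolumeSizeRequired'
    if volume_size:
        return 'VolumeNotSupported'
    return 'ok' if flavor_ephemeral > 0 else 'LocalStorageNotSpecified'

assert storage_check(True, None, 0) == 'ClusterVolumeSizeRequired'
assert storage_check(False, 1, 0) == 'VolumeNotSupported'
assert storage_check(False, None, 0) == 'LocalStorageNotSpecified'
assert storage_check(False, None, 1) == 'ok'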
@patch.object(inst_models.DBInstance, 'find_all') @patch.object(vertica_api, 'CONF') @patch.object(inst_models.Instance, 'create') @patch.object(DBCluster, 'create') @patch.object(task_api, 'load') @patch.object(QUOTAS, 'check_quotas') @patch.object(clients, 'create_nova_client') def test_create_with_ephemeral_flavor(self, mock_client, mock_check_quotas, mock_task_api, mock_db_create, mock_ins_create, mock_conf, mock_find_all): class FakeFlavor(object): def __init__(self, flavor_id): self.flavor_id = flavor_id @property def id(self): return self.flavor_id @property def ephemeral(self): return 1 instances = [{'flavor_id': '1234'}, {'flavor_id': '1234'}, {'flavor_id': '1234'}] mock_conf.get = Mock( return_value=FakeOptGroup(volume_support=False)) (mock_client.return_value. flavors.get.return_value) = FakeFlavor('1234') self.cluster.create(Mock(), self.cluster_name, self.datastore, self.datastore_version, instances, {}, None, None) mock_task_api.return_value.create_cluster.assert_called_with( mock_db_create.return_value.id) self.assertEqual(3, mock_ins_create.call_count) @patch('trove.cluster.models.LOG') def test_delete_bad_task_status(self, mock_logging): self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL self.assertRaises(exception.UnprocessableEntity, self.cluster.delete) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_none(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.NONE self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING) @patch.object(task_api.API, 'delete_cluster') @patch.object(Cluster, 'update_db') @patch.object(inst_models.DBInstance, 'find_all') def test_delete_task_status_deleting(self, mock_find_all, mock_update_db, mock_delete_cluster): self.cluster.db_info.task_status = ClusterTasks.DELETING self.cluster.delete() mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)
trove-12.1.0.dev92/trove/tests/unittests/cmd/
trove-12.1.0.dev92/trove/tests/unittests/cmd/__init__.py
trove-12.1.0.dev92/trove/tests/unittests/cmd/test_status.py
# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
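# Hedged sketch of the oslo.upgradecheck contract verified by the tests
# below: a check maps "instances with running tasks" to Code.WARNING and a
# clean database to Code.SUCCESS. check_running_tasks is a hypothetical
# stand-in for the DBInstance query chain used by the real check.
from oslo_upgradecheck.upgradecheck import Code

def check_running_tasks(count_with_tasks):
    return Code.WARNING if count_with_tasks else Code.SUCCESS

assert check_running_tasks(0) == Code.SUCCESS
assert check_running_tasks(1) == Code.WARNING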
from mock import Mock from mock import patch from oslo_upgradecheck.upgradecheck import Code from trove.cmd import status from trove.tests.unittests import trove_testtools @patch("trove.cmd.status.db.get_db_api") @patch("trove.cmd.status.DBInstance") class TestUpgradeChecksInstancesWithTasks(trove_testtools.TestCase): def setUp(self): super(TestUpgradeChecksInstancesWithTasks, self).setUp() self.cmd = status.Checks() self.fake_db_api = Mock() def test__check_no_instances_with_tasks(self, mock_instance, fake_get_db_api): fake_get_db_api.return_value = self.fake_db_api mock_instance.query.return_value.filter.return_value.filter_by.\ return_value.count.return_value = 0 check_result = self.cmd._check_instances_with_running_tasks() self.assertEqual(Code.SUCCESS, check_result.code) def test__check_instances_with_tasks(self, mock_instance, fake_get_db_api): fake_get_db_api.return_value = self.fake_db_api mock_instance.query.return_value.filter.return_value.filter_by.\ return_value.count.return_value = 1 check_result = self.cmd._check_instances_with_running_tasks() self.assertEqual(Code.WARNING, check_result.code)
trove-12.1.0.dev92/trove/tests/unittests/common/
trove-12.1.0.dev92/trove/tests/unittests/common/__init__.py
trove-12.1.0.dev92/trove/tests/unittests/common/test_auth.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from trove.common import auth from trove.tests.unittests import trove_testtools class TestAuth(trove_testtools.TestCase): def test_unicode_characters_in_headers(self): middleware = auth.AuthorizationMiddleware( "test_trove", [auth.TenantBasedAuth()]) tenant_id = 'test_tenant_id' url = '/%s/instances' % tenant_id req = webob.Request.blank(url) # test string with chinese characters test_str = u'\u6d4b\u8bd5' req.headers = { 'X-Tenant-ID': tenant_id, 'X-Auth-Project-Id': test_str } # invocation middleware.process_request(req)
trove-12.1.0.dev92/trove/tests/unittests/common/test_conductor_serializer.py
# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from trove.common import cfg from trove.common.rpc import conductor_guest_serializer as gsz from trove.common.rpc import conductor_host_serializer as hsz from trove.tests.unittests import trove_testtools CONF = cfg.CONF class FakeInstance(object): def __init__(self): self.uuid = 'a3af1652-686a-4574-a916-2ef7e85136e5' @property def key(self): return 'mo79Y86Bp3bzQDWR31ihhVGfLBmeac' class FakeContext(object): def __init__(self, instance_id=None, fields=None): self.instance_id = instance_id self.fields = fields class TestConductorSerializer(trove_testtools.TestCase): def setUp(self): self.uuid = 'a3af1652-686a-4574-a916-2ef7e85136e5' self.key = 'mo79Y86Bp3bzQDWR31ihhVGfLBmeac' self.data = 'ELzWd81qtgcj2Gxc1ipbh0HgbvHGrgptDj3n4GNMBN0F2WtNdr' self.context = {'a': 'ij2J8AJLyz0rDqbjxy4jPVINhnK2jsBGpWRKIe3tUnUD', 'b': 32, 'c': {'a': 21, 'b': 22}} self.old_guest_id = gsz.CONF.guest_id gsz.CONF.guest_id = self.uuid super(TestConductorSerializer, self).setUp() def tearDown(self): gsz.CONF.guest_id = self.old_guest_id super(TestConductorSerializer, self).tearDown() def test_gsz_serialize_entity_nokey(self): sz = gsz.ConductorGuestSerializer(None, None) self.assertEqual(sz.serialize_entity(self.context, self.data), self.data) def test_gsz_serialize_context_nokey(self): sz = gsz.ConductorGuestSerializer(None, None) self.assertEqual(sz.serialize_context(self.context), self.context) @mock.patch('trove.common.rpc.conductor_host_serializer.' 'get_instance_encryption_key', return_value='mo79Y86Bp3bzQDWR31ihhVGfLBmeac') def test_hsz_serialize_entity_nokey_noinstance(self, _): sz = hsz.ConductorHostSerializer(None, None) ctxt = FakeContext(instance_id=None) self.assertEqual(sz.serialize_entity(ctxt, self.data), self.data) @mock.patch('trove.common.rpc.conductor_host_serializer.' 'get_instance_encryption_key', return_value='mo79Y86Bp3bzQDWR31ihhVGfLBmeac') def test_hsz_serialize_context_nokey_noinstance(self, _): sz = hsz.ConductorHostSerializer(None, None) ctxt = FakeContext(instance_id=None) self.assertEqual(sz.serialize_context(ctxt), ctxt) @mock.patch('trove.common.rpc.conductor_host_serializer.' 'get_instance_encryption_key', return_value='mo79Y86Bp3bzQDWR31ihhVGfLBmeac') def test_conductor_entity(self, _): guestsz = gsz.ConductorGuestSerializer(None, self.key) hostsz = hsz.ConductorHostSerializer(None, None) encrypted_entity = guestsz.serialize_entity(self.context, self.data) self.assertNotEqual(encrypted_entity, self.data) entity = hostsz.deserialize_entity(self.context, encrypted_entity) self.assertEqual(entity, self.data) @mock.patch('trove.common.rpc.conductor_host_serializer.' 
'get_instance_encryption_key', return_value='mo79Y86Bp3bzQDWR31ihhVGfLBmeac') def test_conductor_context(self, _): guestsz = gsz.ConductorGuestSerializer(None, self.key) hostsz = hsz.ConductorHostSerializer(None, None) encrypted_context = guestsz.serialize_context(self.context) self.assertNotEqual(encrypted_context, self.context) context = hostsz.deserialize_context(encrypted_context) self.assertEqual(context.get('instance_id'), self.uuid) context.pop('instance_id') self.assertDictEqual(context, self.context)
trove-12.1.0.dev92/trove/tests/unittests/common/test_context.py
# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from mock import Mock from testtools.matchers import Equals, Is from trove.common import context from trove.common.notification import DBaaSInstanceCreate from trove.tests.unittests import trove_testtools class TestTroveContext(trove_testtools.TestCase): def test_create_with_extended_args(self): expected_service_catalog = {'key': 'value'} ctx = context.TroveContext(user="test_user_id", request_id="test_req_id", limit="500", marker="x", service_catalog=expected_service_catalog) self.assertThat(ctx.limit, Equals("500")) self.assertThat(ctx.marker, Equals("x")) self.assertThat(ctx.service_catalog, Equals(expected_service_catalog)) def test_create(self): ctx = context.TroveContext(user='test_user_id', request_id='test_req_id') self.assertThat(ctx.user, Equals('test_user_id')) self.assertThat(ctx.request_id, Equals('test_req_id')) self.assertThat(ctx.limit, Is(None)) self.assertThat(ctx.marker, Is(None)) self.assertThat(ctx.service_catalog, Is(None)) def test_to_dict(self): ctx = context.TroveContext(user='test_user_id', request_id='test_req_id') ctx_dict = ctx.to_dict() self.assertThat(ctx_dict.get('user'), Equals('test_user_id')) self.assertThat(ctx_dict.get('request_id'), Equals('test_req_id')) def test_to_dict_with_notification(self): ctx = context.TroveContext(user='test_user_id', tenant='the_tenant', request_id='test_req_id') ctx.notification = DBaaSInstanceCreate(ctx, request=Mock()) ctx_dict = ctx.to_dict() self.assertThat(ctx_dict.get('user'), Equals('test_user_id')) self.assertThat(ctx_dict.get('request_id'), Equals('test_req_id')) self.assertIn('trove_notification', ctx_dict) n_dict = ctx_dict['trove_notification'] self.assertThat(n_dict.get('notification_classname'), Equals('trove.common.notification.' 'DBaaSInstanceCreate'))
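# Minimal sketch of the to_dict()/from_dict() round-trip contract the
# surrounding context tests pin down: known fields survive serialization
# and, as test_create_with_bogus below checks, unknown keys are dropped.
# TinyContext is a hypothetical stand-in, not trove's TroveContext.
class TinyContext(object):
    _fields = ('user', 'request_id', 'tenant')

    def __init__(self, **kwargs):
        for field in self._fields:
            setattr(self, field, kwargs.get(field))

    def to_dict(self):
        return dict((f, getattr(self, f)) for f in self._fields)

    @classmethod
    def from_dict(cls, values):
        known = dict((k, v) for k, v in values.items() if k in cls._fields)
        return cls(**known)

ctx = TinyContext.from_dict({'user': 'u', 'request_id': 'r', 'bogus': 'x'})
assert ctx.to_dict() == {'user': 'u', 'request_id': 'r', 'tenant': None}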
def test_create_with_bogus(self): ctx = context.TroveContext.from_dict( {'user': 'test_user_id', 'request_id': 'test_req_id', 'tenant': 'abc', 'blah_blah': 'blah blah'}) self.assertThat(ctx.user, Equals('test_user_id')) self.assertThat(ctx.request_id, Equals('test_req_id')) self.assertThat(ctx.tenant, Equals('abc')) self.assertThat(ctx.limit, Is(None)) self.assertThat(ctx.marker, Is(None)) self.assertThat(ctx.service_catalog, Is(None))
trove-12.1.0.dev92/trove/tests/unittests/common/test_crypto_utils.py
# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock import os import six from trove.common import crypto_utils from trove.tests.unittests import trove_testtools class TestEncryptUtils(trove_testtools.TestCase): def setUp(self): super(TestEncryptUtils, self).setUp() def tearDown(self): super(TestEncryptUtils, self).tearDown() def test_encode_decode_string(self): random_data = bytearray(os.urandom(12)) data = [b'abc', b'numbers01234', b'\x00\xFF\x00\xFF\xFF\x00', random_data, u'Unicode:\u20ac'] for datum in data: encoded_data = crypto_utils.encode_data(datum) decoded_data = crypto_utils.decode_data(encoded_data) if isinstance(datum, six.text_type): decoded_data = decoded_data.decode('utf-8') self.assertEqual(datum, decoded_data, "Encode/decode failed")
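# Sketch of the round-trip being asserted above. Judging by the behavior the
# test expects, encode_data/decode_data act as a base64-style transport
# encoding; the equivalent stdlib round-trip looks like this (illustrative
# only -- the real helpers live in trove.common.crypto_utils).
import base64

for datum in (b'abc', b'\x00\xff\x00', u'Unicode:\u20ac'.encode('utf-8')):
    encoded = base64.b64encode(datum)
    assert base64.b64decode(encoded) == datum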
def test_pad_unpad(self): for size in range(1, 100): data_str = b'a' * size padded_str = crypto_utils.pad_for_encryption( data_str, crypto_utils.IV_BYTE_COUNT) self.assertEqual(0, len(padded_str) % crypto_utils.IV_BYTE_COUNT, "Padding not successful") unpadded_str = crypto_utils.unpad_after_decryption(padded_str) self.assertEqual(data_str, unpadded_str, "String mangled after pad/unpad") def test_encrypt_decrypt(self): key = 'my_secure_key' for size in range(1, 100): orig_data = os.urandom(size) orig_encoded = crypto_utils.encode_data(orig_data) encrypted = crypto_utils.encrypt_data(orig_encoded, key) encoded = crypto_utils.encode_data(encrypted) decoded = crypto_utils.decode_data(encoded) decrypted = crypto_utils.decrypt_data(decoded, key) final_decoded = crypto_utils.decode_data(decrypted) self.assertEqual(orig_data, final_decoded, "Decrypted data did not match original") def test_encrypt(self): # test encrypt() with a hardcoded IV key = 'my_secure_key' salt = b'x' * crypto_utils.IV_BYTE_COUNT with mock.patch('os.urandom', return_value=salt): for orig_data, expected in ( # byte string (b'Hello World!', 'eHh4eHh4eHh4eHh4eHh4eF5RK6VdDrAWl4Th1mNG2eps+VB2BouFRiY2Wa' 'P/RRPT'), # Unicode string (encoded to UTF-8) (u'Unicode:\u20ac', 'eHh4eHh4eHh4eHh4eHh4eAMsI5YsrtMNAPJfVF0j9NegXML7OsJ0LuAy66' 'LKv5F4'), ): orig_encoded = crypto_utils.encode_data(orig_data) encrypted = crypto_utils.encrypt_data(orig_encoded, key) encoded = crypto_utils.encode_data(encrypted) self.assertEqual(expected, encoded) def test_decrypt(self): key = 'my_secure_key' for encoded, expected in ( # byte string: b'Hello World!' ('ZUhoNGVIaDRlSGg0ZUhoNL9PmM70hVcQ7j/kYF7Pw+BT7VSfsht0VsCIxy' 'KNN0NH', b'Hello World!'), # Unicode string: u'Unicode:\u20ac' ('ZUhoNGVIaDRlSGg0ZUhoNIHZLIuIcQCRwWY7PR2y7JcqoDf4ViqXIfh0uE' 'Rbg9BA', b'Unicode:\xe2\x82\xac'), ): decoded = crypto_utils.decode_data(encoded) decrypted = crypto_utils.decrypt_data(decoded, key) final_decoded = crypto_utils.decode_data(decrypted) self.assertEqual(expected, final_decoded)
trove-12.1.0.dev92/trove/tests/unittests/common/test_dbmodels.py
# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import mock from trove.common.db import models from trove.tests.unittests import trove_testtools class DatastoreSchemaTest(trove_testtools.TestCase): def setUp(self): super(DatastoreSchemaTest, self).setUp() self.dbname = 'testdb' self.serial_db = {'_name': self.dbname, '_character_set': None, '_collate': None} def tearDown(self): super(DatastoreSchemaTest, self).tearDown() def _empty_schema(self): return models.DatastoreSchema(deserializing=True) def test_init_name(self): database = models.DatastoreSchema(self.dbname) self.assertEqual(self.dbname, database.name) database2 = models.DatastoreSchema(name=self.dbname) self.assertEqual(self.dbname, database2.name) def test_init_no_name(self): self.assertRaises(RuntimeError, models.DatastoreSchema) @mock.patch.object(models.DatastoreSchema, 'verify_dict') def test_init_deserializing(self, mock_verify): database = models.DatastoreSchema.deserialize(self.serial_db) mock_verify.assert_any_call() self.assertEqual(self.dbname, database.name) def test_serialize(self): database = models.DatastoreSchema(self.dbname) self.assertEqual(self.serial_db, database.serialize()) def test_name_property(self): test_name = "Anna" database = self._empty_schema() database.name = test_name self.assertEqual(test_name, database.name) def _do_validate_bad_schema_name(self, name): database = self._empty_schema() self.assertRaises(ValueError, database._validate_schema_name, name) def test_validate_name_empty(self): self._do_validate_bad_schema_name(None) @mock.patch.object(models.DatastoreSchema, '_max_schema_name_length', new_callable=mock.PropertyMock) def test_validate_name_long(self, mock_max_len): mock_max_len.return_value = 5 self._do_validate_bad_schema_name('toolong') @mock.patch.object(models.DatastoreSchema, '_is_valid_schema_name') def test_validate_name_invalid(self, mock_is_valid): mock_is_valid.return_value = False self._do_validate_bad_schema_name('notvalid') def test_verify_dict(self): database = models.DatastoreSchema(self.dbname) # using context patch because the property setter needs to work # properly during init for this test with mock.patch.object( models.DatastoreSchema, 'name', new_callable=mock.PropertyMock) as mock_name_property: database.verify_dict() mock_name_property.assert_called_with(self.dbname) def test_checks_pass(self): database = models.DatastoreSchema(self.dbname) database.check_reserved() database.check_create() database.check_delete() @mock.patch.object(models.DatastoreSchema, 'ignored_dbs', new_callable=mock.PropertyMock) def test_checks_fail(self, mock_ignored_dbs): mock_ignored_dbs.return_value = [self.dbname] database = models.DatastoreSchema(self.dbname) self.assertRaises(ValueError, database.check_reserved) self.assertRaises(ValueError, database.check_create) self.assertRaises(ValueError, database.check_delete) class DatastoreUserTest(trove_testtools.TestCase): def setUp(self): super(DatastoreUserTest, self).setUp() self.username = 'testuser' self.password = 'password' self.host = '192.168.0.1' self.dbname = 'testdb' self.serial_db = {'_name': self.dbname, '_character_set': None, '_collate': None} self.databases = [self.serial_db] self.host_wildcard = '%' self.serial_user_basic = { '_name': self.username, '_password': None, '_host': self.host_wildcard, '_databases': [], '_is_root': False } self.serial_user_full = { '_name': self.username, '_password': self.password, '_host': self.host, '_databases': self.databases, '_is_root': False } def tearDown(self): super(DatastoreUserTest, self).tearDown() def _empty_user(self): return 
models.DatastoreUser(deserializing=True) def _test_user_basic(self, user): self.assertEqual(self.username, user.name) self.assertIsNone(user.password) self.assertEqual(self.host_wildcard, user.host) self.assertEqual([], user.databases) def _test_user_full(self, user): self.assertEqual(self.username, user.name) self.assertEqual(self.password, user.password) self.assertEqual(self.host, user.host) self.assertEqual(self.databases, user.databases) def test_init_name(self): user1 = models.DatastoreUser(self.username) self._test_user_basic(user1) user2 = models.DatastoreUser(name=self.username) self._test_user_basic(user2) def test_init_no_name(self): self.assertRaises(ValueError, models.DatastoreUser) def test_init_options(self): user1 = models.DatastoreUser(self.username) self._test_user_basic(user1) user2 = models.DatastoreUser(self.username, self.password, self.host, self.dbname) self._test_user_full(user2) user3 = models.DatastoreUser(name=self.username, password=self.password, host=self.host, databases=self.dbname) self._test_user_full(user3) @mock.patch.object(models.DatastoreUser, 'verify_dict') def test_init_deserializing(self, mock_verify): user1 = models.DatastoreUser.deserialize(self.serial_user_basic) self._test_user_basic(user1) user2 = models.DatastoreUser.deserialize(self.serial_user_full) self._test_user_full(user2) self.assertEqual(2, mock_verify.call_count) def test_serialize(self): user1 = models.DatastoreUser(self.username) self.assertEqual(self.serial_user_basic, user1.serialize()) user2 = models.DatastoreUser(self.username, self.password, self.host, self.dbname) self.assertEqual(self.serial_user_full, user2.serialize()) @mock.patch.object(models.DatastoreUser, '_validate_user_name') def test_name_property(self, mock_validate): test_name = "Anna" user = self._empty_user() user.name = test_name self.assertEqual(test_name, user.name) mock_validate.assert_called_with(test_name) def _do_validate_bad_user_name(self, name): user = self._empty_user() self.assertRaises(ValueError, user._validate_user_name, name) def test_validate_name_empty(self): self._do_validate_bad_user_name(None) @mock.patch.object(models.DatastoreUser, '_max_user_name_length', new_callable=mock.PropertyMock) def test_validate_name_long(self, mock_max_len): mock_max_len.return_value = 5 self._do_validate_bad_user_name('toolong') @mock.patch.object(models.DatastoreUser, '_is_valid_user_name') def test_validate_name_invalid(self, mock_is_valid): mock_is_valid.return_value = False self._do_validate_bad_user_name('notvalid') @mock.patch.object(models.DatastoreUser, '_is_valid_password') def test_password_property(self, mock_validate): test_password = "NewPassword" user = self._empty_user() user.password = test_password mock_validate.assert_called_with(test_password) self.assertEqual(test_password, user.password) @mock.patch.object(models.DatastoreUser, '_is_valid_password') def test_password_property_error(self, mock_validate): mock_validate.return_value = False test_password = "NewPassword" user = self._empty_user() def test(): user.password = test_password self.assertRaises(ValueError, test) @mock.patch.object(models.DatastoreUser, '_is_valid_host_name') def test_host_property(self, mock_validate): test_host = "192.168.0.2" user = self._empty_user() user.host = test_host mock_validate.assert_called_with(test_host) self.assertEqual(test_host, user.host) @mock.patch.object(models.DatastoreUser, '_is_valid_host_name') def test_host_property_error(self, mock_validate): mock_validate.return_value = False test_host = 
"192.168.0.2" user = self._empty_user() def test(): user.host = test_host self.assertRaises(ValueError, test) @mock.patch.object(models.DatastoreUser, '_add_database') def test_databases_property(self, mock_add_database): test_dbname1 = 'otherdb' test_dbname2 = 'lastdb' user = self._empty_user() def test(value): user._databases.append({'_name': value, '_character_set': None, '_collate': None}) mock_add_database.side_effect = test user.databases = self.dbname user.databases = [test_dbname1, test_dbname2] mock_add_database.assert_any_call(self.dbname) mock_add_database.assert_any_call(test_dbname1) mock_add_database.assert_any_call(test_dbname2) self.assertIn(self.serial_db, user.databases) self.assertIn({'_name': test_dbname1, '_character_set': None, '_collate': None}, user.databases) self.assertIn({'_name': test_dbname2, '_character_set': None, '_collate': None}, user.databases) def test_build_database_schema(self): user = self._empty_user() schema = user._build_database_schema(self.dbname) self.assertEqual(self.serial_db, schema.serialize()) def test_add_database(self): user = self._empty_user() user._add_database(self.dbname) self.assertEqual([self.serial_db], user.databases) # check that adding an exsting db does nothing user._add_database(self.dbname) self.assertEqual([self.serial_db], user.databases) @mock.patch.object(models, 'DatastoreSchema') def test_deserialize_schema(self, mock_ds_schema): mock_ds_schema.deserialize = mock.Mock() user = self._empty_user() user.deserialize_schema(self.serial_db) mock_ds_schema.deserialize.assert_called_with(self.serial_db) @mock.patch.object(models.DatastoreUser, 'deserialize_schema') @mock.patch.object(models.DatastoreUser, 'host', new_callable=mock.PropertyMock) @mock.patch.object(models.DatastoreUser, 'password', new_callable=mock.PropertyMock) @mock.patch.object(models.DatastoreUser, 'name', new_callable=mock.PropertyMock) def _test_verify_dict_with_mocks(self, user, mock_name_property, mock_password_property, mock_host_property, mock_deserialize_schema): user.verify_dict() mock_name_property.assert_called_with(self.username) mock_password_property.assert_called_with(self.password) mock_host_property.assert_called_with(self.host) mock_deserialize_schema.assert_called_with(self.serial_db) def test_verify_dict(self): user = models.DatastoreUser(self.username, self.password, self.host, self.dbname) self._test_verify_dict_with_mocks(user) def test_validate_dict_defaults(self): user = models.DatastoreUser(self.username) user.verify_dict() self.assertIsNone(user.password) self.assertEqual(self.host_wildcard, user.host) self.assertEqual([], user.databases) def test_is_root(self): user = models.DatastoreUser(self.username) self.assertFalse(user._is_root) user.make_root() self.assertTrue(user._is_root) def test_checks_pass(self): user = models.DatastoreUser(self.username) user.check_reserved() user.check_create() user.check_delete() @mock.patch.object(models.DatastoreUser, 'ignored_users', new_callable=mock.PropertyMock) def test_checks_fail(self, mock_ignored_users): mock_ignored_users.return_value = [self.username] user = models.DatastoreUser(self.username) self.assertRaises(ValueError, user.check_reserved) self.assertRaises(ValueError, user.check_create) self.assertRaises(ValueError, user.check_delete) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/common/test_exception.py0000644000175000017500000000202600000000000025411 
trove-12.1.0.dev92/trove/tests/unittests/common/test_exception.py

# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from trove.common.exception import TroveError
from trove.tests.unittests import trove_testtools


class TroveErrorTest(trove_testtools.TestCase):

    def test_valid_error_message_format(self):
        error = TroveError("%02d" % 1)
        self.assertEqual("01", error.message)

    def test_invalid_error_message_format(self):
        error = TroveError("test%999999sdb")
        self.assertEqual("test999999sdb", error.message)
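
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the sanitising
# behaviour asserted above, as plain calls. The rule shown (an unformattable
# message keeps its text but loses the '%') is taken from the two assertions
# above, not from new analysis.
def _demo_trove_error_messages():
    assert TroveError("%02d" % 1).message == "01"          # valid, unchanged
    assert TroveError("test%999999sdb").message == "test999999sdb"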
trove-12.1.0.dev92/trove/tests/unittests/common/test_notification.py

# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from mock import Mock, patch
from oslo_utils import timeutils

from trove.common import cfg
from trove.common.context import TroveContext
from trove.common import exception
from trove.common import notification
from trove.common.notification import EndNotification, StartNotification
from trove.conductor import api as conductor_api
from trove import rpc
from trove.tests.unittests import trove_testtools


class TestEndNotification(trove_testtools.TestCase):

    def setUp(self):
        super(TestEndNotification, self).setUp()
        self.context = trove_testtools.TroveTestContext(self)

    def _server_call(self, server_type):
        with patch.object(self.context, "notification",
                          server_type=server_type) as notification:
            with EndNotification(self.context):
                pass
            self.assertTrue(notification.notify_end.called)

    def _server_exception(self, server_type):
        with patch.object(self.context, "notification",
                          server_type=server_type) as notification:
            try:
                with EndNotification(self.context):
                    raise exception.TroveError()
            except Exception:
                self.assertTrue(notification.notify_exc_info.called)

    def test_api_server_call(self):
        self._server_call('api')

    def test_api_server_exception(self):
        self._server_exception('api')

    def test_taskmanager_server_call(self):
        self._server_call('taskmanager')

    def test_taskmanager_server_exception(self):
        self._server_exception('taskmanager')

    def test_conductor_server_call(self):
        with patch.object(conductor_api, 'API') as api:
            with patch.object(self.context, "notification",
                              server_type='conductor'):
                with EndNotification(self.context):
                    pass
            self.assertTrue(api(self.context).notify_end.called)

    def test_conductor_server_exception(self):
        with patch.object(conductor_api, 'API') as api:
            with patch.object(self.context, "notification",
                              server_type='conductor'):
                try:
                    with EndNotification(self.context):
                        raise exception.TroveError()
                except Exception:
                    self.assertTrue(
                        api(self.context).notify_exc_info.called)


class TestStartNotification(trove_testtools.TestCase):

    def setUp(self):
        super(TestStartNotification, self).setUp()
        self.context = trove_testtools.TroveTestContext(self)

    def test_api_call(self):
        with patch.object(self.context, "notification",
                          server_type='api') as notification:
            with StartNotification(self.context):
                pass
            self.assertTrue(notification.notify_start.called)

    def test_taskmanager_call(self):
        with patch.object(self.context, "notification",
                          server_type='taskmanager') as notification:
            with StartNotification(self.context):
                pass
            self.assertTrue(notification.notify_start.called)

    def test_conductor_call(self):
        with patch.object(conductor_api, 'API'):
            with patch.object(self.context, "notification",
                              server_type='conductor') as notification:
                with StartNotification(self.context):
                    pass
                self.assertTrue(notification.notify_start.called)


class TestNotificationCastWrapper(trove_testtools.TestCase):

    def test_no_notification(self):
        with notification.NotificationCastWrapper(TroveContext(), "foo"):
            pass

    def test_with_notification(self):
        context = trove_testtools.TroveTestContext(self)
        self.assertTrue(context.notification.needs_end_notification)
        with notification.NotificationCastWrapper(context, "foo"):
            self.assertEqual('foo', context.notification.server_type)
        self.assertEqual('api', context.notification.server_type)
        self.assertFalse(context.notification.needs_end_notification)


class TestTroveBaseTraits(trove_testtools.TestCase):

    def setUp(self):
        super(TestTroveBaseTraits, self).setUp()
        self.instance = Mock(db_info=Mock(created=timeutils.utcnow()))

    @patch.object(rpc, 'get_notifier')
    def test_n(self, notifier):
        notification.TroveBaseTraits(
            instance=self.instance).notify('event_type', 'publisher')
        self.assertTrue(notifier().info.called)
        a, _ = notifier().info.call_args
        payload = a[2]
        required_payload_keys = [
            'created_at', 'name', 'instance_id', 'instance_name',
            'instance_type_id', 'launched_at', 'nova_instance_id',
            'region', 'state_description', 'state', 'tenant_id',
            'user_id'
        ]
        self.assertTrue(set(required_payload_keys).issubset(set(payload)))

    @patch.object(rpc, 'get_notifier')
    def test_notification_after_serialization(self, notifier):
        orig_notify = notification.TroveBaseTraits(instance=self.instance)
        serialized = orig_notify.serialize(None)
        new_notify = notification.TroveBaseTraits().deserialize(None,
                                                                serialized)
        new_notify.notify('event_type', 'publisher')
        self.assertTrue(notifier().info.called)


class TestTroveCommonTraits(trove_testtools.TestCase):

    def setUp(self):
        super(TestTroveCommonTraits, self).setUp()
        self.instance = Mock(db_info=Mock(created=timeutils.utcnow()))

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification(self, notifier):
        notification.TroveCommonTraits(
            instance=self.instance).notify('event_type', 'publisher')
        self.assertTrue(notifier().info.called)
        a, _ = notifier().info.call_args
        payload = a[2]
        self.assertIn('availability_zone', payload)

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification_after_serialization(self, notifier):
        orig_notify = notification.TroveCommonTraits(instance=self.instance)
        serialized = orig_notify.serialize(None)
        new_notify = notification.TroveCommonTraits().deserialize(None,
                                                                  serialized)
        new_notify.notify('event_type', 'publisher')
        self.assertTrue(notifier().info.called)


class TestTroveInstanceCreate(trove_testtools.TestCase):

    def setUp(self):
        super(TestTroveInstanceCreate, self).setUp()
        self.instance = Mock(db_info=Mock(created=timeutils.utcnow()))

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification(self, notifier):
        notification.TroveInstanceCreate(instance=self.instance).notify()
        self.assertTrue(notifier().info.called)

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification_after_serialization(self, notifier):
        orig_notify = notification.TroveInstanceCreate(instance=self.instance)
        serialized = orig_notify.serialize(None)
        new_notify = notification.TroveInstanceCreate().deserialize(
            None, serialized)
        new_notify.notify()
        self.assertTrue(notifier().info.called)


class TestTroveInstanceDelete(trove_testtools.TestCase):

    def setUp(self):
        super(TestTroveInstanceDelete, self).setUp()
        self.instance = Mock(db_info=Mock(created=timeutils.utcnow()))

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification(self, notifier):
        notification.TroveInstanceDelete(instance=self.instance).notify()
        self.assertTrue(notifier().info.called)

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification_after_serialization(self, notifier):
        orig_notify = notification.TroveInstanceDelete(instance=self.instance)
        serialized = orig_notify.serialize(None)
        new_notify = notification.TroveInstanceDelete().deserialize(
            None, serialized)
        new_notify.notify()
        self.assertTrue(notifier().info.called)


class TestTroveInstanceModifyVolume(trove_testtools.TestCase):

    def setUp(self):
        super(TestTroveInstanceModifyVolume, self).setUp()
        self.instance = Mock(db_info=Mock(created=timeutils.utcnow()))

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification(self, notifier):
        notification.TroveInstanceModifyVolume(
            instance=self.instance).notify()
        self.assertTrue(notifier().info.called)

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification_after_serialization(self, notifier):
        orig_notify = notification.TroveInstanceModifyVolume(
            instance=self.instance)
        serialized = orig_notify.serialize(None)
        new_notify = notification.TroveInstanceModifyVolume().deserialize(
            None, serialized)
        new_notify.notify()
        self.assertTrue(notifier().info.called)


class TestTroveInstanceModifyFlavor(trove_testtools.TestCase):

    def setUp(self):
        super(TestTroveInstanceModifyFlavor, self).setUp()
        self.instance = Mock(db_info=Mock(created=timeutils.utcnow()))

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification(self, notifier):
        notification.TroveInstanceModifyFlavor(
            instance=self.instance).notify()
        self.assertTrue(notifier().info.called)

    @patch.object(cfg.CONF, 'get', Mock())
    @patch.object(rpc, 'get_notifier')
    def test_notification_after_serialization(self, notifier):
        orig_notify = notification.TroveInstanceModifyFlavor(
            instance=self.instance)
        serialized = orig_notify.serialize(None)
        new_notify = notification.TroveInstanceModifyFlavor().deserialize(
            None, serialized)
        new_notify.notify()
        self.assertTrue(notifier().info.called)


class TestDBaaSQuota(trove_testtools.TestCase):

    @patch.object(rpc, 'get_notifier')
    def test_notification(self, notifier):
        notification.DBaaSQuotas(None, Mock(), Mock()).notify()
        self.assertTrue(notifier().info.called)


class DBaaSTestNotification(notification.DBaaSAPINotification):

    def event_type(self):
        return 'instance_test'

    def required_start_traits(self):
        return ['name', 'flavor_id', 'datastore']

    def optional_start_traits(self):
        return ['databases', 'users']

    def required_end_traits(self):
        return ['instance_id']


class TestDBaaSNotification(trove_testtools.TestCase):

    def setUp(self):
        super(TestDBaaSNotification, self).setUp()
        self.test_n = DBaaSTestNotification(Mock(), request=Mock())

    def test_missing_required_start_traits(self):
        self.assertRaisesRegex(exception.TroveError,
                               self.test_n.required_start_traits()[0],
                               self.test_n.notify_start)

    def test_invalid_start_traits(self):
        self.assertRaisesRegex(exception.TroveError,
                               "The following required keys",
                               self.test_n.notify_start, foo='bar')

    def test_missing_required_end_traits(self):
        self.assertRaisesRegex(exception.TroveError,
                               self.test_n.required_end_traits()[0],
                               self.test_n.notify_end)

    def test_invalid_end_traits(self):
        self.assertRaisesRegex(exception.TroveError,
                               "The following required keys",
                               self.test_n.notify_end, foo='bar')

    def test_missing_required_error_traits(self):
        self.assertRaisesRegex(exception.TroveError,
                               self.test_n.required_error_traits()[0],
                               self.test_n._notify, 'error',
                               self.test_n.required_error_traits(), [])

    @patch.object(rpc, 'get_notifier')
    def test_start_event(self, notifier):
        self.test_n.notify_start(name='foo', flavor_id=7, datastore='db')
        self.assertTrue(notifier().info.called)
        a, _ = notifier().info.call_args
        self.assertEqual('dbaas.instance_test.start', a[1])

    @patch.object(rpc, 'get_notifier')
    def test_end_event(self, notifier):
        self.test_n.notify_end(instance_id='foo')
        self.assertTrue(notifier().info.called)
        a, _ = notifier().info.call_args
        self.assertEqual('dbaas.instance_test.end', a[1])

    @patch.object(rpc, 'get_notifier')
    def test_verify_base_values(self, notifier):
        self.test_n.notify_start(name='foo', flavor_id=7, datastore='db')
        self.assertTrue(notifier().info.called)
        a, _ = notifier().info.call_args
        payload = a[2]
        self.assertIn('client_ip', payload)
        self.assertIn('request_id', payload)
        self.assertIn('server_type', payload)
        self.assertIn('server_ip', payload)
        self.assertIn('tenant_id', payload)

    @patch.object(rpc, 'get_notifier')
    def test_verify_required_start_args(self, notifier):
        self.test_n.notify_start(name='foo', flavor_id=7, datastore='db')
        self.assertTrue(notifier().info.called)
        a, _ = notifier().info.call_args
        payload = a[2]
        self.assertIn('name', payload)
        self.assertIn('flavor_id', payload)
        self.assertIn('datastore', payload)
        self.assertNotIn('users', payload)

    @patch.object(rpc, 'get_notifier')
    def test_verify_optional_start_args(self, notifier):
        self.test_n.notify_start(name='foo', flavor_id=7, datastore='db',
                                 users='the users')
        self.assertTrue(notifier().info.called)
        a, _ = notifier().info.call_args
        payload = a[2]
        self.assertIn('users', payload)

    @patch.object(rpc, 'get_notifier')
    def test_verify_required_end_args(self, notifier):
        self.test_n.notify_end(instance_id='foo')
        self.assertTrue(notifier().info.called)
        a, _ = notifier().info.call_args
        payload = a[2]
        self.assertIn('instance_id', payload)

    def _test_notify_callback(self, fn, *args, **kwargs):
        with patch.object(rpc, 'get_notifier') as notifier:
            mock_callback = Mock()
            self.test_n.register_notify_callback(mock_callback)
            mock_context = Mock()
            mock_context.notification = Mock()
            self.test_n.context = mock_context
            fn(*args, **kwargs)
            self.assertTrue(notifier().info.called)
            self.assertTrue(mock_callback.called)
            self.test_n.register_notify_callback(None)

    def test_notify_callback(self):
        required_keys = {
            'datastore': 'ds',
            'name': 'name',
            'flavor_id': 'flav_id',
            'instance_id': 'inst_id',
        }
        self._test_notify_callback(self.test_n.notify_start,
                                   **required_keys)
        self._test_notify_callback(self.test_n.notify_end,
                                   **required_keys)
        self._test_notify_callback(self.test_n.notify_exc_info,
                                   'error', 'exc')
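
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the bracketing
# pattern the context-manager tests above exercise. ``context`` stands in
# for a real TroveContext with a populated ``notification`` attribute; the
# trait values are illustrative.
def _demo_notification_bracketing(context):
    with StartNotification(context, name='inst', flavor_id=7,
                           datastore='db'):
        pass  # a dbaas.<event_type>.start notification is emitted
    with EndNotification(context, instance_id='inst-id'):
        pass  # .end on success; notify_exc_info() if the body raises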
trove-12.1.0.dev92/trove/tests/unittests/common/test_pagination.py

# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from mock import Mock

from trove.common import pagination
from trove.tests.unittests import trove_testtools


class TestPaginatedDataView(trove_testtools.TestCase):

    def test_creation_with_string_marker(self):
        view = pagination.PaginatedDataView("TestType", [],
                                            "http://current_page",
                                            next_page_marker="marker")
        self.assertEqual("marker", view.next_page_marker)

    def test_creation_with_none_marker(self):
        view = pagination.PaginatedDataView("TestType", [],
                                            "http://current_page",
                                            next_page_marker=None)
        self.assertIsNone(view.next_page_marker)

    def test_creation_with_none_string_marker(self):
        view = pagination.PaginatedDataView("TestType", [],
                                            "http://current_page",
                                            next_page_marker=52)
        self.assertEqual("52", view.next_page_marker)

    def _do_paginate_list(self, limit=None, marker='', include_marker=False):
        li = ['a', 'b', 'c', 'd', 'e']
        return pagination.paginate_list(li, limit=limit, marker=marker,
                                        include_marker=include_marker)

    def test_paginate_list(self):
        # start list
        li_1, marker_1 = self._do_paginate_list(limit=2)
        self.assertEqual(['a', 'b'], li_1)
        self.assertEqual('b', marker_1)

        # continue list, do not include marker in result
        li_2, marker_2 = self._do_paginate_list(limit=2, marker=marker_1)
        self.assertEqual(['c', 'd'], li_2)
        self.assertEqual('d', marker_2)
        li_3, marker_3 = self._do_paginate_list(limit=2, marker=marker_2)
        self.assertEqual(['e'], li_3)
        self.assertIsNone(marker_3)

        # alternate continue list, include marker in result
        li_4, marker_4 = self._do_paginate_list(limit=2, marker=marker_1,
                                                include_marker=True)
        self.assertEqual(['b', 'c'], li_4)
        self.assertEqual('c', marker_4)
        li_5, marker_5 = self._do_paginate_list(limit=2, marker=marker_4,
                                                include_marker=True)
        self.assertEqual(['c', 'd'], li_5)
        self.assertEqual('d', marker_5)
        li_6, marker_6 = self._do_paginate_list(limit=2, marker=marker_5,
                                                include_marker=True)
        self.assertEqual(['d', 'e'], li_6)
        self.assertIsNone(marker_6)

        # bad marker
        li_4, marker_4 = self._do_paginate_list(marker='f')
        self.assertEqual([], li_4)
        self.assertIsNone(marker_4)
        li_5, marker_5 = self._do_paginate_list(limit=1, marker='f')
        self.assertEqual([], li_5)
        self.assertIsNone(marker_5)

    def test_dict_paginate(self):
        li = [{'_collate': 'en_US.UTF-8', '_character_set': 'UTF8',
               '_name': 'db1'},
              {'_collate': 'en_US.UTF-8', '_character_set': 'UTF8',
               '_name': 'db3'},
              {'_collate': 'en_US.UTF-8', '_character_set': 'UTF8',
               '_name': 'db2'},
              {'_collate': 'en_US.UTF-8', '_character_set': 'UTF8',
               '_name': 'db5'},
              {'_collate': 'en_US.UTF-8', '_character_set': 'UTF8',
               '_name': 'db4'}]
        l, m = pagination.paginate_dict_list(li, '_name', limit=1,
                                             marker='db1',
                                             include_marker=True)
        self.assertEqual(l[0], li[0])
        self.assertEqual(m, 'db1')

    def test_object_paginate(self):
        def build_mock_object(name):
            o = Mock()
            o.name = name
            o.attr = 'attr'
            return o

        li = [build_mock_object('db1'),
              build_mock_object('db2'),
              build_mock_object('db3')]
        l, m = pagination.paginate_object_list(li, 'name', limit=1,
                                               marker='db1',
                                               include_marker=True)
        self.assertEqual(l[0], li[0])
        self.assertEqual(m, 'db1')
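
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): draining a list
# page by page with paginate_list(), the pattern the assertions above pin
# down. The page size of 3 is illustrative.
def _demo_walk_pages():
    items = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
    pages, marker = [], ''
    while True:
        page, marker = pagination.paginate_list(items, limit=3,
                                                marker=marker)
        pages.append(page)
        if marker is None:   # None marks the final page
            break
    assert pages == [['a', 'b', 'c'], ['d', 'e', 'f'], ['g']]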
trove-12.1.0.dev92/trove/tests/unittests/common/test_policy.py

# Copyright 2016 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import MagicMock
from mock import NonCallableMock
from mock import patch

from trove.common import exception as trove_exceptions
from trove.common import policy as trove_policy
from trove.tests.unittests import trove_testtools


class TestPolicy(trove_testtools.TestCase):

    def setUp(self):
        super(TestPolicy, self).setUp()
        self.context = trove_testtools.TroveTestContext(self)
        self.mock_enforcer = MagicMock()
        get_enforcer_patch = patch.object(trove_policy, 'get_enforcer',
                                          return_value=self.mock_enforcer)
        self.addCleanup(get_enforcer_patch.stop)
        self.mock_get_enforcer = get_enforcer_patch.start()

    def test_authorize_on_tenant(self):
        test_rule = NonCallableMock()
        trove_policy.authorize_on_tenant(self.context, test_rule)
        self.mock_get_enforcer.assert_called_once_with()
        self.mock_enforcer.authorize.assert_called_once_with(
            test_rule, {'tenant': self.context.project_id},
            self.context.to_dict(), do_raise=True,
            exc=trove_exceptions.PolicyNotAuthorized, action=test_rule)

    def test_authorize_on_target(self):
        test_rule = NonCallableMock()
        test_target = NonCallableMock()
        trove_policy.authorize_on_target(self.context, test_rule,
                                         test_target)
        self.mock_get_enforcer.assert_called_once_with()
        self.mock_enforcer.authorize.assert_called_once_with(
            test_rule, test_target, self.context.to_dict(), do_raise=True,
            exc=trove_exceptions.PolicyNotAuthorized, action=test_rule)
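
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): how the two
# helpers under test are meant to be called from a controller. The rule
# name 'instance:delete' and the target dict are illustrative values, not
# taken from trove's policy file.
def _demo_policy_checks(context, instance):
    # tenant-scoped: the target is the caller's own project
    trove_policy.authorize_on_tenant(context, 'instance:delete')
    # target-scoped: the target describes the resource being acted on
    trove_policy.authorize_on_target(context, 'instance:delete',
                                     {'tenant': instance.tenant_id})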
trove-12.1.0.dev92/trove/tests/unittests/common/test_secure_serializer.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from trove.common.rpc import secure_serializer as ssz
from trove.tests.unittests import trove_testtools


class TestSecureSerializer(trove_testtools.TestCase):

    def setUp(self):
        self.key = 'xuUyAKn5mDANoM5sRxQsb6HGiugWVD'
        self.data = '5rzFfaKU630rRxL1g3c80EHnHDf534'
        self.context = {'fld1': 3, 'fld2': 'abc'}
        super(TestSecureSerializer, self).setUp()

    def tearDown(self):
        super(TestSecureSerializer, self).tearDown()

    def test_sz_nokey_serialize_entity(self):
        sz = ssz.SecureSerializer(base=None, key=None)
        en = sz.serialize_entity(self.context, self.data)
        self.assertEqual(en, self.data)

    def test_sz_nokey_deserialize_entity(self):
        sz = ssz.SecureSerializer(base=None, key=None)
        en = sz.deserialize_entity(self.context, self.data)
        self.assertEqual(en, self.data)

    def test_sz_nokey_serialize_context(self):
        sz = ssz.SecureSerializer(base=None, key=None)
        en = sz.serialize_context(self.context)
        self.assertEqual(en, self.context)

    def test_sz_nokey_deserialize_context(self):
        sz = ssz.SecureSerializer(base=None, key=None)
        en = sz.deserialize_context(self.context)
        self.assertEqual(en, self.context)

    def test_sz_entity(self):
        sz = ssz.SecureSerializer(base=None, key=self.key)
        en = sz.serialize_entity(self.context, self.data)
        self.assertNotEqual(en, self.data)
        self.assertEqual(sz.deserialize_entity(self.context, en), self.data)

    def test_sz_context(self):
        sz = ssz.SecureSerializer(base=None, key=self.key)
        sctxt = sz.serialize_context(self.context)
        self.assertNotEqual(sctxt, self.context)
        self.assertEqual(sz.deserialize_context(sctxt), self.context)
trove-12.1.0.dev92/trove/tests/unittests/common/test_serializer.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock

from trove.common.rpc import serializer
from trove.tests.unittests import trove_testtools


class TestSerializer(trove_testtools.TestCase):

    def setUp(self):
        self.data = 'abcdefghijklmnopqrstuvwxyz'
        self.context = {}
        super(TestSerializer, self).setUp()

    def tearDown(self):
        super(TestSerializer, self).tearDown()

    def test_serialize_1(self):
        base = mock.Mock()
        sz = serializer.TroveSerializer(base=base)
        sz.serialize_entity(self.context, self.data)
        base.serialize_entity.assert_called_with(self.context, self.data)

    def test_serialize_2(self):
        base = mock.Mock()
        sz1 = serializer.TroveSerializer(base=base)
        sz = serializer.TroveSerializer(base=sz1)
        sz.serialize_entity(self.context, self.data)
        base.serialize_entity.assert_called_with(self.context, self.data)

    def test_serialize_3(self):
        base = mock.Mock()
        sz = serializer.TroveSerializer(base=base)
        sz.deserialize_entity(self.context, self.data)
        base.deserialize_entity.assert_called_with(self.context, self.data)

    def test_serialize_4(self):
        base = mock.Mock()
        sz1 = serializer.TroveSerializer(base=base)
        sz = serializer.TroveSerializer(base=sz1)
        sz.deserialize_entity(self.context, self.data)
        base.deserialize_entity.assert_called_with(self.context, self.data)

    def test_serialize_5(self):
        base = mock.Mock()
        sz = serializer.TroveSerializer(base=base)
        sz.serialize_context(self.context)
        base.serialize_context.assert_called_with(self.context)

    def test_serialize_6(self):
        base = mock.Mock()
        sz1 = serializer.TroveSerializer(base=base)
        sz = serializer.TroveSerializer(base=sz1)
        sz.serialize_context(self.context)
        base.serialize_context.assert_called_with(self.context)

    def test_serialize_7(self):
        base = mock.Mock()
        sz = serializer.TroveSerializer(base=base)
        sz.deserialize_context(self.context)
        base.deserialize_context.assert_called_with(self.context)

    def test_serialize_8(self):
        base = mock.Mock()
        sz1 = serializer.TroveSerializer(base=base)
        sz = serializer.TroveSerializer(base=sz1)
        sz.deserialize_context(self.context)
        base.deserialize_context.assert_called_with(self.context)

    def test_serialize_9(self):
        sz = serializer.TroveSerializer(base=None)
        self.assertEqual(sz.serialize_entity(self.context, self.data),
                         self.data)

    def test_serialize_10(self):
        sz = serializer.TroveSerializer(base=None)
        self.assertEqual(sz.deserialize_entity(self.context, self.data),
                         self.data)

    def test_serialize_11(self):
        sz = serializer.TroveSerializer(base=None)
        self.assertEqual(sz.serialize_context(self.context), self.context)

    def test_serialize_12(self):
        sz = serializer.TroveSerializer(base=None)
        self.assertEqual(sz.deserialize_context(self.context), self.context)

    def test_serialize_13(self):
        bz = serializer.TroveSerializer(base=None)
        sz = serializer.TroveSerializer(base=bz)
        self.assertEqual(sz.serialize_entity(self.context, self.data),
                         self.data)

    def test_serialize_14(self):
        bz = serializer.TroveSerializer(base=None)
        sz = serializer.TroveSerializer(base=bz)
        self.assertEqual(sz.deserialize_entity(self.context, self.data),
                         self.data)

    def test_serialize_15(self):
        bz = serializer.TroveSerializer(base=None)
        sz = serializer.TroveSerializer(base=bz)
        self.assertEqual(sz.serialize_context(self.context), self.context)

    def test_serialize_16(self):
        bz = serializer.TroveSerializer(base=None)
        sz = serializer.TroveSerializer(base=bz)
        self.assertEqual(sz.deserialize_context(self.context), self.context)
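
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the delegation
# chain the sixteen numbered tests above walk through. A serializer built
# over a base forwards every call to it; with base=None it is an identity.
def _demo_serializer_chain():
    identity = serializer.TroveSerializer(base=None)
    chained = serializer.TroveSerializer(base=identity)
    assert chained.serialize_entity({}, 'payload') == 'payload'
    assert chained.deserialize_context({'user': 'u1'}) == {'user': 'u1'}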
trove-12.1.0.dev92/trove/tests/unittests/common/test_server_group.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import copy

from mock import Mock, patch

from trove.common import server_group as srv_grp
from trove.tests.unittests import trove_testtools


class TestServerGroup(trove_testtools.TestCase):

    def setUp(self):
        super(TestServerGroup, self).setUp()
        self.ServerGroup = srv_grp.ServerGroup()
        self.context = trove_testtools.TroveTestContext(self)
        self.sg_id = 'sg-1234'
        self.locality = 'affinity'
        self.expected_hints = {'group': self.sg_id}
        self.server_group = Mock()
        self.server_group.id = self.sg_id
        self.server_group.policies = [self.locality]
        self.server_group.members = ['id-1', 'id-2']
        self.empty_server_group = copy.copy(self.server_group)
        self.empty_server_group.members = ['id-1']

    @patch.object(srv_grp, 'create_nova_client')
    def test_create(self, mock_client):
        mock_create = Mock(return_value=self.server_group)
        mock_client.return_value.server_groups.create = mock_create
        server_group = self.ServerGroup.create(
            self.context, self.locality, "name_suffix")
        mock_create.assert_called_with(name="locality_name_suffix",
                                       policies=[self.locality])
        self.assertEqual(self.server_group, server_group)

    @patch.object(srv_grp, 'create_nova_client')
    def test_delete(self, mock_client):
        mock_delete = Mock()
        mock_client.return_value.server_groups.delete = mock_delete
        self.ServerGroup.delete(self.context, self.empty_server_group)
        mock_delete.assert_called_with(self.sg_id)

    @patch.object(srv_grp, 'create_nova_client')
    def test_delete_non_empty(self, mock_client):
        mock_delete = Mock()
        mock_client.return_value.server_groups.delete = mock_delete
        srv_grp.ServerGroup.delete(self.context, self.server_group)
        mock_delete.assert_not_called()

    @patch.object(srv_grp, 'create_nova_client')
    def test_delete_force(self, mock_client):
        mock_delete = Mock()
        mock_client.return_value.server_groups.delete = mock_delete
        self.ServerGroup.delete(self.context, self.server_group, force=True)
        mock_delete.assert_called_with(self.sg_id)

    def test_convert_to_hint(self):
        hint = srv_grp.ServerGroup.convert_to_hint(self.server_group)
        self.assertEqual(self.expected_hints, hint, "Unexpected hint")

    def test_convert_to_hints(self):
        hints = {'hint': 'myhint'}
        hints = srv_grp.ServerGroup.convert_to_hint(self.server_group, hints)
        self.expected_hints.update(hints)
        self.assertEqual(self.expected_hints, hints, "Unexpected hints")

    def test_convert_to_hint_none(self):
        self.assertIsNone(srv_grp.ServerGroup.convert_to_hint(None))

    @patch.object(srv_grp, 'create_nova_client')
    def test_build_scheduler_hint(self, mock_client):
        mock_create = Mock(return_value=self.server_group)
        mock_client.return_value.server_groups.create = mock_create
        expected_hint = {'get_back': 'same_dict'}
        scheduler_hint = self.ServerGroup.build_scheduler_hint(
            self.context, expected_hint, "name_suffix")
        self.assertEqual(expected_hint, scheduler_hint, "Unexpected hint")

    @patch.object(srv_grp, 'create_nova_client')
    def test_build_scheduler_hint_from_locality(self, mock_client):
        mock_create = Mock(return_value=self.server_group)
        mock_client.return_value.server_groups.create = mock_create
        expected_hint = {'group': 'sg-1234'}
        scheduler_hint = self.ServerGroup.build_scheduler_hint(
            self.context, self.locality, "name_suffix")
        self.assertEqual(expected_hint, scheduler_hint, "Unexpected hint")

    def test_build_scheduler_hint_none(self):
        self.assertIsNone(srv_grp.ServerGroup.build_scheduler_hint(
            self.context, None, None))

    def test_get_locality(self):
        locality = srv_grp.ServerGroup.get_locality(self.server_group)
        self.assertEqual(self.locality, locality, "Unexpected locality")

    def test_get_locality_none(self):
        self.assertIsNone(srv_grp.ServerGroup.get_locality(None))


trove-12.1.0.dev92/trove/tests/unittests/common/test_stream_codecs.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import os

from trove.common import stream_codecs
from trove.tests.unittests import trove_testtools


class TestStreamCodecs(trove_testtools.TestCase):

    def setUp(self):
        super(TestStreamCodecs, self).setUp()

    def tearDown(self):
        super(TestStreamCodecs, self).tearDown()

    def test_serialize_deserialize_base64codec(self):
        random_data = bytearray(os.urandom(12))
        data = [b'abc',
                b'numbers01234',
                b'non-ascii:\xe9\xff',
                random_data]

        codec = stream_codecs.Base64Codec()
        for datum in data:
            serialized_data = codec.serialize(datum)
            deserialized_data = codec.deserialize(serialized_data)
            self.assertEqual(datum, deserialized_data,
                             "Serialize/Deserialize failed")
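
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the byte-level
# round trip asserted above, spelled out for a single value including a
# non-ASCII byte sequence.
def _demo_base64_round_trip():
    codec = stream_codecs.Base64Codec()
    raw = b'non-ascii:\xe9\xff'
    assert codec.deserialize(codec.serialize(raw)) == raw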
trove-12.1.0.dev92/trove/tests/unittests/common/test_template.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from mock import Mock

from trove.common import template
from trove.datastore.models import DatastoreVersion
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util


class TemplateTest(trove_testtools.TestCase):

    def setUp(self):
        super(TemplateTest, self).setUp()
        util.init_db()
        self.env = template.ENV
        self.template = self.env.get_template("mysql/config.template")
        self.flavor_dict = {'ram': 1024, 'name': 'small', 'id': '55'}
        self.server_id = "180b5ed1-3e57-4459-b7a3-2aeee4ac012a"

    def tearDown(self):
        super(TemplateTest, self).tearDown()

    def _find_in_template(self, contents, teststr):
        found_group = None
        for line in contents.split('\n'):
            m = re.search('^%s.*' % teststr, line)
            if m:
                found_group = m.group(0)
        return found_group

    def validate_template(self, contents, teststr, test_flavor, server_id):
        # expected query_cache_size = {{ 8 * flavor_multiplier }}M
        flavor_multiplier = test_flavor['ram'] // 512
        found_group = self._find_in_template(contents, teststr)
        if not found_group:
            raise Exception("Could not find text in template")
        # Check that the last group has been rendered
        memsize = found_group.split(" ")[2]
        self.assertEqual("%sM" % (8 * flavor_multiplier), memsize)
        self.assertIsNotNone(server_id)
        self.assertGreater(len(server_id), 1)

    def test_rendering(self):
        rendered = self.template.render(flavor=self.flavor_dict,
                                        server_id=self.server_id)
        self.validate_template(rendered, "query_cache_size",
                               self.flavor_dict, self.server_id)

    def test_single_instance_config_rendering(self):
        datastore = Mock(spec=DatastoreVersion)
        datastore.datastore_name = 'MySql'
        datastore.name = 'mysql-5.7'
        datastore.manager = 'mysql'
        config = template.SingleInstanceConfigTemplate(datastore,
                                                       self.flavor_dict,
                                                       self.server_id)
        self.validate_template(config.render(), "query_cache_size",
                               self.flavor_dict, self.server_id)

    def test_renderer_discovers_special_config(self):
        """Finds our special config file for the version 'mysql-test'."""
        datastore = Mock(spec=DatastoreVersion)
        datastore.datastore_name = 'mysql'
        datastore.name = 'mysql-test'
        datastore.manager = 'mysql'
        config = template.SingleInstanceConfigTemplate(datastore,
                                                       self.flavor_dict,
                                                       self.server_id)
        self.validate_template(config.render(), "hyper",
                               {'ram': 0}, self.server_id)

    def test_replica_source_config_rendering(self):
        datastore = Mock(spec=DatastoreVersion)
        datastore.datastore_name = 'MySql'
        datastore.name = 'mysql-5.7'
        datastore.manager = 'mysql'
        config = template.ReplicaSourceConfigTemplate(datastore,
                                                      self.flavor_dict,
                                                      self.server_id)
        self.assertTrue(self._find_in_template(config.render(), "log_bin"))

    def test_replica_config_rendering(self):
        datastore = Mock(spec=DatastoreVersion)
        datastore.datastore_name = 'MySql'
        datastore.name = 'mysql-5.7'
        datastore.manager = 'mysql'
        config = template.ReplicaConfigTemplate(datastore,
                                                self.flavor_dict,
                                                self.server_id)
        self.assertTrue(self._find_in_template(config.render(), "relay_log"))
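
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the arithmetic
# validate_template() checks. For the 1024 MB flavor used in setUp():
#   flavor_multiplier = 1024 // 512 = 2, so query_cache_size renders "16M".
def _demo_expected_cache_size(ram_mb=1024):
    flavor_multiplier = ram_mb // 512
    return "%sM" % (8 * flavor_multiplier)   # "16M" for ram_mb=1024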
trove-12.1.0.dev92/trove/tests/unittests/common/test_timeutils.py

# Copyright 2016 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime
from datetime import timedelta
from datetime import tzinfo

from trove.common import timeutils
from trove.tests.unittests import trove_testtools


class bogus_tzinfo(tzinfo):
    """A bogus (but well-formed) tzinfo class with a +02:00 offset."""

    def utcoffset(self, dt):
        return timedelta(hours=2)

    def tzname(self, dt):
        return "BOGUS"

    def dst(self, dt):
        return timedelta(hours=1)


class invalid_tzinfo(tzinfo):
    """An invalid tzinfo class whose offsets are out of range."""

    def utcoffset(self, dt):
        return timedelta(hours=25)

    def tzname(self, dt):
        return "INVALID"

    def dst(self, dt):
        return timedelta(hours=25)


class TestTroveTimeutils(trove_testtools.TestCase):

    def setUp(self):
        super(TestTroveTimeutils, self).setUp()

    def tearDown(self):
        super(TestTroveTimeutils, self).tearDown()

    def test_utcnow_tz(self):
        dt = timeutils.utcnow()
        self.assertIsNone(dt.tzinfo)

    def test_utcnow_aware_tz(self):
        dt = timeutils.utcnow_aware()
        self.assertEqual(timedelta(0), dt.utcoffset())
        self.assertEqual('Z', dt.tzname())

    def test_isotime(self):
        dt = timeutils.utcnow_aware()
        expected = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (
            dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
        self.assertEqual(expected, timeutils.isotime(dt))

    def test_isotime_subsecond(self):
        dt = timeutils.utcnow_aware()
        expected = "%04d-%02d-%02dT%02d:%02d:%02d.%06dZ" % (
            dt.year, dt.month, dt.day,
            dt.hour, dt.minute, dt.second, dt.microsecond)
        self.assertEqual(expected, timeutils.isotime(dt, subsecond=True))

    def test_isotime_unaware(self):
        dt = timeutils.utcnow()
        expected = "%04d-%02d-%02dT%02d:%02d:%02d.%06dZ" % (
            dt.year, dt.month, dt.day,
            dt.hour, dt.minute, dt.second, dt.microsecond)
        self.assertEqual(expected, timeutils.isotime(dt, subsecond=True))

    def test_isotime_unaware_subsecond(self):
        dt = timeutils.utcnow()
        expected = "%04d-%02d-%02dT%02d:%02d:%02d.%06dZ" % (
            dt.year, dt.month, dt.day,
            dt.hour, dt.minute, dt.second, dt.microsecond)
        self.assertEqual(expected, timeutils.isotime(dt, subsecond=True))

    def test_bogus_unaware(self):
        dt = datetime.now(bogus_tzinfo())
        expected = "%04d-%02d-%02dT%02d:%02d:%02d.%06d+02:00" % (
            dt.year, dt.month, dt.day,
            dt.hour, dt.minute, dt.second, dt.microsecond)
        self.assertEqual(expected, timeutils.isotime(dt, subsecond=True))

    def test_bogus_unaware_subsecond(self):
        dt = datetime.now(bogus_tzinfo())
        expected = "%04d-%02d-%02dT%02d:%02d:%02d.%06d+02:00" % (
            dt.year, dt.month, dt.day,
            dt.hour, dt.minute, dt.second, dt.microsecond)
        self.assertEqual(expected, timeutils.isotime(dt, subsecond=True))

    def test_throws_exception(self):
        dt = datetime.now()
        dt = dt.replace(tzinfo=invalid_tzinfo())
        self.assertRaises(ValueError, timeutils.isotime, dt)
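
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the two output
# shapes the tests above pin down. A naive datetime is rendered with a 'Z'
# suffix; an aware one keeps its own offset (here +02:00 from bogus_tzinfo).
def _demo_isotime_shapes():
    naive = timeutils.utcnow()             # tzinfo is None -> '...Z'
    aware = datetime.now(bogus_tzinfo())   # -> '...+02:00'
    return (timeutils.isotime(naive, subsecond=True),
            timeutils.isotime(aware, subsecond=True))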
trove-12.1.0.dev92/trove/tests/unittests/common/test_utils.py

# Copyright 2014 SUSE Linux GmbH.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from mock import Mock
from mock import patch
from testtools import ExpectedException

from trove.common import exception
from trove.common import utils
from trove.tests.unittests import trove_testtools
from trove.tests.util import utils as test_utils
import webob


class TestUtils(trove_testtools.TestCase):

    def setUp(self):
        super(TestUtils, self).setUp()
        self.orig_utils_execute = utils.execute
        self.orig_utils_log_error = utils.LOG.error

    def tearDown(self):
        super(TestUtils, self).tearDown()
        utils.execute = self.orig_utils_execute
        utils.LOG.error = self.orig_utils_log_error

    def test_throws_process_execution_error(self):
        utils.execute = Mock(
            side_effect=exception.ProcessExecutionError(
                description='test-desc', exit_code=42, stderr='err',
                stdout='out', cmd='test'))
        with ExpectedException(
                exception.ProcessExecutionError,
                "test-desc\nCommand: test\nExit code: 42\n"
                "Stdout: 'out'\nStderr: 'err'"):
            utils.execute_with_timeout('/usr/bin/foo')

    def test_log_error_when_log_output_on_error_is_true(self):
        utils.execute = Mock(
            side_effect=exception.ProcessExecutionError(
                description='test-desc', exit_code=42, stderr='err',
                stdout='out', cmd='test'))
        utils.LOG.error = Mock()
        with ExpectedException(
                exception.ProcessExecutionError,
                "test-desc\nCommand: test\nExit code: 42\n"
                "Stdout: 'out'\nStderr: 'err'"):
            utils.execute_with_timeout(
                '/usr/bin/foo', log_output_on_error=True)
        utils.LOG.error.assert_called_with(
            u"Command '%(cmd)s' failed. %(description)s Exit code: "
            u"%(exit_code)s\nstderr: %(stderr)s\nstdout: %(stdout)s",
            {'description': 'test-desc', 'stderr': 'err', 'exit_code': 42,
             'stdout': 'out', 'cmd': 'test'})

    def test_unpack_singleton(self):
        self.assertEqual([1, 2, 3], utils.unpack_singleton([1, 2, 3]))
        self.assertEqual(0, utils.unpack_singleton([0]))
        self.assertEqual('test', utils.unpack_singleton('test'))
        self.assertEqual('test', utils.unpack_singleton(['test']))
        self.assertEqual([], utils.unpack_singleton([]))
        self.assertIsNone(utils.unpack_singleton(None))
        self.assertEqual([None, None], utils.unpack_singleton([None, None]))
        self.assertEqual('test', utils.unpack_singleton([['test']]))
        self.assertEqual([1, 2, 3], utils.unpack_singleton([[1, 2, 3]]))
        self.assertEqual(1, utils.unpack_singleton([[[1]]]))
        self.assertEqual([[1], [2]], utils.unpack_singleton([[1], [2]]))
        self.assertEqual(['a', 'b'], utils.unpack_singleton(['a', 'b']))

    def test_pagination_limit(self):
        self.assertEqual(5, utils.pagination_limit(5, 9))
        self.assertEqual(5, utils.pagination_limit(9, 5))

    def test_format_output(self):
        data = [
            ['', ''],
            ['Single line', 'Single line'],
            ['Long line no breaks ' * 10, 'Long line no breaks ' * 10],
            ['Long line. Has breaks ' * 5,
             'Long line.\nHas breaks ' * 2 + 'Long line. Has breaks ' * 3],
            ['Long line with semi: ' * 4,
             'Long line with semi:\n ' + 'Long line with semi: ' * 3],
            ['Long line with brack (' * 4,
             'Long line with brack\n(' + 'Long line with brack (' * 3],
        ]
        for index, datum in enumerate(data):
            self.assertEqual(datum[1], utils.format_output(datum[0]),
                             "Error formatting line %d of data" % index)

    def test_to_gb(self):
        result = utils.to_gb(123456789)
        self.assertEqual(0.11, result)

    def test_to_gb_small(self):
        result = utils.to_gb(2)
        self.assertEqual(0.01, result)

    def test_to_gb_zero(self):
        result = utils.to_gb(0)
        self.assertEqual(0.0, result)

    def test_to_mb(self):
        result = utils.to_mb(123456789)
        self.assertEqual(117.74, result)

    def test_to_mb_small(self):
        result = utils.to_mb(2)
        self.assertEqual(0.01, result)

    def test_to_mb_zero(self):
        result = utils.to_mb(0)
        self.assertEqual(0.0, result)

    @patch('trove.common.utils.LOG')
    def test_retry_decorator(self, _):
        class TestEx1(Exception):
            pass

        class TestEx2(Exception):
            pass

        class TestEx3(Exception):
            pass

        class TestExecutor(object):
            def _test_foo(self, arg):
                return arg

            @test_utils.retry(TestEx1, retries=5, delay_fun=lambda n: 0.2)
            def test_foo_1(self, arg):
                return self._test_foo(arg)

            @test_utils.retry((TestEx1, TestEx2), delay_fun=lambda n: 0.2)
            def test_foo_2(self, arg):
                return self._test_foo(arg)

        def assert_retry(fun, side_effect, exp_call_num, exp_exception):
            with patch.object(te, '_test_foo', side_effect=side_effect) as f:
                mock_arg = Mock()
                if exp_exception:
                    self.assertRaises(exp_exception, fun, mock_arg)
                else:
                    fun(mock_arg)
                f.assert_called_with(mock_arg)
                self.assertEqual(exp_call_num, f.call_count)

        te = TestExecutor()
        assert_retry(te.test_foo_1, [TestEx1, None], 2, None)
        assert_retry(te.test_foo_1, TestEx3, 1, TestEx3)
        assert_retry(te.test_foo_1, TestEx1, 5, TestEx1)
        assert_retry(te.test_foo_1, [TestEx1, TestEx3], 2, TestEx3)
        assert_retry(te.test_foo_2, [TestEx1, TestEx2, None], 3, None)
        assert_retry(te.test_foo_2, TestEx3, 1, TestEx3)
        assert_retry(te.test_foo_2, TestEx2, 3, TestEx2)
        assert_retry(te.test_foo_2, [TestEx1, TestEx3, TestEx2], 2, TestEx3)

    def test_req_to_text(self):
        req = webob.Request.blank('/')
        expected = u'GET / HTTP/1.0\r\nHost: localhost:80'
        self.assertEqual(expected, utils.req_to_text(req))

        # add a header containing unicode characters
        req.headers.update({
            'X-Auth-Project-Id': u'\u6d4b\u8bd5'})
        expected = (u'GET / HTTP/1.0\r\nHost: localhost:80\r\n'
                    u'X-Auth-Project-Id: \u6d4b\u8bd5')
        self.assertEqual(expected, utils.req_to_text(req))
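
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the decorator
# usage test_retry_decorator exercises. The method is retried on the listed
# exception types; the delay_fun here flattens the back-off for the example.
class _DemoFlaky(object):

    def __init__(self):
        self.calls = 0

    @test_utils.retry((IOError,), retries=3, delay_fun=lambda n: 0)
    def read(self):
        self.calls += 1
        if self.calls < 2:
            raise IOError("transient failure")  # retried once, then succeeds
        return "ok"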
trove-12.1.0.dev92/trove/tests/unittests/common/test_wsgi.py

# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from mock import Mock, patch
from testtools.matchers import Equals, Is, Not
import webob.exc

from trove.common import base_wsgi
from trove.common import exception
from trove.common import wsgi
from trove.tests.unittests import trove_testtools
import webob


class TestWsgi(trove_testtools.TestCase):

    def test_process_request(self):
        middleware = wsgi.ContextMiddleware("test_trove")
        req = webob.BaseRequest({})
        token = 'MI23fdf2defg123'
        user_id = 'test_user_id'
        req.headers = {
            'X-User': 'do not use - deprecated',
            'X-User-ID': user_id,
            'X-Auth-Token': token,
            'X-Service-Catalog': '[]'
        }
        req.environ = {}
        # invocation
        middleware.process_request(req)
        # assertions
        ctx = req.environ[wsgi.CONTEXT_KEY]
        self.assertThat(ctx, Not(Is(None)))
        self.assertThat(ctx.user, Equals(user_id))
        self.assertThat(ctx.auth_token, Equals(token))
        self.assertEqual(0, len(ctx.service_catalog))


class TestController(trove_testtools.TestCase):

    @patch.object(base_wsgi.Resource, 'execute_action',
                  side_effect=exception.RootHistoryNotFound())
    @patch.object(wsgi.Controller, 'delete', create=True)
    @patch.object(wsgi.Controller, 'validate_request')
    def test_exception_root_history_notfound(self, *args):
        controller = wsgi.Controller()
        resource = controller.create_resource()
        req = Mock()
        result = resource.execute_action('delete', req)
        self.assertIsInstance(result.wrapped_exc, webob.exc.HTTPNotFound)
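
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the header-to-
# context mapping test_process_request relies on. The header values are
# illustrative.
def _demo_context_from_headers():
    req = webob.BaseRequest({})
    req.headers = {'X-User-ID': 'uid-1',
                   'X-Auth-Token': 'tok-1',
                   'X-Service-Catalog': '[]'}
    req.environ = {}
    wsgi.ContextMiddleware("demo").process_request(req)
    return req.environ[wsgi.CONTEXT_KEY]   # context built from the headers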
trove-12.1.0.dev92/trove/tests/unittests/conductor/
trove-12.1.0.dev92/trove/tests/unittests/conductor/__init__.py
trove-12.1.0.dev92/trove/tests/unittests/conductor/test_conf.py

# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import MagicMock
from mock import patch
from oslo_service import service as os_service

from trove.cmd import common as common_cmd
from trove.cmd import conductor as conductor_cmd
import trove.common.cfg as cfg
import trove.tests.fakes.conf as fake_conf
from trove.tests.unittests import trove_testtools

CONF = cfg.CONF
TROVE_UT = 'trove.tests.unittests'


def mocked_conf(manager):
    return fake_conf.FakeConf({
        'conductor_queue': 'conductor',
        'conductor_manager': manager,
        'trove_conductor_workers': 1,
        'host': 'mockhost',
        'report_interval': 1,
        'instance_rpc_encr_key': ''})


class NoopManager(object):
    RPC_API_VERSION = 1.0


class ConductorConfTests(trove_testtools.TestCase):

    def setUp(self):
        super(ConductorConfTests, self).setUp()

    def tearDown(self):
        super(ConductorConfTests, self).tearDown()

    def _test_manager(self, conf, rt_mgr_name):
        def mock_launch(conf, server, workers, restart_method):
            qualified_mgr = "%s.%s" % (
                server.manager_impl.__module__,
                server.manager_impl.__class__.__name__)
            self.assertEqual(rt_mgr_name, qualified_mgr, "Invalid manager")
            return MagicMock()

        os_service.launch = mock_launch
        with patch.object(common_cmd, 'initialize',
                          MagicMock(return_value=conf)):
            conductor_cmd.main()

    def test_user_defined_manager(self):
        qualified_mgr = TROVE_UT + ".conductor.test_conf.NoopManager"
        self._test_manager(mocked_conf(qualified_mgr), qualified_mgr)

    def test_default_manager(self):
        qualified_mgr = "trove.conductor.manager.Manager"
        self._test_manager(CONF, qualified_mgr)

    def test_invalid_manager(self):
        self.assertRaises(ImportError, self._test_manager,
                          mocked_conf('foo.bar.MissingMgr'),
                          'foo.bar.MissingMgr')
trove-12.1.0.dev92/trove/tests/unittests/conductor/test_methods.py

# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import patch
from oslo_utils import timeutils

from trove.backup import models as bkup_models
from trove.backup import state
from trove.common import exception as t_exception
from trove.common.instance import ServiceStatuses
from trove.common import utils
from trove.conductor import manager as conductor_manager
from trove.instance import models as t_models
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util

# See LP bug #1255178
OLD_DBB_SAVE = bkup_models.DBBackup.save


class ConductorMethodTests(trove_testtools.TestCase):

    def setUp(self):
        # See LP bug #1255178
        bkup_models.DBBackup.save = OLD_DBB_SAVE
        super(ConductorMethodTests, self).setUp()
        util.init_db()
        self.cond_mgr = conductor_manager.Manager()
        self.instance_id = utils.generate_uuid()

    def tearDown(self):
        super(ConductorMethodTests, self).tearDown()

    def _create_iss(self):
        new_id = utils.generate_uuid()
        iss = t_models.InstanceServiceStatus(
            id=new_id,
            instance_id=self.instance_id,
            status=ServiceStatuses.NEW)
        iss.save()
        return new_id

    def _get_iss(self, id):
        return t_models.InstanceServiceStatus.find_by(id=id)

    def _create_backup(self, name='fake backup'):
        new_id = utils.generate_uuid()
        backup = bkup_models.DBBackup.create(
            id=new_id,
            name=name,
            description='This is a fake backup object.',
            tenant_id=utils.generate_uuid(),
            state=state.BackupState.NEW,
            instance_id=self.instance_id)
        backup.save()
        return new_id

    def _get_backup(self, id):
        return bkup_models.DBBackup.find_by(id=id)

    # --- Tests for heartbeat ---

    def test_heartbeat_instance_not_found(self):
        new_id = utils.generate_uuid()
        self.assertRaises(t_exception.ModelNotFoundError,
                          self.cond_mgr.heartbeat, None, new_id, {})

    @patch('trove.conductor.manager.LOG')
    def test_heartbeat_instance_no_changes(self, mock_logging):
        iss_id = self._create_iss()
        old_iss = self._get_iss(iss_id)
        self.cond_mgr.heartbeat(None, self.instance_id, {})
        new_iss = self._get_iss(iss_id)
        self.assertEqual(old_iss.status_id, new_iss.status_id)
        self.assertEqual(old_iss.status_description,
                         new_iss.status_description)

    @patch('trove.conductor.manager.LOG')
    def test_heartbeat_instance_status_bogus_change(self, mock_logging):
        iss_id = self._create_iss()
        old_iss = self._get_iss(iss_id)
        new_status = 'potato salad'
        payload = {
            'service_status': new_status,
        }
        self.assertRaises(ValueError, self.cond_mgr.heartbeat,
                          None, self.instance_id, payload)
        new_iss = self._get_iss(iss_id)
        self.assertEqual(old_iss.status_id, new_iss.status_id)
        self.assertEqual(old_iss.status_description,
                         new_iss.status_description)

    @patch('trove.conductor.manager.LOG')
    def test_heartbeat_instance_status_changed(self, mock_logging):
        iss_id = self._create_iss()
        payload = {'service_status': ServiceStatuses.BUILDING.description}
        self.cond_mgr.heartbeat(None, self.instance_id, payload)
        iss = self._get_iss(iss_id)
        self.assertEqual(ServiceStatuses.BUILDING, iss.status)

    # --- Tests for update_backup ---

    def test_backup_not_found(self):
        new_bkup_id = utils.generate_uuid()
        self.assertRaises(t_exception.ModelNotFoundError,
                          self.cond_mgr.update_backup,
                          None, self.instance_id, new_bkup_id)

    @patch('trove.conductor.manager.LOG')
    def test_backup_instance_id_nomatch(self, mock_logging):
        new_iid = utils.generate_uuid()
        bkup_id = self._create_backup('nomatch')
        old_name = self._get_backup(bkup_id).name
        self.cond_mgr.update_backup(None, new_iid, bkup_id,
                                    name="remains unchanged")
        bkup = self._get_backup(bkup_id)
        self.assertEqual(old_name, bkup.name)

    @patch('trove.conductor.manager.LOG')
    def test_backup_bogus_fields_not_changed(self, mock_logging):
        bkup_id = self._create_backup('bogus')
        self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                    not_a_valid_field="INVALID")
        bkup = self._get_backup(bkup_id)
        self.assertFalse(hasattr(bkup, 'not_a_valid_field'))

    @patch('trove.conductor.manager.LOG')
    def test_backup_real_fields_changed(self, mock_logging):
        bkup_id = self._create_backup('realrenamed')
        new_name = "recently renamed"
        self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                    name=new_name)
        bkup = self._get_backup(bkup_id)
        self.assertEqual(new_name, bkup.name)

    # --- Tests for discarding old messages ---

    @patch('trove.conductor.manager.LOG')
    def test_heartbeat_newer_timestamp_accepted(self, mock_logging):
        new_p = {'service_status': ServiceStatuses.NEW.description}
        build_p = {'service_status': ServiceStatuses.BUILDING.description}
        iss_id = self._create_iss()
        iss = self._get_iss(iss_id)
        now = timeutils.utcnow_ts(microsecond=True)
        future = now + 60
        self.cond_mgr.heartbeat(None, self.instance_id, new_p, sent=now)
        self.cond_mgr.heartbeat(None, self.instance_id, build_p,
                                sent=future)
        iss = self._get_iss(iss_id)
        self.assertEqual(ServiceStatuses.BUILDING, iss.status)

    @patch('trove.conductor.manager.LOG')
    def test_heartbeat_older_timestamp_discarded(self, mock_logging):
        new_p = {'service_status': ServiceStatuses.NEW.description}
        build_p = {'service_status': ServiceStatuses.BUILDING.description}
        iss_id = self._create_iss()
        iss = self._get_iss(iss_id)
        now = timeutils.utcnow_ts(microsecond=True)
        past = now - 60
        self.cond_mgr.heartbeat(None, self.instance_id, new_p, sent=past)
        self.cond_mgr.heartbeat(None, self.instance_id, build_p, sent=past)
        iss = self._get_iss(iss_id)
        self.assertEqual(ServiceStatuses.NEW, iss.status)

    def test_backup_newer_timestamp_accepted(self):
        old_name = "oldname"
        new_name = "renamed"
        bkup_id = self._create_backup(old_name)
        bkup = self._get_backup(bkup_id)
        now = timeutils.utcnow_ts(microsecond=True)
        future = now + 60
        self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                    sent=now, name=old_name)
        self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                    sent=future, name=new_name)
        bkup = self._get_backup(bkup_id)
        self.assertEqual(new_name, bkup.name)

    def test_backup_older_timestamp_discarded(self):
        old_name = "oldname"
        new_name = "renamed"
        bkup_id = self._create_backup(old_name)
        bkup = self._get_backup(bkup_id)
        now = timeutils.utcnow_ts(microsecond=True)
        past = now - 60
        self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                    sent=now, name=old_name)
        self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
                                    sent=past, name=new_name)
        bkup = self._get_backup(bkup_id)
        self.assertEqual(old_name, bkup.name)
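
# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): the stale-message
# rule the four timestamp tests above pin down. The conductor applies the
# update carrying the newest 'sent' value and silently drops older ones.
def _demo_out_of_order_heartbeats(cond_mgr, instance_id):
    new_p = {'service_status': ServiceStatuses.NEW.description}
    build_p = {'service_status': ServiceStatuses.BUILDING.description}
    now = timeutils.utcnow_ts(microsecond=True)
    cond_mgr.heartbeat(None, instance_id, build_p, sent=now)
    # sent earlier, delivered later -> discarded; status stays BUILDING
    cond_mgr.heartbeat(None, instance_id, new_p, sent=now - 60)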
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import jsonschema from mock import MagicMock from trove.common import configurations from trove.common.exception import UnprocessableEntity from trove.configuration.service import ConfigurationsController from trove.extensions.mgmt.configuration import service from trove.tests.unittests import trove_testtools class TestConfigurationParser(trove_testtools.TestCase): def setUp(self): super(TestConfigurationParser, self).setUp() def test_parse_my_cnf_correctly(self): config = """ [mysqld] pid-file = /var/run/mysqld/mysqld.pid connect_timeout = 15 # we need to test no value params skip-external-locking ;another comment !includedir /etc/mysql/conf.d/ """ cfg_parser = configurations.MySQLConfParser(config) parsed = cfg_parser.parse() d_parsed = dict(parsed) self.assertIsNotNone(d_parsed) self.assertEqual("/var/run/mysqld/mysqld.pid", d_parsed["pid-file"]) self.assertEqual(15, d_parsed["connect_timeout"]) self.assertEqual('1', d_parsed["skip-external-locking"]) class TestConfigurationController(trove_testtools.TestCase): def setUp(self): super(TestConfigurationController, self).setUp() self.controller = ConfigurationsController() def _test_validate_configuration_with_action(self, body, action, is_valid=True): schema = self.controller.get_schema(action, body) self.assertIsNotNone(schema) validator = jsonschema.Draft4Validator(schema) if is_valid: self.assertTrue(validator.is_valid(body)) else: self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] return error_messages def test_validate_create_configuration(self): body = { "configuration": { "values": {}, "name": "test", "datastore": { "type": "test_type", "version": "test_version" } } } self._test_validate_configuration_with_action(body, action='create') def test_validate_create_configuration_no_datastore(self): body = { "configuration": { "values": {}, "name": "test" } } self._test_validate_configuration_with_action(body, action='create') def test_validate_create_invalid_values_param(self): body = { "configuration": { "values": '', "name": "test", "datastore": { "type": "test_type", "version": "test_version" } } } error_messages = ( self._test_validate_configuration_with_action(body, action='create', is_valid=False)) self.assertIn("'' is not of type 'object'", error_messages) def test_validate_create_invalid_name_param(self): body = { "configuration": { "values": {}, "name": "", "datastore": { "type": "test_type", "version": "test_version" } } } error_messages = ( self._test_validate_configuration_with_action(body, action='create', is_valid=False)) self.assertIn("'' is too short", error_messages) def test_validate_edit_configuration(self): body = { "configuration": { "values": {} } } self._test_validate_configuration_with_action(body, action="edit") def _test_validate_configuration(self, input_values, config_rules=None): if config_rules is None: config_val1 = MagicMock() config_val1.name = 
'max_connections' config_val1.restart_required = 'false' config_val1.datastore_version_id = 5.5 config_val1.max = 1 config_val1.min = 0 config_val1.data_type = 'integer' config_rules = [config_val1] data_version = MagicMock() data_version.id = 42 data_version.name = 5.5 data_version.datastore_name = 'test' self.assertRaises(UnprocessableEntity, ConfigurationsController._validate_configuration, input_values, data_version, config_rules) def test_validate_configuration_with_no_rules(self): self._test_validate_configuration({'max_connections': 5}, []) def test_validate_configuration_with_invalid_param(self): self._test_validate_configuration({'test': 5}) def test_validate_configuration_with_invalid_type(self): self._test_validate_configuration({'max_connections': '1'}) def test_validate_configuration_with_invalid_max(self): self._test_validate_configuration({'max_connections': 5}) def test_validate_configuration_with_invalid_min(self): self._test_validate_configuration({'max_connections': -1}) def test_validate_long_value(self): config_val1 = MagicMock() config_val1.name = 'myisam_sort_buffer_size' config_val1.max_size = 18446744073709551615 config_val1.min_size = 4096 config_val1.data_type = 'integer' config_rules = [config_val1] ConfigurationsController._validate_configuration( {'myisam_sort_buffer_size': 18446744073709551615}, None, config_rules) class TestConfigurationsParameterController(trove_testtools.TestCase): def setUp(self): super(TestConfigurationsParameterController, self).setUp() self.controller = service.ConfigurationsParameterController() def _test_validate_configuration_with_action(self, body, action, is_valid=True): schema = self.controller.get_schema(action, body) self.assertIsNotNone(schema) validator = jsonschema.Draft4Validator(schema) if is_valid: self.assertTrue(validator.is_valid(body)) else: self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] return error_messages def test_validate_create_configuration_param(self): body = { 'configuration-parameter': { 'name': 'test', 'restart_required': 1, 'data_type': 'string', 'min': '0', 'max': '255' } } self._test_validate_configuration_with_action(body, action='create') def test_validate_create_invalid_restart_required(self): body = { 'configuration-parameter': { 'name': 'test', 'restart_required': 5, 'data_type': 'string', 'min': 0, 'max': 255 } } error_messages = ( self._test_validate_configuration_with_action(body, action='create', is_valid=False)) self.assertIn("5 is greater than the maximum of 1", error_messages) self.assertIn("0 is not of type 'string'", error_messages) self.assertIn("255 is not of type 'string'", error_messages) def test_validate_create_invalid_restart_required_2(self): body = { 'configuration-parameter': { 'name': 'test', 'restart_required': -1, 'data_type': 'string', 'min': '0', 'max': '255' } } error_messages = ( self._test_validate_configuration_with_action(body, action='create', is_valid=False)) self.assertIn("-1 is less than the minimum of 0", error_messages) def test_validate_create_invalid_restart_required_3(self): body = { 'configuration-parameter': { 'name': 'test', 'restart_required': 'yes', 'data_type': 'string', 'min': '0', 'max': '255' } } error_messages = ( self._test_validate_configuration_with_action(body, action='create', is_valid=False)) self.assertIn("'yes' is not of type 'integer'", error_messages) ././@PaxHeader0000000000000000000000000000003300000000000011451 
xustar000000000000000027 mtime=1586541819.796111 trove-12.1.0.dev92/trove/tests/unittests/datastore/0000755000175000017500000000000000000000000022500 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/datastore/__init__.py0000644000175000017500000000000000000000000024577 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/datastore/base.py0000644000175000017500000000732700000000000023775 0ustar00coreycorey00000000000000# Copyright (c) 2014 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from trove.datastore import models as datastore_models from trove.datastore.models import Capability from trove.datastore.models import Datastore from trove.datastore.models import DatastoreVersion from trove.datastore.models import DatastoreVersionMetadata from trove.datastore.models import DBCapabilityOverrides from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util class TestDatastoreBase(trove_testtools.TestCase): def setUp(self): # Basic setup and mock/fake structures for testing only super(TestDatastoreBase, self).setUp() util.init_db() self.rand_id = str(uuid.uuid4()) self.ds_name = "my-test-datastore" + self.rand_id self.ds_version = "my-test-version" + self.rand_id self.capability_name = "root_on_create" + self.rand_id self.capability_desc = "Enables root on create" self.capability_enabled = True self.datastore_version_id = str(uuid.uuid4()) self.flavor_id = 1 self.volume_type = 'some-valid-volume-type' datastore_models.update_datastore(self.ds_name, False) self.datastore = Datastore.load(self.ds_name) datastore_models.update_datastore_version( self.ds_name, self.ds_version, "mysql", "", "", True) DatastoreVersionMetadata.add_datastore_version_flavor_association( self.ds_name, self.ds_version, [self.flavor_id]) DatastoreVersionMetadata.add_datastore_version_volume_type_association( self.ds_name, self.ds_version, [self.volume_type]) self.datastore_version = DatastoreVersion.load(self.datastore, self.ds_version) self.test_id = self.datastore_version.id self.cap1 = Capability.create(self.capability_name, self.capability_desc, True) self.cap2 = Capability.create("require_volume" + self.rand_id, "Require external volume", True) self.cap3 = Capability.create("test_capability" + self.rand_id, "Test capability", False) def tearDown(self): super(TestDatastoreBase, self).tearDown() capabilities_overridden = DBCapabilityOverrides.find_all( datastore_version_id=self.datastore_version.id).all() for ce in capabilities_overridden: ce.delete() self.cap1.delete() self.cap2.delete() self.cap3.delete() datastore = datastore_models.Datastore.load(self.ds_name) ds_version = datastore_models.DatastoreVersion.load(datastore, self.ds_version) datastore_models.DBDatastoreVersionMetadata.find_by( 
datastore_version_id=ds_version.id).delete() Datastore.load(self.ds_name).delete() def capability_name_filter(self, capabilities): new_capabilities = [] for capability in capabilities: if self.rand_id in capability.name: new_capabilities.append(capability) return new_capabilities ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/datastore/test_capability.py0000644000175000017500000000374300000000000026241 0ustar00coreycorey00000000000000# Copyright (c) 2014 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common.exception import CapabilityNotFound from trove.datastore.models import Capability from trove.datastore.models import CapabilityOverride from trove.tests.unittests.datastore.base import TestDatastoreBase class TestCapabilities(TestDatastoreBase): def setUp(self): super(TestCapabilities, self).setUp() def tearDown(self): super(TestCapabilities, self).tearDown() def test_capability(self): cap = Capability.load(self.capability_name) self.assertEqual(self.capability_name, cap.name) self.assertEqual(self.capability_desc, cap.description) self.assertEqual(self.capability_enabled, cap.enabled) def test_ds_capability_create_disabled(self): self.ds_cap = CapabilityOverride.create( self.cap1, self.datastore_version.id, enabled=False) self.assertFalse(self.ds_cap.enabled) self.ds_cap.delete() def test_capability_enabled(self): self.assertTrue(Capability.load(self.capability_name).enabled) def test_capability_disabled(self): capability = Capability.load(self.capability_name) capability.disable() self.assertFalse(capability.enabled) self.assertFalse(Capability.load(self.capability_name).enabled) def test_load_nonexistent_capability(self): self.assertRaises(CapabilityNotFound, Capability.load, "non-existent") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/datastore/test_datastore.py0000644000175000017500000000656300000000000026111 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
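# A minimal sketch of the lookup rule exercised by
# test_get_datastore_or_version below; names here are illustrative and
# not part of the trove API. A datastore may be resolved on its own or
# together with a version, but a version given without a datastore must
# fail (trove raises DatastoreNoVersion; the sketch uses ValueError).

def _resolve_datastore_or_version(datastore=None, version=None):
    """Return (datastore, version) ids, rejecting a bare version."""
    if version is not None and datastore is None:
        raise ValueError("a datastore version requires a datastore")
    return datastore, version

# Mirrors the data table in the test:
#   _resolve_datastore_or_version()                -> (None, None)
#   _resolve_datastore_or_version('ds')            -> ('ds', None)
#   _resolve_datastore_or_version('ds', 'ds_ver')  -> ('ds', 'ds_ver')
#   _resolve_datastore_or_version(None, 'ds_ver')  -> raises ValueError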
from mock import Mock from mock import patch from trove.common import exception from trove.datastore import models as datastore_models from trove.datastore.models import Datastore from trove.tests.unittests.datastore.base import TestDatastoreBase class TestDatastore(TestDatastoreBase): def test_create_failure_with_datastore_default_not_defined(self): self.assertRaises( exception.DatastoreDefaultDatastoreNotDefined, datastore_models.get_datastore_version) def test_load_datastore(self): datastore = Datastore.load(self.ds_name) self.assertEqual(self.ds_name, datastore.name) @patch.object(datastore_models, 'CONF') def test_create_failure_with_datastore_default(self, mock_conf): mock_conf.default_datastore = 'bad_ds' self.assertRaisesRegex(exception.DatastoreDefaultDatastoreNotFound, "Default datastore 'bad_ds' cannot be found", datastore_models.get_datastore_version) self.assertRaisesRegex(exception.DatastoreNotFound, "Datastore 'my_ds' cannot be found", datastore_models.get_datastore_version, 'my_ds') def test_get_datastore_or_version(self): # datastore, datastore_version, valid, exception data = [ [None, None, True], ['ds', None, True], ['ds', 'ds_ver', True], [None, 'ds_ver', False, exception.DatastoreNoVersion], ] for datum in data: ds_id = datum[0] ds_ver_id = datum[1] valid = datum[2] expected_exception = None if not valid: expected_exception = datum[3] ds = Mock() ds.id = ds_id ds.name = ds_id ds_ver = Mock() ds_ver.id = ds_ver_id ds_ver.name = ds_ver_id ds_ver.datastore_id = ds_id with patch.object(datastore_models.Datastore, 'load', return_value=ds): with patch.object(datastore_models.DatastoreVersion, 'load', return_value=ds_ver): if valid: (get_ds_id, get_ds_ver_id) = ( datastore_models.get_datastore_or_version( ds_id, ds_ver_id)) self.assertEqual(ds_id, get_ds_id) self.assertEqual(ds_ver_id, get_ds_ver_id) else: self.assertRaises( expected_exception, datastore_models.get_datastore_or_version, ds_id, ds_ver_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/datastore/test_datastore_version_metadata.py0000644000175000017500000002357400000000000031517 0ustar00coreycorey00000000000000# Copyright (c) 2015 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
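# A small self-contained sketch of the soft-delete behaviour the
# flavor/volume-type association tests below depend on (assumed
# structure, not the trove models): an association row is only flagged
# deleted, and re-adding the same association clears the flag instead
# of inserting a new row.

class _AssociationStore(object):
    def __init__(self):
        # (version_id, key, value) -> deleted flag
        self._rows = {}

    def add(self, version_id, key, value):
        # Re-associating un-deletes an existing row.
        self._rows[(version_id, key, str(value))] = False

    def delete(self, version_id, key, value):
        # Soft delete: the row stays findable, only the flag flips.
        self._rows[(version_id, key, str(value))] = True

    def is_deleted(self, version_id, key, value):
        return self._rows[(version_id, key, str(value))]

# store = _AssociationStore()
# store.add('dsv-1', 'flavor', 2); store.delete('dsv-1', 'flavor', 2)
# store.is_deleted('dsv-1', 'flavor', 2)   -> True, row still present
# store.add('dsv-1', 'flavor', 2)
# store.is_deleted('dsv-1', 'flavor', 2)   -> False again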
import mock from trove.common import clients from trove.common import exception from trove.datastore import models as datastore_models from trove.tests.unittests.datastore.base import TestDatastoreBase class TestDatastoreVersionMetadata(TestDatastoreBase): def setUp(self): super(TestDatastoreVersionMetadata, self).setUp() self.dsmetadata = datastore_models.DatastoreVersionMetadata self.volume_types = [ {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'name': 'type_1'}, {'id': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'name': 'type_2'}, {'id': 'cccccccc-cccc-cccc-cccc-cccccccccccc', 'name': 'type_3'}, ] def tearDown(self): super(TestDatastoreVersionMetadata, self).tearDown() def test_map_flavors_to_datastore(self): datastore = datastore_models.Datastore.load(self.ds_name) ds_version = datastore_models.DatastoreVersion.load(datastore, self.ds_version) mapping = datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id, value=self.flavor_id, deleted=False, key='flavor') self.assertEqual(str(self.flavor_id), mapping.value) self.assertEqual(ds_version.id, mapping.datastore_version_id) self.assertEqual('flavor', str(mapping.key)) def test_map_volume_types_to_datastores(self): datastore = datastore_models.Datastore.load(self.ds_name) ds_version = datastore_models.DatastoreVersion.load(datastore, self.ds_version) mapping = datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id, value=self.volume_type, deleted=False, key='volume_type') self.assertEqual(str(self.volume_type), mapping.value) self.assertEqual(ds_version.id, mapping.datastore_version_id) self.assertEqual('volume_type', str(mapping.key)) def test_add_existing_flavor_associations(self): dsmetadata = datastore_models.DatastoreVersionMetadata self.assertRaisesRegex( exception.DatastoreFlavorAssociationAlreadyExists, "Flavor %s is already associated with datastore %s version %s" % (self.flavor_id, self.ds_name, self.ds_version), dsmetadata.add_datastore_version_flavor_association, self.ds_name, self.ds_version, [self.flavor_id]) def test_add_existing_volume_type_associations(self): dsmetadata = datastore_models.DatastoreVersionMetadata self.assertRaises( exception.DatastoreVolumeTypeAssociationAlreadyExists, dsmetadata.add_datastore_version_volume_type_association, self.ds_name, self.ds_version, [self.volume_type]) def test_delete_nonexistent_flavor_mapping(self): dsmeta = datastore_models.DatastoreVersionMetadata self.assertRaisesRegex( exception.DatastoreFlavorAssociationNotFound, "Flavor 2 is not supported for datastore %s version %s" % (self.ds_name, self.ds_version), dsmeta.delete_datastore_version_flavor_association, self.ds_name, self.ds_version, flavor_id=2) def test_delete_nonexistent_volume_type_mapping(self): dsmeta = datastore_models.DatastoreVersionMetadata self.assertRaises( exception.DatastoreVolumeTypeAssociationNotFound, dsmeta.delete_datastore_version_volume_type_association, self.ds_name, self.ds_version, volume_type_name='some random thing') def test_delete_flavor_mapping(self): flavor_id = 2 dsmetadata = datastore_models. 
DatastoreVersionMetadata dsmetadata.add_datastore_version_flavor_association(self.ds_name, self.ds_version, [flavor_id]) dsmetadata.delete_datastore_version_flavor_association(self.ds_name, self.ds_version, flavor_id) datastore = datastore_models.Datastore.load(self.ds_name) ds_version = datastore_models.DatastoreVersion.load(datastore, self.ds_version) mapping = datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id, value=flavor_id, key='flavor') self.assertTrue(mapping.deleted) # check update dsmetadata.add_datastore_version_flavor_association( self.ds_name, self.ds_version, [flavor_id]) mapping = datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id, value=flavor_id, key='flavor') self.assertFalse(mapping.deleted) # clear the mapping datastore_models.DatastoreVersionMetadata.\ delete_datastore_version_flavor_association(self.ds_name, self.ds_version, flavor_id) def test_delete_volume_type_mapping(self): volume_type = 'this is bogus' dsmetadata = datastore_models. DatastoreVersionMetadata dsmetadata.add_datastore_version_volume_type_association( self.ds_name, self.ds_version, [volume_type]) dsmetadata.delete_datastore_version_volume_type_association( self.ds_name, self.ds_version, volume_type) datastore = datastore_models.Datastore.load(self.ds_name) ds_version = datastore_models.DatastoreVersion.load(datastore, self.ds_version) mapping = datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id, value=volume_type, key='volume_type') self.assertTrue(mapping.deleted) # check update dsmetadata.add_datastore_version_volume_type_association( self.ds_name, self.ds_version, [volume_type]) mapping = datastore_models.DBDatastoreVersionMetadata.find_by( datastore_version_id=ds_version.id, value=volume_type, key='volume_type') self.assertFalse(mapping.deleted) # clear the mapping dsmetadata.delete_datastore_version_volume_type_association( self.ds_name, self.ds_version, volume_type) @mock.patch.object(datastore_models.DatastoreVersionMetadata, '_datastore_version_find') @mock.patch.object(datastore_models.DatastoreVersionMetadata, 'list_datastore_version_volume_type_associations') @mock.patch.object(clients, 'create_cinder_client') def _mocked_allowed_datastore_version_volume_types(self, trove_volume_types, mock_cinder_client, mock_list, *args): """Call this with a list of strings specifying volume types.""" cinder_vts = [] for vt in self.volume_types: cinder_type = mock.Mock() cinder_type.id = vt.get('id') cinder_type.name = vt.get('name') cinder_vts.append(cinder_type) mock_cinder_client.return_value.volume_types.list.return_value = ( cinder_vts) mock_trove_list_result = mock.MagicMock() mock_trove_list_result.count.return_value = len(trove_volume_types) mock_trove_list_result.__iter__.return_value = [] for trove_vt in trove_volume_types: trove_type = mock.Mock() trove_type.value = trove_vt mock_trove_list_result.__iter__.return_value.append(trove_type) mock_list.return_value = mock_trove_list_result return self.dsmetadata.allowed_datastore_version_volume_types( None, 'ds', 'dsv') def _assert_equal_types(self, test_dict, output_obj): self.assertEqual(test_dict.get('id'), output_obj.id) self.assertEqual(test_dict.get('name'), output_obj.name) def test_allowed_volume_types_from_ids(self): id1 = self.volume_types[0].get('id') id2 = self.volume_types[1].get('id') res = self._mocked_allowed_datastore_version_volume_types([id1, id2]) self._assert_equal_types(self.volume_types[0], res[0]) 
self._assert_equal_types(self.volume_types[1], res[1]) def test_allowed_volume_types_from_names(self): name1 = self.volume_types[0].get('name') name2 = self.volume_types[1].get('name') res = self._mocked_allowed_datastore_version_volume_types([name1, name2]) self._assert_equal_types(self.volume_types[0], res[0]) self._assert_equal_types(self.volume_types[1], res[1]) def test_allowed_volume_types_no_restrictions(self): res = self._mocked_allowed_datastore_version_volume_types([]) self._assert_equal_types(self.volume_types[0], res[0]) self._assert_equal_types(self.volume_types[1], res[1]) self._assert_equal_types(self.volume_types[2], res[2]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/datastore/test_datastore_versions.py0000644000175000017500000000456600000000000030042 0ustar00coreycorey00000000000000# Copyright (c) 2014 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.datastore.models import DatastoreVersion from trove.tests.unittests.datastore.base import TestDatastoreBase class TestDatastoreVersions(TestDatastoreBase): def test_load_datastore_version(self): datastore_version = DatastoreVersion.load(self.datastore, self.ds_version) self.assertEqual(self.ds_version, datastore_version.name) def test_datastore_version_capabilities(self): self.datastore_version.capabilities.add(self.cap1, enabled=False) test_filtered_capabilities = self.capability_name_filter( self.datastore_version.capabilities) self.assertEqual(3, len(test_filtered_capabilities), 'Capabilities the test thinks it has are: %s, ' 'Filtered capabilities: %s' % (self.datastore_version.capabilities, test_filtered_capabilities)) # Test a fresh reloading of the datastore self.datastore_version = DatastoreVersion.load(self.datastore, self.ds_version) test_filtered_capabilities = self.capability_name_filter( self.datastore_version.capabilities) self.assertEqual(3, len(test_filtered_capabilities), 'Capabilities the test thinks it has are: %s, ' 'Filtered capabilities: %s' % (self.datastore_version.capabilities, test_filtered_capabilities)) self.assertIn(self.cap2.name, self.datastore_version.capabilities) self.assertNotIn("non-existent", self.datastore_version.capabilities) self.assertIn(self.cap1.name, self.datastore_version.capabilities) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.796111 trove-12.1.0.dev92/trove/tests/unittests/db/0000755000175000017500000000000000000000000021077 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/db/__init__.py0000644000175000017500000000000000000000000023176 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 
trove-12.1.0.dev92/trove/tests/unittests/db/test_migration_utils.py0000644000175000017500000001227700000000000025732 0ustar00coreycorey00000000000000# Copyright 2014 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from mock import call from mock import Mock from mock import patch from sqlalchemy.engine import reflection from sqlalchemy.schema import Column from trove.db.sqlalchemy.migrate_repo.schema import String from trove.db.sqlalchemy import utils as db_utils from trove.tests.unittests import trove_testtools class TestDbMigrationUtils(trove_testtools.TestCase): def setUp(self): super(TestDbMigrationUtils, self).setUp() def tearDown(self): super(TestDbMigrationUtils, self).tearDown() @patch.object(reflection.Inspector, 'from_engine') def test_get_foreign_key_constraint_names_single_match(self, mock_inspector): mock_engine = Mock() (mock_inspector.return_value. get_foreign_keys.return_value) = [{'constrained_columns': ['col1'], 'referred_table': 'ref_table1', 'referred_columns': ['ref_col1'], 'name': 'constraint1'}, {'constrained_columns': ['col2'], 'referred_table': 'ref_table2', 'referred_columns': ['ref_col2'], 'name': 'constraint2'}] ret_val = db_utils.get_foreign_key_constraint_names(mock_engine, 'table1', ['col1'], 'ref_table1', ['ref_col1']) self.assertEqual(['constraint1'], ret_val) @patch.object(reflection.Inspector, 'from_engine') def test_get_foreign_key_constraint_names_multi_match(self, mock_inspector): mock_engine = Mock() (mock_inspector.return_value. get_foreign_keys.return_value) = [ {'constrained_columns': ['col1'], 'referred_table': 'ref_table1', 'referred_columns': ['ref_col1'], 'name': 'constraint1'}, {'constrained_columns': ['col2', 'col3'], 'referred_table': 'ref_table1', 'referred_columns': ['ref_col2', 'ref_col3'], 'name': 'constraint2'}, {'constrained_columns': ['col2', 'col3'], 'referred_table': 'ref_table1', 'referred_columns': ['ref_col2', 'ref_col3'], 'name': 'constraint3'}, {'constrained_columns': ['col4'], 'referred_table': 'ref_table2', 'referred_columns': ['ref_col4'], 'name': 'constraint4'}] ret_val = db_utils.get_foreign_key_constraint_names( mock_engine, 'table1', ['col2', 'col3'], 'ref_table1', ['ref_col2', 'ref_col3']) self.assertEqual(['constraint2', 'constraint3'], ret_val) @patch.object(reflection.Inspector, 'from_engine') def test_get_foreign_key_constraint_names_no_match(self, mock_inspector): mock_engine = Mock() (mock_inspector.return_value. 
get_foreign_keys.return_value) = [] ret_val = db_utils.get_foreign_key_constraint_names(mock_engine, 'table1', ['col1'], 'ref_table1', ['ref_col1']) self.assertEqual([], ret_val) @patch('trove.db.sqlalchemy.utils.ForeignKeyConstraint') def test_drop_foreign_key_constraints(self, mock_constraint): test_columns = [Column('col1', String(5)), Column('col2', String(5))] test_refcolumns = [Column('ref_col1', String(5)), Column('ref_col2', String(5))] test_constraint_names = ['constraint1', 'constraint2'] db_utils.drop_foreign_key_constraints(test_constraint_names, test_columns, test_refcolumns) expected = [call(columns=test_columns, refcolumns=test_refcolumns, name='constraint1'), call(columns=test_columns, refcolumns=test_refcolumns, name='constraint2')] self.assertEqual(expected, mock_constraint.call_args_list) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.796111 trove-12.1.0.dev92/trove/tests/unittests/domain-name-service/0000755000175000017500000000000000000000000024335 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/domain-name-service/__init__.py0000644000175000017500000000000000000000000026434 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/domain-name-service/test_designate_driver.py0000644000175000017500000001221600000000000031266 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
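# Worked sketch of the hostname derivation asserted in the
# DesignateInstanceEntryFactory tests below (assuming Python 3): md5
# the instance id, base32-encode the digest, keep the first 11
# characters lower-cased, append the DNS domain, and trim a trailing
# dot. The helper name is illustrative; the real logic lives in
# trove.dns.designate.driver.

import base64
import hashlib

def _expected_dns_hostname(instance_id, domain):
    hashed = base64.b32encode(hashlib.md5(instance_id.encode()).digest())
    prefix = hashed.decode('ascii')[:11].lower()
    name = "%s.%s" % (prefix, domain)
    # A zone name ending in '.' would otherwise leave a trailing dot.
    return name[:-1] if name.endswith('.') else name

# _expected_dns_hostname('11111111-2222-3333-4444-555555555555',
#                        'trove.com.') and the same call with
# 'trove.com' both yield '<11 hash chars>.trove.com'.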
import base64 import hashlib from mock import MagicMock from mock import patch import six from trove.common import exception from trove.dns.designate import driver from trove.dns import driver as base_driver from trove.tests.unittests import trove_testtools class DesignateDriverV2Test(trove_testtools.TestCase): def setUp(self): super(DesignateDriverV2Test, self).setUp() self.records = [dict(name='record1.', type='A', data='10.0.0.1', ttl=3600, priority=1, id='11111111-1111-1111-1111-111111111111'), dict(name='record2.', type='CNAME', data='10.0.0.2', ttl=1800, priority=2, id='22222222-2222-2222-2222-222222222222'), dict(name='record3.', type='A', data='10.0.0.3', ttl=3600, priority=1, id='3333333-3333-3333-3333-333333333333')] self.mock_client = MagicMock() self.create_des_client_patch = patch.object( driver, 'create_designate_client', MagicMock( return_value=self.mock_client)) self.create_des_client_mock = self.create_des_client_patch.start() self.addCleanup(self.create_des_client_patch.stop) def test_create_entry(self): dns_driver = driver.DesignateDriverV2() zone = driver.DesignateDnsZone( id='22222222-2222-2222-2222-222222222222', name='www.trove.com') entry = base_driver.DnsEntry(name='www.example.com', content='None', type='A', ttl=3600, priority=None, dns_zone=zone) dns_driver.create_entry(entry, '1.2.3.4') self.mock_client.recordsets.create.assert_called_once_with( driver.DNS_DOMAIN_ID, entry.name + '.', entry.type, records=['1.2.3.4']) def test_delete_entry(self): with patch.object(driver.DesignateDriverV2, '_get_records', MagicMock(return_value=self.records)): dns_driver = driver.DesignateDriverV2() dns_driver.delete_entry('record1', 'A') self.mock_client.recordsets.delete(driver.DNS_DOMAIN_ID) def test_delete_no_entry(self): with patch.object(driver.DesignateDriverV2, '_get_records', MagicMock(return_value=self.records)): dns_driver = driver.DesignateDriverV2() self.assertRaises(exception.DnsRecordNotFound, dns_driver.delete_entry, 'nothere', 'A') self.mock_client.recordsets.assert_not_called() class DesignateInstanceEntryFactoryTest(trove_testtools.TestCase): def setUp(self): super(DesignateInstanceEntryFactoryTest, self).setUp() def tearDown(self): super(DesignateInstanceEntryFactoryTest, self).tearDown() def test_create_entry(self): instance_id = '11111111-2222-3333-4444-555555555555' driver.DNS_DOMAIN_ID = '00000000-0000-0000-0000-000000000000' driver.DNS_DOMAIN_NAME = 'trove.com' driver.DNS_TTL = 3600 hashed_id = hashlib.md5(instance_id.encode()).digest() hashed_id = base64.b32encode(hashed_id) if six.PY3: hashed_id = hashed_id.decode('ascii') hashed_id_concat = hashed_id[:11].lower() exp_hostname = ("%s.%s" % (hashed_id_concat, driver.DNS_DOMAIN_NAME)) factory = driver.DesignateInstanceEntryFactory() entry = factory.create_entry(instance_id) self.assertEqual(exp_hostname, entry.name) self.assertEqual('A', entry.type) self.assertEqual(3600, entry.ttl) zone = entry.dns_zone self.assertEqual(driver.DNS_DOMAIN_NAME, zone.name) self.assertEqual(driver.DNS_DOMAIN_ID, zone.id) def test_create_entry_ends_with_dot(self): instance_id = '11111111-2222-3333-4444-555555555555' driver.DNS_DOMAIN_ID = '00000000-0000-0000-0000-000000000000' driver.DNS_DOMAIN_NAME = 'trove.com.' 
driver.DNS_TTL = 3600 hashed_id = hashlib.md5(instance_id.encode()).digest() hashed_id = base64.b32encode(hashed_id) if six.PY3: hashed_id = hashed_id.decode('ascii') hashed_id_concat = hashed_id[:11].lower() exp_hostname = ("%s.%s" % (hashed_id_concat, driver.DNS_DOMAIN_NAME))[:-1] factory = driver.DesignateInstanceEntryFactory() entry = factory.create_entry(instance_id) self.assertEqual(exp_hostname, entry.name) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.796111 trove-12.1.0.dev92/trove/tests/unittests/extensions/0000755000175000017500000000000000000000000022711 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/extensions/__init__.py0000644000175000017500000000000000000000000025010 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.796111 trove-12.1.0.dev92/trove/tests/unittests/extensions/common/0000755000175000017500000000000000000000000024201 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/extensions/common/__init__.py0000644000175000017500000000000000000000000026300 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/extensions/common/test_service.py0000644000175000017500000004103700000000000027257 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from mock import Mock from mock import patch from oslo_config.cfg import NoSuchOptError from trove.common import exception from trove.common import utils from trove.extensions.common import models from trove.extensions.common.service import ClusterRootController from trove.extensions.common.service import DefaultRootController from trove.extensions.common.service import RootController from trove.instance import models as instance_models from trove.instance.models import DBInstance from trove.tests.unittests import trove_testtools class TestDefaultRootController(trove_testtools.TestCase): def setUp(self): super(TestDefaultRootController, self).setUp() self.controller = DefaultRootController() @patch.object(models.Root, "load") def test_root_index(self, root_load): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = False self.controller.root_index(req, tenant_id, uuid, is_cluster) root_load.assert_called_with(context, uuid) def test_root_index_with_cluster(self): req = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = True self.assertRaises( exception.ClusterOperationNotSupported, self.controller.root_index, req, tenant_id, uuid, is_cluster) @patch.object(models.Root, "create") def test_root_create(self, root_create): user = Mock() context = Mock() context.user = Mock() context.user.__getitem__ = Mock(return_value=user) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = False password = Mock() body = {'password': password} self.controller.root_create(req, body, tenant_id, uuid, is_cluster) root_create.assert_called_with(context, uuid, password) def test_root_create_with_cluster(self): req = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = True password = Mock() body = {'password': password} self.assertRaises( exception.ClusterOperationNotSupported, self.controller.root_create, req, body, tenant_id, uuid, is_cluster) @patch.object(models.Root, "delete") @patch.object(models.Root, "load") def test_root_delete(self, root_load, root_delete): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = Mock() instance_id = utils.generate_uuid() is_cluster = False root_load.return_value = True self.controller.root_delete(req, tenant_id, instance_id, is_cluster) root_load.assert_called_with(context, instance_id) root_delete.assert_called_with(context, instance_id) @patch.object(models.Root, "delete") @patch.object(models.Root, "load") def test_root_delete_without_root_enabled(self, root_load, root_delete): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = Mock() instance_id = utils.generate_uuid() is_cluster = False root_load.return_value = False self.assertRaises( exception.RootHistoryNotFound, self.controller.root_delete, req, tenant_id, instance_id, is_cluster) root_load.assert_called_with(context, instance_id) root_delete.assert_not_called() def test_root_delete_with_cluster(self): req = Mock() tenant_id = Mock() instance_id = utils.generate_uuid() is_cluster = True self.assertRaises( exception.ClusterOperationNotSupported, self.controller.root_delete, req, tenant_id, instance_id, is_cluster) class TestRootController(trove_testtools.TestCase): def setUp(self): super(TestRootController, self).setUp() self.context = 
trove_testtools.TroveTestContext(self) self.controller = RootController() @patch.object(instance_models.Instance, "load") @patch.object(RootController, "load_root_controller") @patch.object(RootController, "_get_datastore") def test_index(self, service_get_datastore, service_load_root_controller, service_load_instance): req = Mock() req.environ = {'trove.context': self.context} tenant_id = Mock() uuid = utils.generate_uuid() ds_manager = Mock() is_cluster = False service_get_datastore.return_value = (ds_manager, is_cluster) root_controller = Mock() ret = Mock() root_controller.root_index = Mock(return_value=ret) service_load_root_controller.return_value = root_controller self.assertEqual(ret, self.controller.index(req, tenant_id, uuid)) service_get_datastore.assert_called_with(tenant_id, uuid) service_load_root_controller.assert_called_with(ds_manager) root_controller.root_index.assert_called_with( req, tenant_id, uuid, is_cluster) @patch.object(instance_models.Instance, "load") @patch.object(RootController, "load_root_controller") @patch.object(RootController, "_get_datastore") def test_create(self, service_get_datastore, service_load_root_controller, service_load_instance): req = Mock() req.environ = {'trove.context': self.context} body = Mock() tenant_id = Mock() uuid = utils.generate_uuid() ds_manager = Mock() is_cluster = False service_get_datastore.return_value = (ds_manager, is_cluster) root_controller = Mock() ret = Mock() root_controller.root_create = Mock(return_value=ret) service_load_root_controller.return_value = root_controller self.assertEqual( ret, self.controller.create(req, tenant_id, uuid, body=body)) service_get_datastore.assert_called_with(tenant_id, uuid) service_load_root_controller.assert_called_with(ds_manager) root_controller.root_create.assert_called_with( req, body, tenant_id, uuid, is_cluster) @patch.object(instance_models.Instance, "load") @patch.object(RootController, "load_root_controller") @patch.object(RootController, "_get_datastore") def test_create_with_no_root_controller(self, service_get_datastore, service_load_root_controller, service_load_instance): req = Mock() req.environ = {'trove.context': self.context} body = Mock() tenant_id = Mock() uuid = utils.generate_uuid() ds_manager = Mock() is_cluster = False service_get_datastore.return_value = (ds_manager, is_cluster) service_load_root_controller.return_value = None self.assertRaises( NoSuchOptError, self.controller.create, req, tenant_id, uuid, body=body) service_get_datastore.assert_called_with(tenant_id, uuid) service_load_root_controller.assert_called_with(ds_manager) @patch.object(instance_models.Instance, "load") @patch.object(RootController, "load_root_controller") @patch.object(RootController, "_get_datastore") def test_delete(self, service_get_datastore, service_load_root_controller, service_load_instance): req = Mock() req.environ = {'trove.context': self.context} tenant_id = Mock() uuid = utils.generate_uuid() ds_manager = Mock() is_cluster = False service_get_datastore.return_value = (ds_manager, is_cluster) root_controller = Mock() ret = Mock() root_controller.root_delete = Mock(return_value=ret) service_load_root_controller.return_value = root_controller self.assertEqual( ret, self.controller.delete(req, tenant_id, uuid)) service_get_datastore.assert_called_with(tenant_id, uuid) service_load_root_controller.assert_called_with(ds_manager) root_controller.root_delete.assert_called_with( req, tenant_id, uuid, is_cluster) @patch.object(instance_models.Instance, "load") 
@patch.object(RootController, "load_root_controller") @patch.object(RootController, "_get_datastore") def test_delete_with_no_root_controller(self, service_get_datastore, service_load_root_controller, service_load_instance): req = Mock() req.environ = {'trove.context': self.context} tenant_id = Mock() uuid = utils.generate_uuid() ds_manager = Mock() is_cluster = False service_get_datastore.return_value = (ds_manager, is_cluster) service_load_root_controller.return_value = None self.assertRaises( NoSuchOptError, self.controller.delete, req, tenant_id, uuid) service_get_datastore.assert_called_with(tenant_id, uuid) service_load_root_controller.assert_called_with(ds_manager) class TestClusterRootController(trove_testtools.TestCase): def setUp(self): super(TestClusterRootController, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.controller = ClusterRootController() @patch.object(ClusterRootController, "cluster_root_index") def test_root_index_cluster(self, mock_cluster_root_index): req = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = True self.controller.root_index(req, tenant_id, uuid, is_cluster) mock_cluster_root_index.assert_called_with(req, tenant_id, uuid) @patch.object(ClusterRootController, "instance_root_index") def test_root_index_instance(self, mock_instance_root_index): req = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = False self.controller.root_index(req, tenant_id, uuid, is_cluster) mock_instance_root_index.assert_called_with(req, tenant_id, uuid) @patch.object(ClusterRootController, "cluster_root_create") def test_root_create_cluster(self, mock_cluster_root_create): req = Mock() body = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = True self.controller.root_create(req, body, tenant_id, uuid, is_cluster) mock_cluster_root_create.assert_called_with(req, body, tenant_id, uuid) @patch.object(ClusterRootController, "check_cluster_instance_actions") @patch.object(ClusterRootController, "instance_root_create") def test_root_create_instance(self, mock_instance_root_create, mock_check): req = Mock() body = Mock() tenant_id = Mock() uuid = utils.generate_uuid() is_cluster = False self.controller.root_create(req, body, tenant_id, uuid, is_cluster) mock_check.assert_called_with(uuid) mock_instance_root_create.assert_called_with(req, body, uuid) @patch.object(models.ClusterRoot, "load") def test_instance_root_index(self, mock_cluster_root_load): req = Mock() req.environ = {'trove.context': self.context} tenant_id = Mock() instance_id = utils.generate_uuid() self.controller.instance_root_index(req, tenant_id, instance_id) mock_cluster_root_load.assert_called_with(self.context, instance_id) @patch.object(models.ClusterRoot, "load", side_effect=exception.UnprocessableEntity()) def test_instance_root_index_exception(self, mock_cluster_root_load): req = Mock() req.environ = {'trove.context': self.context} tenant_id = Mock() instance_id = utils.generate_uuid() self.assertRaises( exception.UnprocessableEntity, self.controller.instance_root_index, req, tenant_id, instance_id ) mock_cluster_root_load.assert_called_with(self.context, instance_id) @patch.object(ClusterRootController, "instance_root_index") @patch.object(ClusterRootController, "_get_cluster_instance_id") def test_cluster_root_index(self, mock_get_cluster_instance, mock_instance_root_index): req = Mock() tenant_id = Mock() cluster_id = utils.generate_uuid() single_instance_id = Mock() mock_get_cluster_instance.return_value = (single_instance_id, 
Mock()) self.controller.cluster_root_index(req, tenant_id, cluster_id) mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id) mock_instance_root_index.assert_called_with(req, tenant_id, single_instance_id) @patch.object(ClusterRootController, "instance_root_create") @patch.object(ClusterRootController, "_get_cluster_instance_id") def test_cluster_root_create(self, mock_get_cluster_instance, mock_instance_root_create): req = Mock() body = Mock() tenant_id = Mock() cluster_id = utils.generate_uuid() single_instance_id = Mock() cluster_instances = Mock() mock_get_cluster_instance.return_value = (single_instance_id, cluster_instances) self.controller.cluster_root_create(req, body, tenant_id, cluster_id) mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id) mock_instance_root_create.assert_called_with(req, body, single_instance_id, cluster_instances) @patch.object(DBInstance, "find_all") def test_get_cluster_instance_id(self, mock_find_all): tenant_id = Mock() cluster_id = Mock() db_inst_1 = Mock() db_inst_1.id.return_value = utils.generate_uuid() db_inst_2 = Mock() db_inst_2.id.return_value = utils.generate_uuid() cluster_instances = [db_inst_1, db_inst_2] mock_find_all.return_value.all.return_value = cluster_instances ret = self.controller._get_cluster_instance_id(tenant_id, cluster_id) self.assertEqual(db_inst_1.id, ret[0]) self.assertEqual([db_inst_1.id, db_inst_2.id], ret[1]) @patch.object(models.ClusterRoot, "create") def test_instance_root_create(self, mock_cluster_root_create): user = Mock() self.context.user = Mock() self.context.user.__getitem__ = Mock(return_value=user) req = Mock() req.environ = {'trove.context': self.context} password = Mock() body = {'password': password} instance_id = utils.generate_uuid() cluster_instances = Mock() self.controller.instance_root_create( req, body, instance_id, cluster_instances) mock_cluster_root_create.assert_called_with( self.context, instance_id, password, cluster_instances) @patch.object(models.ClusterRoot, "create") def test_instance_root_create_no_body(self, mock_cluster_root_create): user = Mock() self.context.user = Mock() self.context.user.__getitem__ = Mock(return_value=user) req = Mock() req.environ = {'trove.context': self.context} password = None body = None instance_id = utils.generate_uuid() cluster_instances = Mock() self.controller.instance_root_create( req, body, instance_id, cluster_instances) mock_cluster_root_create.assert_called_with( self.context, instance_id, password, cluster_instances) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586541819.796111 trove-12.1.0.dev92/trove/tests/unittests/extensions/redis/0000755000175000017500000000000000000000000024017 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/extensions/redis/__init__.py0000644000175000017500000000000000000000000026116 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/extensions/redis/test_service.py0000644000175000017500000002173200000000000027075 0ustar00coreycorey00000000000000# Copyright 2017 Eayun, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import uuid from mock import Mock, patch from trove.common import exception from trove.datastore import models as datastore_models from trove.extensions.common import models from trove.extensions.redis.models import RedisRoot from trove.extensions.redis.service import RedisRootController from trove.instance import models as instance_models from trove.instance.models import DBInstance from trove.instance.tasks import InstanceTasks from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util class TestRedisRootController(trove_testtools.TestCase): @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) def setUp(self): util.init_db() self.context = trove_testtools.TroveTestContext(self, is_admin=True) self.datastore = datastore_models.DBDatastore.create( id=str(uuid.uuid4()), name='redis' + str(uuid.uuid4()), ) self.datastore_version = ( datastore_models.DBDatastoreVersion.create( id=str(uuid.uuid4()), datastore_id=self.datastore.id, name="3.2" + str(uuid.uuid4()), manager="redis", image_id="image_id", packages="", active=True)) self.tenant_id = "UUID" self.single_db_info = DBInstance.create( id="redis-single", name="redis-single", flavor_id=1, datastore_version_id=self.datastore_version.id, tenant_id=self.tenant_id, volume_size=None, task_status=InstanceTasks.NONE) self.master_db_info = DBInstance.create( id="redis-master", name="redis-master", flavor_id=1, datastore_version_id=self.datastore_version.id, tenant_id=self.tenant_id, volume_size=None, task_status=InstanceTasks.NONE) self.slave_db_info = DBInstance.create( id="redis-slave", name="redis-slave", flavor_id=1, datastore_version_id=self.datastore_version.id, tenant_id=self.tenant_id, volume_size=None, task_status=InstanceTasks.NONE, slave_of_id=self.master_db_info.id) super(TestRedisRootController, self).setUp() self.controller = RedisRootController() def tearDown(self): self.datastore.delete() self.datastore_version.delete() self.master_db_info.delete() self.slave_db_info.delete() super(TestRedisRootController, self).tearDown() @patch.object(instance_models.Instance, "load") @patch.object(models.Root, "create") def test_root_create_on_single_instance(self, root_create, *args): user = Mock() context = Mock() context.user = Mock() context.user.__getitem__ = Mock(return_value=user) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = self.tenant_id instance_id = self.single_db_info.id is_cluster = False password = Mock() body = {"password": password} self.controller.root_create(req, body, tenant_id, instance_id, is_cluster) root_create.assert_called_with(context, instance_id, password) @patch.object(instance_models.Instance, "load") @patch.object(models.Root, "create") def test_root_create_on_master_instance(self, root_create, *args): user = Mock() context = Mock() context.user = Mock() context.user.__getitem__ = Mock(return_value=user) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = self.tenant_id instance_id = self.master_db_info.id 
slave_instance_id = self.slave_db_info.id is_cluster = False password = Mock() body = {"password": password} self.controller.root_create(req, body, tenant_id, instance_id, is_cluster) root_create.assert_called_with(context, slave_instance_id, password) def test_root_create_on_slave(self): user = Mock() context = Mock() context.user = Mock() context.user.__getitem__ = Mock(return_value=user) req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = self.tenant_id instance_id = self.slave_db_info.id is_cluster = False body = {} self.assertRaises( exception.SlaveOperationNotSupported, self.controller.root_create, req, body, tenant_id, instance_id, is_cluster) def test_root_create_with_cluster(self): req = Mock() tenant_id = self.tenant_id instance_id = self.master_db_info.id is_cluster = True body = {} self.assertRaises( exception.ClusterOperationNotSupported, self.controller.root_create, req, body, tenant_id, instance_id, is_cluster) @patch.object(instance_models.Instance, "load") @patch.object(RedisRoot, "get_auth_password") @patch.object(models.Root, "delete") @patch.object(models.Root, "load") def test_root_delete_on_single_instance(self, root_load, root_delete, *args): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = self.tenant_id instance_id = self.single_db_info.id is_cluster = False root_load.return_value = True self.controller.root_delete(req, tenant_id, instance_id, is_cluster) root_load.assert_called_with(context, instance_id) root_delete.assert_called_with(context, instance_id) @patch.object(instance_models.Instance, "load") @patch.object(RedisRoot, "get_auth_password") @patch.object(models.Root, "delete") @patch.object(models.Root, "load") def test_root_delete_on_master_instance(self, root_load, root_delete, *args): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = self.tenant_id instance_id = self.master_db_info.id slave_instance_id = self.slave_db_info.id is_cluster = False root_load.return_value = True self.controller.root_delete(req, tenant_id, instance_id, is_cluster) root_load.assert_called_with(context, instance_id) root_delete.assert_called_with(context, slave_instance_id) def test_root_delete_on_slave(self): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = self.tenant_id instance_id = self.slave_db_info.id is_cluster = False self.assertRaises( exception.SlaveOperationNotSupported, self.controller.root_delete, req, tenant_id, instance_id, is_cluster) def test_root_delete_with_cluster(self): req = Mock() tenant_id = self.tenant_id instance_id = self.master_db_info.id is_cluster = True self.assertRaises( exception.ClusterOperationNotSupported, self.controller.root_delete, req, tenant_id, instance_id, is_cluster) @patch.object(instance_models.Instance, "load") @patch.object(models.Root, "delete") @patch.object(models.Root, "load") def test_root_delete_without_root_enabled(self, root_load, root_delete, *args): context = Mock() req = Mock() req.environ = Mock() req.environ.__getitem__ = Mock(return_value=context) tenant_id = self.tenant_id instance_id = self.single_db_info.id is_cluster = False root_load.return_value = False self.assertRaises( exception.RootHistoryNotFound, self.controller.root_delete, req, tenant_id, instance_id, is_cluster) root_load.assert_called_with(context, instance_id) root_delete.assert_not_called() 
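# A compact sketch of the dispatch guards the Redis root tests above
# exercise (stand-in names and exceptions, not the trove
# implementation, which raises ClusterOperationNotSupported and
# SlaveOperationNotSupported): root create/delete is refused for
# clusters and for slaves, and an action against a master is also
# applied to its attached slaves.

def _dispatch_root_action(action, instance, is_cluster=False):
    """Apply `action` to a master instance and each of its slaves."""
    if is_cluster:
        raise NotImplementedError("cluster root operations unsupported")
    if instance.get('slave_of'):
        raise NotImplementedError("root operations go via the master")
    # The master is handled first, then each attached slave.
    return [action(target) for target in
            [instance['id']] + instance.get('slaves', [])]

# _dispatch_root_action(enable_root,
#                       {'id': 'redis-master', 'slaves': ['redis-slave']})
# applies enable_root to both the master and its slave, matching
# test_root_create_on_master_instance above.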
trove-12.1.0.dev92/trove/tests/unittests/flavor/
trove-12.1.0.dev92/trove/tests/unittests/flavor/__init__.py
trove-12.1.0.dev92/trove/tests/unittests/flavor/test_flavor_views.py

# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from mock import Mock, patch

from trove.flavor.views import FlavorView
from trove.tests.unittests import trove_testtools


class FlavorViewsTest(trove_testtools.TestCase):

    def setUp(self):
        super(FlavorViewsTest, self).setUp()
        self.flavor = Mock()
        self.flavor.id = 10
        self.flavor.str_id = '10'
        self.flavor.name = 'test_flavor'
        self.flavor.ram = 512
        self.links = 'my_links'
        self.flavor.vcpus = '10'
        self.flavor.disk = '0'
        self.flavor.ephemeral = '0'

    def tearDown(self):
        super(FlavorViewsTest, self).tearDown()

    def test_data(self):
        data = [
            {'flavor_id': 10, 'expected_id': 10, 'expected_str_id': '10'},
            {'flavor_id': 'uuid-10', 'expected_id': None,
             'expected_str_id': 'uuid-10'},
            {'flavor_id': '02', 'expected_id': None,
             'expected_str_id': '02'},
        ]
        for datum in data:
            flavor_id = datum['flavor_id']
            expected_id = datum['expected_id']
            expected_str_id = datum['expected_str_id']
            msg = "Testing flavor_id: %s - " % flavor_id
            self.flavor.id = flavor_id
            with patch.object(FlavorView, '_build_links',
                              Mock(return_value=(self.links))):
                view = FlavorView(self.flavor)
                result = view.data()
                self.assertEqual(expected_id, result['flavor']['id'],
                                 msg + 'invalid id')
                self.assertEqual(expected_str_id,
                                 result['flavor']['str_id'],
                                 msg + 'invalid str_id')
                self.assertEqual(self.flavor.name, result['flavor']['name'],
                                 msg + 'invalid name')
                self.assertEqual(self.flavor.ram, result['flavor']['ram'],
                                 msg + 'invalid ram')
                self.assertEqual(self.flavor.vcpus,
                                 result['flavor']['vcpus'],
                                 msg + 'invalid vcpus')
                self.assertEqual(self.flavor.disk, result['flavor']['disk'],
                                 msg + 'invalid disk')
                self.assertEqual(self.flavor.ephemeral,
                                 result['flavor']['ephemeral'],
                                 msg + 'invalid ephemeral')
                self.assertEqual(self.links, result['flavor']['links'],
                                 msg + 'invalid links')
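# ---------------------------------------------------------------------
# Editor's sketch (not part of the Trove source): test_data() above rolls
# its own data-driven loop with a msg prefix. On Python 3, unittest's
# subTest() reports each datum separately, so one failing flavor_id does
# not mask the rest. A standalone toy illustration (names hypothetical):
# ---------------------------------------------------------------------
import unittest


class SubTestExample(unittest.TestCase):

    def test_str_id(self):
        data = [(10, '10'), ('uuid-10', 'uuid-10'), ('02', '02')]
        for flavor_id, expected_str_id in data:
            # Each datum gets its own pass/fail entry in the test report.
            with self.subTest(flavor_id=flavor_id):
                self.assertEqual(expected_str_id, str(flavor_id))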
trove-12.1.0.dev92/trove/tests/unittests/guestagent/
trove-12.1.0.dev92/trove/tests/unittests/guestagent/__init__.py
trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_agent_heartbeats_models.py

# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mock import Mock
from mock import patch
import uuid

from trove.common import exception
from trove.guestagent.models import AgentHeartBeat
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util


class AgentHeartBeatTest(trove_testtools.TestCase):

    def setUp(self):
        super(AgentHeartBeatTest, self).setUp()
        util.init_db()

    def tearDown(self):
        super(AgentHeartBeatTest, self).tearDown()

    def test_create(self):
        """
        Test the creation of a new agent heartbeat record
        """
        instance_id = str(uuid.uuid4())
        heartbeat = AgentHeartBeat.create(
            instance_id=instance_id)
        self.assertIsNotNone(heartbeat)
        self.assertIsNotNone(heartbeat.id)
        self.assertIsNotNone(heartbeat.instance_id)
        self.assertEqual(instance_id, heartbeat.instance_id)
        self.assertIsNotNone(heartbeat.updated_at)
        self.assertIsNone(heartbeat.guest_agent_version)

    def test_create_with_version(self):
        """
        Test the creation of a new agent heartbeat record w/ guest version
        """
        instance_id = str(uuid.uuid4())
        heartbeat = AgentHeartBeat.create(
            instance_id=instance_id,
            guest_agent_version="1.2.3")
        self.assertIsNotNone(heartbeat)
        self.assertIsNotNone(heartbeat.id)
        self.assertIsNotNone(heartbeat.instance_id)
        self.assertEqual(instance_id, heartbeat.instance_id)
        self.assertIsNotNone(heartbeat.updated_at)
        self.assertIsNotNone(heartbeat.guest_agent_version)
        self.assertEqual("1.2.3", heartbeat.guest_agent_version)

    def test_create_invalid_model_error(self):
        """
        Test the creation failure of a new agent heartbeat record
        """
        instance = Mock()
        instance.errors = {}
        instance.is_valid = Mock(return_value=False)
        with patch.object(AgentHeartBeat, 'save', return_value=instance):
            self.assertRaises(exception.InvalidModelError,
                              AgentHeartBeat.create)

    def test_save_invalid_model_error(self):
        """
        Test the save failure of an agent heartbeat record
        """
        instance_id = str(uuid.uuid4())
        heartbeat = AgentHeartBeat.create(
            instance_id=instance_id)
        with patch.object(AgentHeartBeat, 'is_valid', return_value=False):
            self.assertRaises(exception.InvalidModelError, heartbeat.save)

    def test_find_by_instance_id(self):
        """
        Test to retrieve a guest agents by its id
        """
        # create a unique record
        instance_id = str(uuid.uuid4())
        heartbeat = AgentHeartBeat.create(
            instance_id=instance_id,
            guest_agent_version="1.2.3")
        self.assertIsNotNone(heartbeat)
        self.assertIsNotNone(heartbeat.id)
        self.assertIsNotNone(heartbeat.instance_id)
        self.assertEqual(instance_id, heartbeat.instance_id)
        self.assertIsNotNone(heartbeat.updated_at)
        self.assertIsNotNone(heartbeat.guest_agent_version)
        self.assertEqual("1.2.3", heartbeat.guest_agent_version)

        # retrieve the record
        heartbeat_found = AgentHeartBeat.find_by_instance_id(
            instance_id=instance_id)
        self.assertIsNotNone(heartbeat_found)
        self.assertEqual(heartbeat.id, heartbeat_found.id)
        self.assertEqual(heartbeat.instance_id, heartbeat_found.instance_id)
        self.assertEqual(heartbeat.updated_at, heartbeat_found.updated_at)
        self.assertEqual(heartbeat.guest_agent_version,
                         heartbeat_found.guest_agent_version)

    def test_find_by_instance_id_none(self):
        """
        Test to retrieve a guest agents when id is None
        """
        heartbeat_found = None
        exception_raised = False
        try:
            heartbeat_found = AgentHeartBeat.find_by_instance_id(
                instance_id=None)
        except exception.ModelNotFoundError:
            exception_raised = True
        self.assertIsNone(heartbeat_found)
        self.assertTrue(exception_raised)

    @patch('trove.guestagent.models.LOG')
    def test_find_by_instance_id_not_found(self, mock_logging):
        """
        Test to retrieve a guest agents when id is not found
        """
        instance_id = str(uuid.uuid4())
        heartbeat_found = None
        exception_raised = False
        try:
            heartbeat_found = AgentHeartBeat.find_by_instance_id(
                instance_id=instance_id)
        except exception.ModelNotFoundError:
            exception_raised = True
        self.assertIsNone(heartbeat_found)
        self.assertTrue(exception_raised)

    def test_find_all_by_version(self):
        """
        Test to retrieve all guest agents with a particular version
        """
        # create some unique records with the same version
        version = str(uuid.uuid4())
        for x in range(5):
            instance_id = str(uuid.uuid4())
            heartbeat = AgentHeartBeat.create(
                instance_id=instance_id,
                guest_agent_version=version,
                deleted=0)
            self.assertIsNotNone(heartbeat)

        # get all guests by version
        heartbeats = AgentHeartBeat.find_all_by_version(version)
        self.assertIsNotNone(heartbeats)
        self.assertEqual(5, heartbeats.count())

    def test_find_all_by_version_none(self):
        """
        Test to retrieve all guest agents with a None version
        """
        heartbeats = None
        exception_raised = False
        try:
            heartbeats = AgentHeartBeat.find_all_by_version(None)
        except exception.ModelNotFoundError:
            exception_raised = True
        self.assertIsNone(heartbeats)
        self.assertTrue(exception_raised)

    def test_find_all_by_version_not_found(self):
        """
        Test to retrieve all guest agents with a non-existing version
        """
        version = str(uuid.uuid4())
        exception_raised = False
        heartbeats = None
        try:
            heartbeats = AgentHeartBeat.find_all_by_version(version)
        except exception.ModelNotFoundError:
            exception_raised = True
        self.assertIsNone(heartbeats)
        self.assertTrue(exception_raised)

    def test_update_heartbeat(self):
        """
        Test to show the upgrade scenario that will be used by conductor
        """
        # create a unique record
        instance_id = str(uuid.uuid4())
        heartbeat = AgentHeartBeat.create(
            instance_id=instance_id,
            guest_agent_version="1.2.3")
        self.assertIsNotNone(heartbeat)
        self.assertIsNotNone(heartbeat.id)
        self.assertIsNotNone(heartbeat.instance_id)
        self.assertEqual(instance_id, heartbeat.instance_id)
        self.assertIsNotNone(heartbeat.updated_at)
        self.assertIsNotNone(heartbeat.guest_agent_version)
        self.assertEqual("1.2.3", heartbeat.guest_agent_version)

        # retrieve the record
        heartbeat_found = AgentHeartBeat.find_by_instance_id(
            instance_id=instance_id)
        self.assertIsNotNone(heartbeat_found)
        self.assertEqual(heartbeat.id, heartbeat_found.id)
        self.assertEqual(heartbeat.instance_id, heartbeat_found.instance_id)
        self.assertEqual(heartbeat.updated_at, heartbeat_found.updated_at)
        self.assertEqual(heartbeat.guest_agent_version,
                         heartbeat_found.guest_agent_version)

        # update
        AgentHeartBeat().update(id=heartbeat_found.id,
                                instance_id=instance_id,
                                guest_agent_version="1.2.3")

        # retrieve the record
        updated_heartbeat = AgentHeartBeat.find_by_instance_id(
            instance_id=instance_id)
        self.assertIsNotNone(updated_heartbeat)
        self.assertEqual(heartbeat.id, updated_heartbeat.id)
        self.assertEqual(heartbeat.instance_id,
                         updated_heartbeat.instance_id)
        self.assertEqual(heartbeat.guest_agent_version,
                         updated_heartbeat.guest_agent_version)
        self.assertEqual(heartbeat.updated_at, updated_heartbeat.updated_at)
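# ---------------------------------------------------------------------
# Editor's sketch (not part of the Trove source): the "*_none" and
# "*_not_found" lookup tests above detect ModelNotFoundError with a
# try/except block and a boolean flag. assertRaises states the same
# intent in one call; a standalone toy equivalent (names hypothetical):
# ---------------------------------------------------------------------
import unittest


def find_by_instance_id(instance_id):
    # Toy stand-in for AgentHeartBeat.find_by_instance_id().
    if instance_id is None:
        raise LookupError("no heartbeat for instance %r" % instance_id)
    return instance_id


class AssertRaisesExample(unittest.TestCase):

    def test_find_none_raises(self):
        # One assertion replaces the try/except/flag arrangement above.
        self.assertRaises(LookupError, find_by_instance_id, None)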
trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_api.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from eventlet import Timeout
import mock
import oslo_messaging as messaging
from oslo_messaging.rpc.client import RemoteError
from testtools.matchers import Is

from trove.common.clients import guest_client
import trove.common.context as context
from trove.common import exception
from trove.guestagent import api
from trove import rpc
from trove.tests.unittests import trove_testtools

REPLICATION_SNAPSHOT = {'master': {'id': '123', 'host': '192.168.0.1',
                                   'port': 3306},
                        'dataset': {}, 'binlog_position': 'binpos'}

RPC_API_VERSION = '1.0'


def _mock_call_pwd_change(cmd, version=None, users=None):
    if users == 'dummy':
        return True
    else:
        raise BaseException("Test Failed")


def _mock_call(cmd, timeout, version=None, username=None, hostname=None,
               database=None, databases=None):
    # To check get_user, list_access, grant_access, revoke_access in cmd.
    if cmd in ('get_user', 'list_access', 'grant_access', 'revoke_access'):
        return True
    else:
        raise BaseException("Test Failed")


class ApiTest(trove_testtools.TestCase):

    @mock.patch.object(rpc, 'get_client')
    @mock.patch('trove.instance.models.get_instance_encryption_key',
                return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08')
    def setUp(self, mock_get_encryption_key, *args):
        super(ApiTest, self).setUp()
        self.context = context.TroveContext()
        self.guest = api.API(self.context, 0)
        self.guest._cast = _mock_call_pwd_change
        self.guest._call = _mock_call
        self.api = api.API(self.context, "instance-id-x23d2d")
        self._mock_rpc_client()
        mock_get_encryption_key.assert_called()

    def test_change_passwords(self):
        self.assertIsNone(self.guest.change_passwords("dummy"))

    def test_get_user(self):
        self.assertTrue(self.guest.get_user("dummyname", "dummyhost"))

    def test_list_access(self):
        self.assertTrue(self.guest.list_access("dummyname", "dummyhost"))

    def test_grant_access(self):
        self.assertTrue(self.guest.grant_access("dumname", "dumhost",
                                                "dumdb"))

    def test_revoke_access(self):
        self.assertTrue(self.guest.revoke_access("dumname", "dumhost",
                                                 "dumdb"))

    def test_get_routing_key(self):
        self.assertEqual('guestagent.instance-id-x23d2d',
                         self.api._get_routing_key())

    def test_update_attributes(self):
        self.api.update_attributes('test_user', '%', {'name': 'new_user'})
        self._verify_rpc_prepare_before_cast()
        self._verify_cast('update_attributes', username='test_user',
                          hostname='%', user_attrs={'name': 'new_user'})

    def test_create_user(self):
        self.api.create_user('test_user')
        self._verify_rpc_prepare_before_cast()
        self._verify_cast('create_user', users='test_user')

    @mock.patch('trove.guestagent.api.LOG')
    def test_api_cast_exception(self, mock_logging):
        self.call_context.cast.side_effect = IOError('host down')
        self.assertRaises(exception.GuestError, self.api.create_user,
                          'test_user')

    @mock.patch('trove.guestagent.api.LOG')
    def test_api_call_exception(self, mock_logging):
        self.call_context.call.side_effect = IOError('host_down')
        self.assertRaises(exception.GuestError, self.api.list_users)

    def test_api_call_timeout(self):
        self.call_context.call.side_effect = Timeout()
        self.assertRaises(exception.GuestTimeout, self.api.restart)

    @mock.patch('trove.guestagent.api.LOG')
    def test_api_cast_remote_error(self, mock_logging):
        self.call_context.cast.side_effect = RemoteError('Error')
        self.assertRaises(exception.GuestError, self.api.delete_database,
                          'test_db')

    @mock.patch('trove.guestagent.api.LOG')
    def test_api_call_remote_error(self, mock_logging):
        self.call_context.call.side_effect = RemoteError('Error')
        self.assertRaises(exception.GuestError, self.api.stop_db)

    def test_list_users(self):
        exp_resp = ['user1', 'user2', 'user3']
        self.call_context.call.return_value = exp_resp
        resp = self.api.list_users()
        self._verify_rpc_prepare_before_call()
        self._verify_call('list_users', limit=None, marker=None,
                          include_marker=False)
        self.assertEqual(exp_resp, resp)

    def test_delete_user(self):
        self.api.delete_user('test_user')
        self._verify_rpc_prepare_before_cast()
        self._verify_cast('delete_user', user='test_user')

    def test_create_database(self):
        databases = ['db1', 'db2', 'db3']
        self.api.create_database(databases)
        self._verify_rpc_prepare_before_cast()
        self.call_context.cast.assert_called_once_with(
            self.context, "create_database", databases=databases)

    def test_list_databases(self):
        exp_resp = ['db1', 'db2', 'db3']
        self.call_context.call.return_value = exp_resp
        resp = self.api.list_databases(
            limit=1, marker=2, include_marker=False)
        self._verify_rpc_prepare_before_call()
        self._verify_call("list_databases", limit=1, marker=2,
                          include_marker=False)
        self.assertEqual(exp_resp, resp)

    def test_delete_database(self):
        self.api.delete_database('test_database_name')
        self._verify_rpc_prepare_before_cast()
        self._verify_cast("delete_database", database='test_database_name')

    def test_enable_root(self):
        self.call_context.call.return_value = True
        resp = self.api.enable_root()
        self._verify_rpc_prepare_before_call()
        self._verify_call('enable_root')
        self.assertThat(resp, Is(True))

    def test_enable_root_with_password(self):
        self.call_context.call.return_value = True
        resp = self.api.enable_root_with_password()
        self._verify_rpc_prepare_before_call()
        self._verify_call('enable_root_with_password', root_password=None)
        self.assertThat(resp, Is(True))

    def test_disable_root(self):
        self.call_context.call.return_value = True
        resp = self.api.disable_root()
        self._verify_rpc_prepare_before_call()
        self._verify_call('disable_root')
        self.assertThat(resp, Is(True))

    def test_is_root_enabled(self):
        self.call_context.call.return_value = False
        resp = self.api.is_root_enabled()
        self._verify_rpc_prepare_before_call()
        self._verify_call('is_root_enabled')
        self.assertThat(resp, Is(False))

    def test_get_hwinfo(self):
        self.call_context.call.return_value = '[blah]'
        resp = self.api.get_hwinfo()
        self._verify_rpc_prepare_before_call()
        self._verify_call('get_hwinfo')
        self.assertThat(resp, Is('[blah]'))

    def test_rpc_ping(self):
        # execute
        self.api.rpc_ping()
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('rpc_ping')

    def test_get_diagnostics(self):
        self.call_context.call.return_value = '[all good]'
        resp = self.api.get_diagnostics()
        self._verify_rpc_prepare_before_call()
        self._verify_call('get_diagnostics')
        self.assertThat(resp, Is('[all good]'))

    def test_restart(self):
        self.api.restart()
        self._verify_rpc_prepare_before_call()
        self._verify_call('restart')

    def test_start_db_with_conf_changes(self):
        self.api.start_db_with_conf_changes(None)
        self._verify_rpc_prepare_before_call()
        self._verify_call('start_db_with_conf_changes',
                          config_contents=None)

    def test_reset_configuration(self):
        # execute
        self.api.reset_configuration({'config_contents': 'some junk'})
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('reset_configuration',
                          configuration={'config_contents': 'some junk'})

    def test_stop_db(self):
        self.api.stop_db(do_not_start_on_reboot=False)
        self._verify_rpc_prepare_before_call()
        self._verify_call('stop_db', do_not_start_on_reboot=False)

    def test_get_volume_info(self):
        exp_resp = {'fake': 'resp'}
        self.call_context.call.return_value = exp_resp
        resp = self.api.get_volume_info()
        self._verify_rpc_prepare_before_call()
        self._verify_call('get_filesystem_stats', fs_path=None)
        self.assertThat(resp, Is(exp_resp))

    def test_update_guest(self):
        self.api.update_guest()
        self._verify_rpc_prepare_before_call()
        self._verify_call('update_guest')

    def test_create_backup(self):
        self.api.create_backup({'id': '123'})
        self._verify_rpc_prepare_before_cast()
        self._verify_cast('create_backup', backup_info={'id': '123'})

    def test_unmount_volume(self):
        # execute
        self.api.unmount_volume('/dev/vdb', '/var/lib/mysql')
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('unmount_volume', device_path='/dev/vdb',
                          mount_point='/var/lib/mysql')

    def test_mount_volume(self):
        # execute
        self.api.mount_volume('/dev/vdb', '/var/lib/mysql')
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('mount_volume', device_path='/dev/vdb',
                          mount_point='/var/lib/mysql')

    def test_resize_fs(self):
        # execute
        self.api.resize_fs('/dev/vdb', '/var/lib/mysql')
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('resize_fs', device_path='/dev/vdb',
                          mount_point='/var/lib/mysql')

    def test_update_overrides(self):
        self.api.update_overrides('123')
        self._verify_rpc_prepare_before_call()
        self._verify_call('update_overrides', overrides='123', remove=False)

    def test_apply_overrides(self):
        self.api.apply_overrides('123')
        self._verify_rpc_prepare_before_call()
        self._verify_call('apply_overrides', overrides='123')

    def test_get_replication_snapshot(self):
        # execute
        self.api.get_replication_snapshot({})
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('get_replication_snapshot', snapshot_info={},
                          replica_source_config=None)

    def test_attach_replication_slave(self):
        # execute
        self.api.attach_replication_slave(REPLICATION_SNAPSHOT)
        # verify
        self._verify_rpc_prepare_before_cast()
        self._verify_cast('attach_replication_slave',
                          snapshot=REPLICATION_SNAPSHOT, slave_config=None)

    def test_detach_replica(self):
        # execute
        self.api.detach_replica()
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('detach_replica', for_failover=False)

    def test_get_replica_context(self):
        # execute
        self.api.get_replica_context()
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('get_replica_context')

    def test_attach_replica(self):
        # execute
        self.api.attach_replica(REPLICATION_SNAPSHOT, slave_config=None)
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('attach_replica',
                          replica_info=REPLICATION_SNAPSHOT,
                          slave_config=None)

    def test_make_read_only(self):
        # execute
        self.api.make_read_only(True)
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('make_read_only', read_only=True)

    def test_enable_as_master(self):
        # execute
        self.api.enable_as_master({})
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('enable_as_master', replica_source_config={})

    def test_get_txn_count(self):
        # execute
        self.api.get_txn_count()
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('get_txn_count')

    def test_get_last_txn(self):
        # execute
        self.api.get_last_txn()
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('get_last_txn')

    def test_get_latest_txn_id(self):
        # execute
        self.api.get_latest_txn_id()
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('get_latest_txn_id')

    def test_wait_for_txn(self):
        # execute
        self.api.wait_for_txn("")
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('wait_for_txn', txn="")

    def test_cleanup_source_on_replica_detach(self):
        # execute
        self.api.cleanup_source_on_replica_detach(
            {'replication_user': 'test_user'})
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('cleanup_source_on_replica_detach',
                          replica_info={'replication_user': 'test_user'})

    def test_demote_replication_master(self):
        # execute
        self.api.demote_replication_master()
        # verify
        self._verify_rpc_prepare_before_call()
        self._verify_call('demote_replication_master')

    @mock.patch.object(messaging, 'Target')
    @mock.patch.object(rpc, 'get_server')
    def test_prepare(self, *args):
        self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt',
                         '/mnt/opt', None, 'cont', '1-2-3-4',
                         'override', {'id': '2-3-4-5'})
        self._verify_rpc_prepare_before_cast()
        self._verify_cast(
            'prepare', packages=['package1'], databases='db1',
            memory_mb='2048', users='user1', device_path='/dev/vdt',
            mount_point='/mnt/opt', backup_info=None,
            config_contents='cont', root_password='1-2-3-4',
            overrides='override', cluster_config={'id': '2-3-4-5'},
            snapshot=None, modules=None)

    @mock.patch.object(messaging, 'Target')
    @mock.patch.object(rpc, 'get_server')
    def test_prepare_with_backup(self, *args):
        backup = {'id': 'backup_id_123'}
        self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt',
                         '/mnt/opt', backup, 'cont', '1-2-3-4',
                         'overrides', {"id": "2-3-4-5"}, modules=None)
        self._verify_rpc_prepare_before_cast()
        self._verify_cast(
            'prepare', packages=['package1'], databases='db1',
            memory_mb='2048', users='user1', device_path='/dev/vdt',
            mount_point='/mnt/opt', backup_info=backup,
            config_contents='cont', root_password='1-2-3-4',
            overrides='overrides', cluster_config={'id': '2-3-4-5'},
            snapshot=None, modules=None)

    @mock.patch.object(messaging, 'Target')
    @mock.patch.object(rpc, 'get_server')
    def test_prepare_with_modules(self, *args):
        modules = [{'id': 'mod_id'}]
        self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt',
                         '/mnt/opt', None, 'cont', '1-2-3-4',
                         'overrides', {"id": "2-3-4-5"}, modules=modules)
        self._verify_rpc_prepare_before_cast()
        self._verify_cast(
            'prepare', packages=['package1'], databases='db1',
            memory_mb='2048', users='user1', device_path='/dev/vdt',
            mount_point='/mnt/opt', backup_info=None,
            config_contents='cont', root_password='1-2-3-4',
            overrides='overrides', cluster_config={'id': '2-3-4-5'},
            snapshot=None, modules=modules)

    def test_upgrade(self):
        instance_version = "v1.0.1"
        location = "http://swift/trove-guestagent-v1.0.1.tar.gz"
        # execute
        self.api.upgrade(instance_version, location)
        # verify
        self._verify_rpc_prepare_before_cast()
        self._verify_cast(
            'upgrade', instance_version=instance_version,
            location=location, metadata=None)

    def _verify_rpc_prepare_before_call(self):
        self.api.client.prepare.assert_called_once_with(
            version=RPC_API_VERSION, timeout=mock.ANY)

    def _verify_rpc_prepare_before_cast(self):
        self.api.client.prepare.assert_called_once_with(
            version=RPC_API_VERSION)

    def _verify_cast(self, *args, **kwargs):
        self.call_context.cast.assert_called_once_with(self.context,
                                                       *args, **kwargs)

    def _verify_call(self, *args, **kwargs):
        self.call_context.call.assert_called_once_with(self.context,
                                                       *args, **kwargs)

    def _mock_rpc_client(self):
        self.call_context = mock.Mock()
        self.api.client.prepare = mock.Mock(return_value=self.call_context)
        self.call_context.call = mock.Mock()
        self.call_context.cast = mock.Mock()
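# ---------------------------------------------------------------------
# Editor's sketch (not part of the Trove source): ApiTest's assertions
# hinge on _mock_rpc_client() above, which replaces client.prepare() with
# a mock that always returns the same recording context. The pattern in
# miniature (helper name hypothetical):
# ---------------------------------------------------------------------
import mock


def make_recording_client():
    """Return (client, call_context); prepare() always yields the same
    mock context so call()/cast() invocations can be asserted later."""
    call_context = mock.Mock()
    client = mock.Mock()
    client.prepare = mock.Mock(return_value=call_context)
    return client, call_context

# Example:
#   client, ctx = make_recording_client()
#   client.prepare(version='1.0').cast('ctxt', 'create_user', users='u')
#   ctx.cast.assert_called_once_with('ctxt', 'create_user', users='u')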
class ApiStrategyTest(trove_testtools.TestCase):

    @mock.patch('trove.guestagent.api.API.__init__',
                mock.Mock(return_value=None))
    def test_guest_client_mongodb(self):
        client = guest_client(mock.Mock(), mock.Mock(), 'mongodb')
        self.assertFalse(hasattr(client, 'add_config_servers2'))
        self.assertTrue(callable(client.add_config_servers))

    @mock.patch('trove.guestagent.api.API.__init__',
                mock.Mock(return_value=None))
    def test_guest_client_vertica(self):
        client = guest_client(mock.Mock(), mock.Mock(), 'vertica')
        self.assertFalse(hasattr(client, 'get_public_keys2'))
        self.assertTrue(callable(client.get_public_keys))

trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_backups.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import os

from mock import ANY, call, DEFAULT, Mock, patch, PropertyMock
from testtools.testcase import ExpectedException

from trove.common import exception
from trove.common import utils
from trove.guestagent.common import configuration
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.cassandra import (
    service as cass_service
)
from trove.guestagent.datastore.experimental.db2 import (
    service as db2_service)
from trove.guestagent.datastore.experimental.redis.service import RedisApp
from trove.guestagent.strategies.backup import base as backupBase
from trove.guestagent.strategies.backup.experimental import db2_impl
from trove.guestagent.strategies.backup.experimental.postgresql_impl \
    import PgBaseBackupUtil
from trove.guestagent.strategies.backup.mysql_impl import MySqlApp
from trove.guestagent.strategies.restore import base as restoreBase
from trove.guestagent.strategies.restore.mysql_impl import MySQLRestoreMixin
from trove.tests.unittests import trove_testtools

BACKUP_XTRA_CLS = ("trove.guestagent.strategies.backup."
                   "mysql_impl.InnoBackupEx")
RESTORE_XTRA_CLS = ("trove.guestagent.strategies.restore."
                    "mysql_impl.InnoBackupEx")
BACKUP_XTRA_INCR_CLS = ("trove.guestagent.strategies.backup."
                        "mysql_impl.InnoBackupExIncremental")
RESTORE_XTRA_INCR_CLS = ("trove.guestagent.strategies.restore."
                         "mysql_impl.InnoBackupExIncremental")
BACKUP_SQLDUMP_CLS = ("trove.guestagent.strategies.backup."
                      "mysql_impl.MySQLDump")
RESTORE_SQLDUMP_CLS = ("trove.guestagent.strategies.restore."
                       "mysql_impl.MySQLDump")
BACKUP_CBBACKUP_CLS = ("trove.guestagent.strategies.backup."
                       "experimental.couchbase_impl.CbBackup")
RESTORE_CBBACKUP_CLS = ("trove.guestagent.strategies.restore."
                        "experimental.couchbase_impl.CbBackup")
BACKUP_MONGODUMP_CLS = ("trove.guestagent.strategies.backup."
                        "experimental.mongo_impl.MongoDump")
RESTORE_MONGODUMP_CLS = ("trove.guestagent.strategies.restore."
                         "experimental.mongo_impl.MongoDump")
BACKUP_REDIS_CLS = ("trove.guestagent.strategies.backup."
                    "experimental.redis_impl.RedisBackup")
RESTORE_REDIS_CLS = ("trove.guestagent.strategies.restore."
                     "experimental.redis_impl.RedisBackup")
BACKUP_NODETOOLSNAPSHOT_CLS = ("trove.guestagent.strategies.backup."
                               "experimental.cassandra_impl.NodetoolSnapshot")
RESTORE_NODETOOLSNAPSHOT_CLS = ("trove.guestagent.strategies.restore."
                                "experimental.cassandra_impl"
                                ".NodetoolSnapshot")
BACKUP_DB2_CLS = ("trove.guestagent.strategies.backup."
                  "experimental.db2_impl.DB2Backup")
RESTORE_DB2_CLS = ("trove.guestagent.strategies.restore."
                   "experimental.db2_impl.DB2Backup")
BACKUP_COUCHDB_BACKUP_CLS = ("trove.guestagent.strategies.backup."
                             "experimental.couchdb_impl.CouchDBBackup")
RESTORE_COUCHDB_BACKUP_CLS = ("trove.guestagent.strategies.restore."
                              "experimental.couchdb_impl.CouchDBBackup")

PIPE = " | "
ZIP = "gzip"
UNZIP = "gzip -d -c"
ENCRYPT = "openssl enc -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
DECRYPT = "openssl enc -d -aes-256-cbc -salt -pass pass:default_aes_cbc_key"

XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s"
                   " --user=os_admin --password=password --host=localhost"
                   " --socket=/var/run/mysqld/mysqld.sock"
                   " /var/lib/mysql/data 2>/tmp/innobackupex.log")
XTRA_BACKUP = XTRA_BACKUP_RAW % {'extra_opts': ''}
XTRA_BACKUP_EXTRA_OPTS = XTRA_BACKUP_RAW % {'extra_opts': '--no-lock'}
XTRA_BACKUP_INCR = ('sudo innobackupex --stream=xbstream'
                    ' --incremental --incremental-lsn=%(lsn)s'
                    ' %(extra_opts)s'
                    ' --user=os_admin --password=password --host=localhost'
                    ' --socket=/var/run/mysqld/mysqld.sock'
                    ' /var/lib/mysql/data'
                    ' 2>/tmp/innobackupex.log')
SQLDUMP_BACKUP_RAW = ("mysqldump --all-databases %(extra_opts)s "
                      "--opt --password=password -u os_admin"
                      " 2>/tmp/mysqldump.log")
SQLDUMP_BACKUP = SQLDUMP_BACKUP_RAW % {'extra_opts': ''}
SQLDUMP_BACKUP_EXTRA_OPTS = (SQLDUMP_BACKUP_RAW %
                             {'extra_opts': '--events --routines'
                                            ' --triggers'})
XTRA_RESTORE_RAW = ("sudo xbstream -x -C %(restore_location)s"
                    " 2>/tmp/xbstream_extract.log")
XTRA_RESTORE = XTRA_RESTORE_RAW % {'restore_location': '/var/lib/mysql/data'}
XTRA_INCR_PREPARE = ("sudo innobackupex"
                     " --defaults-file=/var/lib/mysql/data/backup-my.cnf"
                     " --ibbackup=xtrabackup"
                     " --apply-log"
                     " --redo-only"
                     " /var/lib/mysql/data"
                     " %(incr)s"
                     " 2>/tmp/innoprepare.log")
SQLDUMP_RESTORE = "sudo mysql"
PREPARE = ("sudo innobackupex"
           " --defaults-file=/var/lib/mysql/data/backup-my.cnf"
           " --ibbackup=xtrabackup"
           " --apply-log"
           " /var/lib/mysql/data"
           " 2>/tmp/innoprepare.log")
CRYPTO_KEY = "default_aes_cbc_key"

CBBACKUP_CMD = "tar cpPf - /tmp/backups"
CBBACKUP_RESTORE = "sudo tar xpPf -"

MONGODUMP_CMD = "sudo tar cPf - /var/lib/mongodb/dump"
MONGODUMP_RESTORE = "sudo tar xPf -"

REDISBACKUP_CMD = "sudo cat /var/lib/redis/dump.rdb"
REDISBACKUP_RESTORE = "tee /var/lib/redis/dump.rdb"

DB2BACKUP_CMD = "sudo tar cPf - /home/db2inst1/db2inst1/backup"
DB2BACKUP_RESTORE = "sudo tar xPf -"

COUCHDB_BACKUP_CMD = "sudo tar cpPf - /var/lib/couchdb"
COUCHDB_RESTORE_CMD = "sudo tar xPf -"


class GuestAgentBackupTest(trove_testtools.TestCase):

    def setUp(self):
        super(GuestAgentBackupTest, self).setUp()
        self.patch_pc = patch('trove.guestagent.datastore.service.'
                              'BaseDbStatus.prepare_completed')
        self.mock_pc = self.patch_pc.start()
        self.mock_pc.__get__ = Mock(return_value=True)
        self.addCleanup(self.patch_pc.stop)
        self.get_auth_pwd_patch = patch.object(
            MySqlApp, 'get_auth_password',
            mock.Mock(return_value='password'))
        self.get_auth_pwd_mock = self.get_auth_pwd_patch.start()
        self.addCleanup(self.get_auth_pwd_patch.stop)

        self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout')
        self.exec_timeout_mock = self.exec_timeout_patch.start()
        self.addCleanup(self.exec_timeout_patch.stop)

        self.get_data_dir_patch = patch.object(
            MySqlApp, 'get_data_dir', return_value='/var/lib/mysql/data')
        self.get_datadir_mock = self.get_data_dir_patch.start()
        self.addCleanup(self.get_data_dir_patch.stop)

        backupBase.BackupRunner.is_zipped = True
        backupBase.BackupRunner.is_encrypted = True
        restoreBase.RestoreRunner.is_zipped = True
        restoreBase.RestoreRunner.is_encrypted = True

    def tearDown(self):
        super(GuestAgentBackupTest, self).tearDown()

    def test_backup_decrypted_xtrabackup_command(self):
        backupBase.BackupRunner.is_encrypted = False
        RunnerClass = utils.import_class(BACKUP_XTRA_CLS)
        bkup = RunnerClass(12345, extra_opts="")
        self.assertEqual(XTRA_BACKUP + PIPE + ZIP, bkup.command)
        self.assertEqual("12345.xbstream.gz", bkup.manifest)

    def test_backup_decrypted_xtrabackup_with_extra_opts_command(self):
        backupBase.BackupRunner.is_encrypted = False
        RunnerClass = utils.import_class(BACKUP_XTRA_CLS)
        bkup = RunnerClass(12345, extra_opts="--no-lock")
        self.assertEqual(XTRA_BACKUP_EXTRA_OPTS + PIPE + ZIP, bkup.command)
        self.assertEqual("12345.xbstream.gz", bkup.manifest)

    def test_backup_encrypted_xtrabackup_command(self):
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_XTRA_CLS)
        bkup = RunnerClass(12345, extra_opts="")
        self.assertEqual(XTRA_BACKUP + PIPE + ZIP + PIPE + ENCRYPT,
                         bkup.command)
        self.assertEqual("12345.xbstream.gz.enc", bkup.manifest)

    def test_backup_xtrabackup_incremental(self):
        backupBase.BackupRunner.is_encrypted = False
        RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS)
        opts = {'lsn': '54321', 'extra_opts': ''}
        expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP
        bkup = RunnerClass(12345, extra_opts="", lsn="54321")
        self.assertEqual(expected, bkup.command)
        self.assertEqual("12345.xbstream.gz", bkup.manifest)

    def test_backup_xtrabackup_incremental_with_extra_opts_command(self):
        backupBase.BackupRunner.is_encrypted = False
        RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS)
        opts = {'lsn': '54321', 'extra_opts': '--no-lock'}
        expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP
        bkup = RunnerClass(12345, extra_opts="--no-lock", lsn="54321")
        self.assertEqual(expected, bkup.command)
        self.assertEqual("12345.xbstream.gz", bkup.manifest)

    def test_backup_xtrabackup_incremental_encrypted(self):
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS)
        opts = {'lsn': '54321', 'extra_opts': ''}
        expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP + PIPE + ENCRYPT
        bkup = RunnerClass(12345, extra_opts="", lsn="54321")
        self.assertEqual(expected, bkup.command)
        self.assertEqual("12345.xbstream.gz.enc", bkup.manifest)

    def test_backup_decrypted_mysqldump_command(self):
        backupBase.BackupRunner.is_encrypted = False
        RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS)
        bkup = RunnerClass(12345, extra_opts="")
        self.assertEqual(SQLDUMP_BACKUP + PIPE + ZIP, bkup.command)
        self.assertEqual("12345.gz", bkup.manifest)

    def test_backup_decrypted_mysqldump_with_extra_opts_command(self):
        backupBase.BackupRunner.is_encrypted = False
        RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS)
        bkup = RunnerClass(12345,
                           extra_opts="--events --routines --triggers")
        self.assertEqual(SQLDUMP_BACKUP_EXTRA_OPTS + PIPE + ZIP,
                         bkup.command)
        self.assertEqual("12345.gz", bkup.manifest)

    def test_backup_encrypted_mysqldump_command(self):
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS)
        bkup = RunnerClass(12345, user="user", password="password",
                           extra_opts="")
        self.assertEqual(SQLDUMP_BACKUP + PIPE + ZIP + PIPE + ENCRYPT,
                         bkup.command)
        self.assertEqual("12345.gz.enc", bkup.manifest)

    def test_restore_decrypted_xtrabackup_command(self):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_XTRA_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        self.assertEqual(UNZIP + PIPE + XTRA_RESTORE, restr.restore_cmd)
        self.assertEqual(PREPARE, restr.prepare_cmd)

    def test_restore_encrypted_xtrabackup_command(self):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_XTRA_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE,
                         restr.restore_cmd)
        self.assertEqual(PREPARE, restr.prepare_cmd)

    def test_restore_xtrabackup_incremental_prepare_command(self):
        RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        # Final prepare command (same as normal xtrabackup)
        self.assertEqual(PREPARE, restr.prepare_cmd)
        # Incremental backup prepare command
        expected = XTRA_INCR_PREPARE % {'incr':
                                        '--incremental-dir=/foo/bar/'}
        observed = restr._incremental_prepare_cmd('/foo/bar/')
        self.assertEqual(expected, observed)
        # Full backup prepare command
        expected = XTRA_INCR_PREPARE % {'incr': ''}
        observed = restr._incremental_prepare_cmd(None)
        self.assertEqual(expected, observed)

    def test_restore_decrypted_xtrabackup_incremental_command(self):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        # Full restore command
        expected = UNZIP + PIPE + XTRA_RESTORE
        self.assertEqual(expected, restr.restore_cmd)
        # Incremental backup restore command
        opts = {'restore_location': '/foo/bar/'}
        expected = UNZIP + PIPE + (XTRA_RESTORE_RAW % opts)
        observed = restr._incremental_restore_cmd('/foo/bar/')
        self.assertEqual(expected, observed)

    def test_restore_encrypted_xtrabackup_incremental_command(self):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        # Full restore command
        expected = DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE
        self.assertEqual(expected, restr.restore_cmd)
        # Incremental backup restore command
        opts = {'restore_location': '/foo/bar/'}
        expected = DECRYPT + PIPE + UNZIP + PIPE + (XTRA_RESTORE_RAW % opts)
        observed = restr._incremental_restore_cmd('/foo/bar/')
        self.assertEqual(expected, observed)

    def test_restore_decrypted_mysqldump_command(self):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_SQLDUMP_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        self.assertEqual(UNZIP + PIPE + SQLDUMP_RESTORE, restr.restore_cmd)

    def test_restore_encrypted_mysqldump_command(self):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_SQLDUMP_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/mysql/data",
                            location="filename", checksum="md5")
        self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + SQLDUMP_RESTORE,
                         restr.restore_cmd)

    def test_backup_encrypted_cbbackup_command(self):
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_CBBACKUP_CLS)
        utils.execute_with_timeout = mock.Mock(return_value=None)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(
            CBBACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
        self.assertIn("gz.enc", bkp.manifest)

    def test_backup_not_encrypted_cbbackup_command(self):
        backupBase.BackupRunner.is_encrypted = False
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_CBBACKUP_CLS)
        utils.execute_with_timeout = mock.Mock(return_value=None)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(CBBACKUP_CMD + PIPE + ZIP, bkp.command)
        self.assertIn("gz", bkp.manifest)

    def test_restore_decrypted_cbbackup_command(self):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_CBBACKUP_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename", checksum="md5")
        self.assertEqual(UNZIP + PIPE + CBBACKUP_RESTORE, restr.restore_cmd)

    def test_restore_encrypted_cbbackup_command(self):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_CBBACKUP_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename", checksum="md5")
        self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + CBBACKUP_RESTORE,
                         restr.restore_cmd)

    @patch.multiple('trove.guestagent.common.operating_system',
                    chmod=DEFAULT, remove=DEFAULT)
    def test_reset_root_password_on_mysql_restore(self, chmod, remove):
        with patch.object(MySQLRestoreMixin,
                          '_start_mysqld_safe_with_init_file',
                          return_value=True):
            inst = MySQLRestoreMixin()
            inst.reset_root_password()
            chmod.assert_called_once_with(
                ANY, operating_system.FileMode.ADD_READ_ALL, as_root=True)
            # Make sure the temporary files got deleted as root
            # (see bug/1423759).
            remove.assert_has_calls(2 * [call(ANY, force=True,
                                              as_root=True)])

    @mock.patch.object(ImportOverrideStrategy,
                       '_initialize_import_directory')
    def test_backup_encrypted_mongodump_command(self, _):
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_MONGODUMP_CLS)
        utils.execute_with_timeout = mock.Mock(return_value=None)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(
            MONGODUMP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
        self.assertIn("gz.enc", bkp.manifest)

    @mock.patch.object(ImportOverrideStrategy,
                       '_initialize_import_directory')
    def test_backup_not_encrypted_mongodump_command(self, _):
        backupBase.BackupRunner.is_encrypted = False
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_MONGODUMP_CLS)
        utils.execute_with_timeout = mock.Mock(return_value=None)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(MONGODUMP_CMD + PIPE + ZIP, bkp.command)
        self.assertIn("gz", bkp.manifest)

    @mock.patch.object(ImportOverrideStrategy,
                       '_initialize_import_directory')
    def test_restore_decrypted_mongodump_command(self, _):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_MONGODUMP_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename", checksum="md5")
        self.assertEqual(restr.restore_cmd, UNZIP + PIPE + MONGODUMP_RESTORE)

    @mock.patch.object(ImportOverrideStrategy,
                       '_initialize_import_directory')
    def test_restore_encrypted_mongodump_command(self, _):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_MONGODUMP_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename", checksum="md5")
        self.assertEqual(restr.restore_cmd,
                         DECRYPT + PIPE + UNZIP + PIPE + MONGODUMP_RESTORE)

    @patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
    @patch.object(configuration.ConfigurationManager, 'parse_configuration',
                  mock.Mock(return_value={'dir': '/var/lib/redis',
                                          'dbfilename': 'dump.rdb'}))
    @patch.object(RedisApp, 'get_config_command_name',
                  Mock(return_value='fakeconfig'))
    def test_backup_encrypted_redisbackup_command(self, *mocks):
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_REDIS_CLS)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(
            REDISBACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
        self.assertIn("gz.enc", bkp.manifest)

    @patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
    @patch.object(configuration.ConfigurationManager, 'parse_configuration',
                  mock.Mock(return_value={'dir': '/var/lib/redis',
                                          'dbfilename': 'dump.rdb'}))
    @patch.object(RedisApp, 'get_config_command_name',
                  Mock(return_value='fakeconfig'))
    def test_backup_not_encrypted_redisbackup_command(self, *mocks):
        backupBase.BackupRunner.is_encrypted = False
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_REDIS_CLS)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(REDISBACKUP_CMD + PIPE + ZIP, bkp.command)
        self.assertIn("gz", bkp.manifest)

    @patch.object(configuration.ConfigurationManager, 'parse_configuration',
                  mock.Mock(return_value={'dir': '/var/lib/redis',
                                          'dbfilename': 'dump.rdb'}))
    @patch.object(operating_system, 'chown')
    @patch.object(operating_system, 'create_directory')
    @patch.object(RedisApp, 'get_config_command_name',
                  Mock(return_value='fakeconfig'))
    def test_restore_decrypted_redisbackup_command(self, *mocks):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_REDIS_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename", checksum="md5")
        self.assertEqual(restr.restore_cmd,
                         UNZIP + PIPE + REDISBACKUP_RESTORE)

    @patch.object(configuration.ConfigurationManager, 'parse_configuration',
                  mock.Mock(return_value={'dir': '/var/lib/redis',
                                          'dbfilename': 'dump.rdb'}))
    @patch.object(operating_system, 'chown')
    @patch.object(operating_system, 'create_directory')
    @patch.object(RedisApp, 'get_config_command_name',
                  Mock(return_value='fakeconfig'))
    def test_restore_encrypted_redisbackup_command(self, *mocks):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_REDIS_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename", checksum="md5")
        self.assertEqual(restr.restore_cmd,
                         DECRYPT + PIPE + UNZIP + PIPE + REDISBACKUP_RESTORE)

    @patch.object(utils, 'execute_with_timeout')
    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2_service, 'run_command')
    @patch.object(db2_service.DB2App, 'process_default_dbm_config')
    @patch.object(db2_impl.DB2Backup, 'list_dbnames')
    def test_backup_encrypted_db2backup_command(self, *mock, **kwargs):
        backupBase.BackupRunner.is_encrypted = True
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_DB2_CLS)
        bkp = RunnerClass(12345)
        # NOTE: 12345 only seeds the manifest name; it is not a real DB2
        # backup filename.
        self.assertIsNotNone(bkp)
        self.assertEqual(
            DB2BACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
        self.assertIn("gz.enc", bkp.manifest)

    @patch.object(utils, 'execute_with_timeout')
    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2_service, 'run_command')
    @patch.object(db2_service.DB2App, 'process_default_dbm_config')
    @patch.object(db2_impl.DB2Backup, 'list_dbnames')
    def test_backup_not_encrypted_db2backup_command(self, *mock, **kwargs):
        backupBase.BackupRunner.is_encrypted = False
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_DB2_CLS)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(DB2BACKUP_CMD + PIPE + ZIP, bkp.command)
        self.assertIn("gz", bkp.manifest)

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2_service, 'run_command')
    @patch.object(db2_service.DB2App, 'process_default_dbm_config')
    def test_restore_decrypted_db2backup_command(self, *args, **kwargs):
        restoreBase.RestoreRunner.is_zipped = True
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_DB2_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename", checksum="md5")
        self.assertEqual(restr.restore_cmd,
                         UNZIP + PIPE + DB2BACKUP_RESTORE)
    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2_service, 'run_command')
    @patch.object(db2_service.DB2App, 'process_default_dbm_config')
    def test_restore_encrypted_db2backup_command(self, *args, **kwargs):
        restoreBase.RestoreRunner.is_zipped = True
        restoreBase.RestoreRunner.is_encrypted = True
        restoreBase.RestoreRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_DB2_CLS)
        restr = RunnerClass(None, restore_location="/tmp",
                            location="filename", checksum="md5")
        self.assertEqual(restr.restore_cmd,
                         DECRYPT + PIPE + UNZIP + PIPE + DB2BACKUP_RESTORE)

    def test_backup_encrypted_couchdbbackup_command(self):
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_COUCHDB_BACKUP_CLS)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(
            COUCHDB_BACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
        self.assertIn("gz.enc", bkp.manifest)

    def test_backup_not_encrypted_couchdbbackup_command(self):
        backupBase.BackupRunner.is_encrypted = False
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_COUCHDB_BACKUP_CLS)
        bkp = RunnerClass(12345)
        self.assertIsNotNone(bkp)
        self.assertEqual(COUCHDB_BACKUP_CMD + PIPE + ZIP, bkp.command)
        self.assertIn("gz", bkp.manifest)

    def test_restore_decrypted_couchdbbackup_command(self):
        restoreBase.RestoreRunner.is_encrypted = False
        RunnerClass = utils.import_class(RESTORE_COUCHDB_BACKUP_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/couchdb",
                            location="filename", checksum="md5")
        self.assertEqual(UNZIP + PIPE + COUCHDB_RESTORE_CMD,
                         restr.restore_cmd)

    def test_restore_encrypted_couchdbbackup_command(self):
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_COUCHDB_BACKUP_CLS)
        restr = RunnerClass(None, restore_location="/var/lib/couchdb",
                            location="filename", checksum="md5")
        self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + COUCHDB_RESTORE_CMD,
                         restr.restore_cmd)
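# ---------------------------------------------------------------------
# Editor's sketch (not part of the Trove source): the command assertions
# above all follow one convention -- a base dump command is piped through
# gzip and then openssl, while the manifest gains matching suffixes. A
# hypothetical helper spelling out that composition with the PIPE/ZIP/
# ENCRYPT constants defined earlier in this module:
# ---------------------------------------------------------------------
def compose_backup_command(base_cmd, zipped=True, encrypted=True):
    """Return (command, manifest_suffix) under the convention the
    GuestAgentBackupTest assertions above encode."""
    cmd, suffix = base_cmd, ""
    if zipped:
        cmd += PIPE + ZIP          # "... | gzip"
        suffix += ".gz"
    if encrypted:
        cmd += PIPE + ENCRYPT      # "... | openssl enc -aes-256-cbc ..."
        suffix += ".enc"
    return cmd, suffix

# Example: compose_backup_command(CBBACKUP_CMD) yields the command and
# ".gz.enc" suffix asserted by test_backup_encrypted_cbbackup_command.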
class CassandraBackupTest(trove_testtools.TestCase):

    _BASE_BACKUP_CMD = ('sudo tar --transform="s#snapshots/%s/##" -cpPf - '
                        '-C "%s" "%s"')
    _BASE_RESTORE_CMD = 'sudo tar -xpPf - -C "%(restore_location)s"'
    _DATA_DIR = 'data_dir'
    _SNAPSHOT_NAME = 'snapshot_name'
    _SNAPSHOT_FILES = {'foo.db', 'bar.db'}
    _RESTORE_LOCATION = {'restore_location': '/var/lib/cassandra'}

    def setUp(self):
        super(CassandraBackupTest, self).setUp()
        self.app_status_patcher = patch(
            'trove.guestagent.datastore.experimental.cassandra.service.'
            'CassandraAppStatus')
        self.addCleanup(self.app_status_patcher.stop)
        self.app_status_patcher.start()
        self.get_data_dirs_patcher = patch.object(
            cass_service.CassandraApp, 'cassandra_data_dir',
            new_callable=PropertyMock)
        self.addCleanup(self.get_data_dirs_patcher.stop)
        data_dir_mock = self.get_data_dirs_patcher.start()
        data_dir_mock.return_value = self._DATA_DIR
        self.os_list_patcher = patch.object(
            operating_system, 'list_files_in_directory',
            return_value=self._SNAPSHOT_FILES)
        self.addCleanup(self.os_list_patcher.stop)
        self.os_list_patcher.start()

    def tearDown(self):
        super(CassandraBackupTest, self).tearDown()

    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_backup_encrypted_zipped_nodetoolsnapshot_command(self, _):
        bkp = self._build_backup_runner(True, True)
        bkp._run_pre_backup()
        self.assertIsNotNone(bkp)
        self.assertEqual(self._BASE_BACKUP_CMD % (
            self._SNAPSHOT_NAME,
            self._DATA_DIR,
            '" "'.join(self._SNAPSHOT_FILES)
        ) + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
        self.assertIn(".gz.enc", bkp.manifest)

    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_backup_not_encrypted_not_zipped_nodetoolsnapshot_command(
            self, _):
        bkp = self._build_backup_runner(False, False)
        bkp._run_pre_backup()
        self.assertIsNotNone(bkp)
        self.assertEqual(self._BASE_BACKUP_CMD % (
            self._SNAPSHOT_NAME,
            self._DATA_DIR,
            '" "'.join(self._SNAPSHOT_FILES)
        ), bkp.command)
        self.assertNotIn(".gz.enc", bkp.manifest)

    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_backup_not_encrypted_but_zipped_nodetoolsnapshot_command(
            self, _):
        bkp = self._build_backup_runner(False, True)
        bkp._run_pre_backup()
        self.assertIsNotNone(bkp)
        self.assertEqual(self._BASE_BACKUP_CMD % (
            self._SNAPSHOT_NAME,
            self._DATA_DIR,
            '" "'.join(self._SNAPSHOT_FILES)
        ) + PIPE + ZIP, bkp.command)
        self.assertIn(".gz", bkp.manifest)
        self.assertNotIn(".enc", bkp.manifest)

    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_backup_encrypted_but_not_zipped_nodetoolsnapshot_command(
            self, _):
        bkp = self._build_backup_runner(True, False)
        bkp._run_pre_backup()
        self.assertIsNotNone(bkp)
        self.assertEqual(self._BASE_BACKUP_CMD % (
            self._SNAPSHOT_NAME,
            self._DATA_DIR,
            '" "'.join(self._SNAPSHOT_FILES)
        ) + PIPE + ENCRYPT, bkp.command)
        self.assertIn(".enc", bkp.manifest)
        self.assertNotIn(".gz", bkp.manifest)

    @mock.patch.object(ImportOverrideStrategy,
                       '_initialize_import_directory')
    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_restore_encrypted_but_not_zipped_nodetoolsnapshot_command(
            self, mock_logging, _):
        restoreBase.RestoreRunner.is_zipped = False
        restoreBase.RestoreRunner.is_encrypted = True
        restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(RESTORE_NODETOOLSNAPSHOT_CLS)
        rstr = RunnerClass(None, restore_location=self._RESTORE_LOCATION,
                           location="filename", checksum="md5")
        self.assertIsNotNone(rstr)
        self.assertEqual(self._BASE_RESTORE_CMD % self._RESTORE_LOCATION,
                         rstr.base_restore_cmd % self._RESTORE_LOCATION)

    @mock.patch.object(ImportOverrideStrategy,
                       '_initialize_import_directory')
    def _build_backup_runner(self, is_encrypted, is_zipped, _):
        backupBase.BackupRunner.is_zipped = is_zipped
        backupBase.BackupRunner.is_encrypted = is_encrypted
        backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
        RunnerClass = utils.import_class(BACKUP_NODETOOLSNAPSHOT_CLS)
        runner = RunnerClass(self._SNAPSHOT_NAME)
        runner._remove_snapshot = mock.MagicMock()
        runner._snapshot_all_keyspaces = mock.MagicMock()
        runner._find_in_subdirectories = mock.MagicMock(
            return_value=self._SNAPSHOT_FILES
        )
        return runner
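# ---------------------------------------------------------------------
# Editor's sketch (not part of the Trove source): the Couchbase, MongoDB
# and Redis backup tests below drive the runner through its context-
# manager protocol and assert the _run_pre_backup -> _run ->
# _run_post_backup ordering, with no post step after a failure. A minimal
# stand-in consistent with those assertions (class name hypothetical; the
# real BackupRunner may sequence these differently):
# ---------------------------------------------------------------------
class BackupLifecycleSketch(object):

    def __init__(self, backup_id):
        self.backup_id = backup_id

    def __enter__(self):
        # If _run() raises here, __exit__ never runs, so no post step --
        # matching the test_backup_failed_due_to_run_backup tests below.
        self._run_pre_backup()
        self._run()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._run_post_backup()
        return False

    def _run_pre_backup(self):
        pass

    def _run(self):
        pass

    def _run_post_backup(self):
        pass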

class MongodbBackupTests(trove_testtools.TestCase):

    def setUp(self):
        super(MongodbBackupTests, self).setUp()
        self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout',
                                               return_value=('0', ''))
        self.exec_timeout_mock = self.exec_timeout_patch.start()
        self.addCleanup(self.exec_timeout_patch.stop)

        self.init_overrides_dir_patch = patch.object(
            ImportOverrideStrategy, '_initialize_import_directory')
        self.init_overrides_dir_mock = self.init_overrides_dir_patch.start()
        self.addCleanup(self.init_overrides_dir_patch.stop)

        self.backup_runner = utils.import_class(BACKUP_MONGODUMP_CLS)
        self.backup_runner_patch = patch.multiple(
            self.backup_runner, _run=DEFAULT,
            _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT)
        self.backup_runner_mocks = self.backup_runner_patch.start()
        self.addCleanup(self.backup_runner_patch.stop)

    def tearDown(self):
        super(MongodbBackupTests, self).tearDown()

    def test_backup_success(self):
        with self.backup_runner(12345):
            pass

        self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        self.backup_runner_mocks['_run'].assert_called_once_with()
        self.backup_runner_mocks['_run_post_backup'].assert_called_once_with()

    def test_backup_failed_due_to_run_backup(self):
        self.backup_runner_mocks['_run'].configure_mock(
            side_effect=exception.TroveError('test')
        )
        with ExpectedException(exception.TroveError, 'test'):
            with self.backup_runner(12345):
                pass
        self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        self.backup_runner_mocks['_run'].assert_called_once_with()
        self.assertEqual(
            0, self.backup_runner_mocks['_run_post_backup'].call_count)


class MongodbRestoreTests(trove_testtools.TestCase):

    @mock.patch.object(ImportOverrideStrategy,
                       '_initialize_import_directory')
    def setUp(self, _):
        super(MongodbRestoreTests, self).setUp()
        self.patch_ope = patch('os.path.expanduser',
                               return_value='/tmp/mongo')
        self.mock_ope = self.patch_ope.start()
        self.addCleanup(self.patch_ope.stop)
        self.restore_runner = utils.import_class(
            RESTORE_MONGODUMP_CLS)('swift', location='http://some.where',
                                   checksum='True_checksum',
                                   restore_location='/var/lib/somewhere')

    def tearDown(self):
        super(MongodbRestoreTests, self).tearDown()

    def test_restore_success(self):
        expected_content_length = 123
        self.restore_runner._run_restore = mock.Mock(
            return_value=expected_content_length)
        self.restore_runner.pre_restore = mock.Mock()
        self.restore_runner.post_restore = mock.Mock()
        actual_content_length = self.restore_runner.restore()
        self.assertEqual(
            expected_content_length, actual_content_length)

    def test_restore_failed_due_to_pre_restore(self):
        self.restore_runner.post_restore = mock.Mock()
        self.restore_runner.pre_restore = mock.Mock(
            side_effect=exception.ProcessExecutionError('Error'))
        self.restore_runner._run_restore = mock.Mock()
        self.assertRaises(exception.ProcessExecutionError,
                          self.restore_runner.restore)

    def test_restore_failed_due_to_run_restore(self):
        self.restore_runner.pre_restore = mock.Mock()
        self.restore_runner._run_restore = mock.Mock(
            side_effect=exception.ProcessExecutionError('Error'))
        self.restore_runner.post_restore = mock.Mock()
        self.assertRaises(exception.ProcessExecutionError,
                          self.restore_runner.restore)
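
# NOTE: the restore tests in this module all lean on the same RestoreRunner
# contract: restore() is expected to invoke pre_restore(), then
# _run_restore() (whose return value, the restored content length, is
# propagated to the caller), then post_restore().  A sketch of that assumed
# flow:
#
#     def restore(self):
#         self.pre_restore()
#         content_length = self._run_restore()
#         self.post_restore()
#         return content_length
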

class RedisBackupTests(trove_testtools.TestCase):

    def setUp(self):
        super(RedisBackupTests, self).setUp()
        self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout',
                                               return_value=('0', ''))
        self.exec_timeout_patch.start()
        self.addCleanup(self.exec_timeout_patch.stop)
        self.conf_man_patch = patch.object(
            configuration.ConfigurationManager, 'parse_configuration',
            mock.Mock(return_value={'dir': '/var/lib/redis',
                                    'dbfilename': 'dump.rdb'}))
        self.conf_man_patch.start()
        self.addCleanup(self.conf_man_patch.stop)

        self.backup_runner = utils.import_class(BACKUP_REDIS_CLS)
        self.backup_runner_patch = patch.multiple(
            self.backup_runner, _run=DEFAULT,
            _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT)
        self.backup_runner_mocks = self.backup_runner_patch.start()
        self.addCleanup(self.backup_runner_patch.stop)

    def tearDown(self):
        super(RedisBackupTests, self).tearDown()

    @patch.object(RedisApp, 'get_config_command_name',
                  Mock(return_value='fakeconfig'))
    def test_backup_success(self):
        with self.backup_runner(12345):
            pass

        self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        self.backup_runner_mocks['_run'].assert_called_once_with()
        self.backup_runner_mocks['_run_post_backup'].assert_called_once_with()

    @patch.object(RedisApp, 'get_config_command_name',
                  Mock(return_value='fakeconfig'))
    def test_backup_failed_due_to_run_backup(self):
        self.backup_runner_mocks['_run'].configure_mock(
            side_effect=exception.TroveError('test')
        )
        with ExpectedException(exception.TroveError, 'test'):
            with self.backup_runner(12345):
                pass
        self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        self.backup_runner_mocks['_run'].assert_called_once_with()
        self.assertEqual(
            0, self.backup_runner_mocks['_run_post_backup'].call_count)


class RedisRestoreTests(trove_testtools.TestCase):

    @patch.object(RedisApp, 'get_config_command_name',
                  Mock(return_value='fakeconfig'))
    def setUp(self):
        super(RedisRestoreTests, self).setUp()
        self.conf_man_patch = patch.object(
            configuration.ConfigurationManager, 'parse_configuration',
            mock.Mock(return_value={'dir': '/var/lib/redis',
                                    'dbfilename': 'dump.rdb'}))
        self.conf_man_patch.start()
        self.addCleanup(self.conf_man_patch.stop)
        self.os_patch = patch.multiple(operating_system,
                                       chown=DEFAULT,
                                       create_directory=DEFAULT)
        self.os_patch.start()
        self.addCleanup(self.os_patch.stop)

        self.restore_runner = utils.import_class(
            RESTORE_REDIS_CLS)('swift', location='http://some.where',
                               checksum='True_checksum',
                               restore_location='/var/lib/somewhere')
        self.restore_runner_patch = patch.multiple(
            self.restore_runner, _run_restore=DEFAULT,
            pre_restore=DEFAULT, post_restore=DEFAULT)
        self.restore_runner_mocks = self.restore_runner_patch.start()
        self.expected_content_length = 123
        self.restore_runner._run_restore = mock.Mock(
            return_value=self.expected_content_length)
        self.addCleanup(self.restore_runner_patch.stop)

    def tearDown(self):
        super(RedisRestoreTests, self).tearDown()

    def test_restore_success(self):
        actual_content_length = self.restore_runner.restore()
        self.assertEqual(
            self.expected_content_length, actual_content_length)

    def test_restore_failed_due_to_pre_restore(self):
        self.restore_runner_mocks['pre_restore'].side_effect = (
            exception.ProcessExecutionError('Error'))
        self.assertRaises(exception.ProcessExecutionError,
                          self.restore_runner.restore)

    def test_restore_failed_due_to_run_restore(self):
        self.restore_runner._run_restore.side_effect = (
            exception.ProcessExecutionError('Error'))
        self.assertRaises(exception.ProcessExecutionError,
                          self.restore_runner.restore)


class PostgresqlBackupTests(trove_testtools.TestCase):

    def setUp(self):
        super(PostgresqlBackupTests, self).setUp()
        self.bkutil = PgBaseBackupUtil()

        self.b1 = ['000000010000000000000003',
                   '000000010000000000000004',
                   '000000010000000000000005',
                   '000000010000000000000006',
                   '000000010000000000000006.00000168.backup']

        self.b2 = ['000000010000000000000007',
                   '000000010000000000000008',
                   '000000010000000000000009',
                   '000000010000000000000010',
                   '000000010000000000000009.0008A168.backup']

    def tearDown(self):
        super(PostgresqlBackupTests, self).tearDown()

    def test_check_most_recent_backup(self):
        with patch.object(os, 'listdir', return_value=self.b1):
            mrb = self.bkutil.most_recent_backup_file()
            self.assertEqual(mrb, self.b1[4])
            mrbfile = self.bkutil.most_recent_backup_wal()
            self.assertEqual(mrbfile, self.b1[3])

        with patch.object(os, 'listdir', return_value=self.b1 + self.b2):
            mrb = self.bkutil.most_recent_backup_file()
            self.assertEqual(mrb, self.b2[4])
            mrbfile = self.bkutil.most_recent_backup_wal()
            self.assertEqual(mrbfile, self.b2[2])

    def test_check_most_recent_wal_list(self):
        with patch.object(os, 'listdir', return_value=self.b1):
            logs = self.bkutil.log_files_since_last_backup()
            self.assertEqual(logs, [self.b1[3]])

        with patch.object(os, 'listdir', return_value=self.b2):
            logs = self.bkutil.log_files_since_last_backup()
            self.assertEqual(logs, [self.b2[2], self.b2[3]])

        with patch.object(os, 'listdir', return_value=self.b1 + self.b2):
            logs = self.bkutil.log_files_since_last_backup()
            self.assertEqual(logs, [self.b2[2], self.b2[3]])
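
# NOTE: in the PostgreSQL fixtures above, the bare 24-hex-digit entries are
# WAL segment names and the '<segment>.<offset>.backup' entries are backup
# label files.  Per the assertions, most_recent_backup_wal() should return
# the segment named in the newest label (e.g. '...009' for the
# '...009.0008A168.backup' label), and log_files_since_last_backup() the
# segments from that point on.
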

class DB2BackupTests(trove_testtools.TestCase):

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2_service, 'run_command')
    @patch.object(db2_service.DB2App, 'process_default_dbm_config')
    def setUp(self, *args, **kwargs):
        super(DB2BackupTests, self).setUp()
        self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout')
        self.exec_timeout_patch.start()
        self.exec_list_database = patch.object(db2_impl.DB2Backup,
                                               'list_dbnames')
        self.exec_list_database.start()
        self.backup_runner = utils.import_class(BACKUP_DB2_CLS)
        self.backup_runner_patch = patch.multiple(
            self.backup_runner, _run=DEFAULT,
            _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT)

    def tearDown(self):
        super(DB2BackupTests, self).tearDown()
        self.backup_runner_patch.stop()
        self.exec_list_database.stop()
        self.exec_timeout_patch.stop()

    def test_backup_success(self):
        backup_runner_mocks = self.backup_runner_patch.start()
        with self.backup_runner(12345):
            pass

        backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        backup_runner_mocks['_run'].assert_called_once_with()
        backup_runner_mocks['_run_post_backup'].assert_called_once_with()

    def test_backup_failed_due_to_run_backup(self):
        backup_runner_mocks = self.backup_runner_patch.start()
        backup_runner_mocks['_run'].configure_mock(
            side_effect=exception.TroveError('test'))
        with ExpectedException(exception.TroveError, 'test'):
            with self.backup_runner(12345):
                pass

        backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        backup_runner_mocks['_run'].assert_called_once_with()
        self.assertEqual(
            0, backup_runner_mocks['_run_post_backup'].call_count)


class DB2RestoreTests(trove_testtools.TestCase):

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2_service, 'run_command')
    @patch.object(db2_service.DB2App, 'process_default_dbm_config')
    def setUp(self, *args, **kwargs):
        super(DB2RestoreTests, self).setUp()

        self.restore_runner = utils.import_class(
            RESTORE_DB2_CLS)('swift', location='http://some.where',
                             checksum='True_checksum',
                             restore_location='/var/lib/somewhere')

    def tearDown(self):
        super(DB2RestoreTests, self).tearDown()

    def test_restore_success(self):
        expected_content_length = 123
        self.restore_runner._run_restore = mock.Mock(
            return_value=expected_content_length)
        self.restore_runner.post_restore = mock.Mock()
        actual_content_length = self.restore_runner.restore()
        self.assertEqual(
            expected_content_length, actual_content_length)

    def test_restore_failed_due_to_run_restore(self):
        self.restore_runner._run_restore = mock.Mock(
            side_effect=exception.ProcessExecutionError('Error'))
        self.restore_runner.post_restore = mock.Mock()
        self.assertRaises(exception.ProcessExecutionError,
                          self.restore_runner.restore)

class CouchDBBackupTests(trove_testtools.TestCase):

    def setUp(self):
        super(CouchDBBackupTests, self).setUp()
        self.backup_runner = utils.import_class(BACKUP_COUCHDB_BACKUP_CLS)
        self.backup_runner_patch = patch.multiple(
            self.backup_runner, _run=DEFAULT,
            _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT)

    def tearDown(self):
        super(CouchDBBackupTests, self).tearDown()
        self.backup_runner_patch.stop()

    def test_backup_success(self):
        backup_runner_mocks = self.backup_runner_patch.start()
        with self.backup_runner(12345):
            pass

        backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        backup_runner_mocks['_run'].assert_called_once_with()
        backup_runner_mocks['_run_post_backup'].assert_called_once_with()

    def test_backup_failed_due_to_run_backup(self):
        backup_runner_mocks = self.backup_runner_patch.start()
        backup_runner_mocks['_run'].configure_mock(
            side_effect=exception.TroveError('test')
        )
        with ExpectedException(exception.TroveError, 'test'):
            with self.backup_runner(12345):
                pass

        backup_runner_mocks['_run_pre_backup'].assert_called_once_with()
        backup_runner_mocks['_run'].assert_called_once_with()
        self.assertEqual(
            0, backup_runner_mocks['_run_post_backup'].call_count)


class CouchDBRestoreTests(trove_testtools.TestCase):

    def setUp(self):
        super(CouchDBRestoreTests, self).setUp()
        self.restore_runner = utils.import_class(
            RESTORE_COUCHDB_BACKUP_CLS)(
                'swift', location='http://some.where',
                checksum='True_checksum',
                restore_location='/tmp/somewhere')

    def tearDown(self):
        super(CouchDBRestoreTests, self).tearDown()

    def test_restore_success(self):
        expected_content_length = 123
        self.restore_runner._run_restore = mock.Mock(
            return_value=expected_content_length)
        self.restore_runner.pre_restore = mock.Mock()
        self.restore_runner.post_restore = mock.Mock()
        actual_content_length = self.restore_runner.restore()
        self.assertEqual(
            expected_content_length, actual_content_length)

    def test_restore_failed_due_to_run_restore(self):
        self.restore_runner.pre_restore = mock.Mock()
        self.restore_runner._run_restore = mock.Mock(
            side_effect=exception.ProcessExecutionError('Error'))
        self.restore_runner.post_restore = mock.Mock()
        self.assertRaises(exception.ProcessExecutionError,
                          self.restore_runner.restore)


class MySQLRestoreTests(trove_testtools.TestCase):

    def setUp(self):
        super(MySQLRestoreTests, self).setUp()
        self.restore_runner = utils.import_class(
            RESTORE_XTRA_CLS)(
                'swift', location='http://some.where',
                checksum='True_checksum',
                restore_location='/tmp/somewhere')

    def tearDown(self):
        super(MySQLRestoreTests, self).tearDown()

    def test_restore_success(self):
        expected_content_length = 123
        self.restore_runner._run_restore = mock.Mock(
            return_value=expected_content_length)
        self.restore_runner.pre_restore = mock.Mock()
        self.restore_runner.post_restore = mock.Mock()
        actual_content_length = self.restore_runner.restore()
        self.assertEqual(
            expected_content_length, actual_content_length)

    def test_restore_failed_due_to_run_restore(self):
        self.restore_runner.pre_restore = mock.Mock()
        self.restore_runner._run_restore = mock.Mock(
            side_effect=restoreBase.RestoreError('Error'))
        self.restore_runner.post_restore = mock.Mock()
        self.assertRaises(restoreBase.RestoreError,
                          self.restore_runner.restore)


# ---- trove/tests/unittests/guestagent/test_cassandra_manager.py ----
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import random
import string

from mock import ANY
from mock import call
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import NonCallableMagicMock
from mock import patch
from oslo_utils import netutils
from testtools import ExpectedException

from trove.common.db.cassandra import models
from trove.common import exception
from trove.common.instance import ServiceStatuses
from trove.guestagent import backup
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.cassandra import (
    manager as cass_manager)
from trove.guestagent.datastore.experimental.cassandra import (
    service as cass_service)
from trove.guestagent import pkg
from trove.guestagent import volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
    DatastoreManagerTest
from trove.tests.unittests import trove_testtools


class GuestAgentCassandraDBManagerTest(DatastoreManagerTest):

    __MOUNT_POINT = '/var/lib/cassandra'

    __N_GAK = '_get_available_keyspaces'
    __N_GLU = '_get_listed_users'
    __N_BU = '_build_user'
    __N_RU = '_rename_user'
    __N_AUP = '_alter_user_password'
    __N_CAU = 'trove.common.db.cassandra.models.CassandraUser'
    __N_CU = '_create_user'
    __N_GFA = '_grant_full_access_on_keyspace'
    __N_DU = '_drop_user'

    __ACCESS_MODIFIERS = ('ALTER', 'CREATE', 'DROP', 'MODIFY', 'SELECT')
    __CREATE_DB_FORMAT = (
        "CREATE KEYSPACE \"{}\" WITH REPLICATION = "
        "{{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }};"
    )
    __DROP_DB_FORMAT = "DROP KEYSPACE \"{}\";"
    __CREATE_USR_FORMAT = "CREATE USER '{}' WITH PASSWORD %s NOSUPERUSER;"
    __ALTER_USR_FORMAT = "ALTER USER '{}' WITH PASSWORD %s;"
    __DROP_USR_FORMAT = "DROP USER '{}';"
    __GRANT_FORMAT = "GRANT {} ON KEYSPACE \"{}\" TO '{}';"
    __REVOKE_FORMAT = "REVOKE ALL PERMISSIONS ON KEYSPACE \"{}\" FROM '{}';"
    __LIST_PERMISSIONS_FORMAT = "LIST ALL PERMISSIONS NORECURSIVE;"
    __LIST_PERMISSIONS_OF_FORMAT = "LIST ALL PERMISSIONS OF '{}' NORECURSIVE;"
    __LIST_DB_FORMAT = "SELECT * FROM system.schema_keyspaces;"
    __LIST_USR_FORMAT = "LIST USERS;"

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def setUp(self, *args, **kwargs):
        super(GuestAgentCassandraDBManagerTest, self).setUp('cassandra')

        conn_patcher = patch.multiple(cass_service.CassandraConnection,
                                      _connect=DEFAULT,
                                      is_active=Mock(return_value=True))
        self.addCleanup(conn_patcher.stop)
        conn_patcher.start()

        self.real_status = cass_service.CassandraAppStatus.set_status

        class FakeInstanceServiceStatus(object):
            status = ServiceStatuses.NEW

            def save(self):
                pass

        cass_service.CassandraAppStatus.set_status = MagicMock(
            return_value=FakeInstanceServiceStatus())
        self.context = trove_testtools.TroveTestContext(self)

        self.manager = cass_manager.Manager()
        self.manager._app = cass_service.CassandraApp()
        self.manager._admin = cass_service.CassandraAdmin(
            models.CassandraUser('Test'))
        self.admin = self.manager._admin
        self.admin._CassandraAdmin__client = MagicMock()
        self.conn = self.admin._CassandraAdmin__client
        self.pkg = cass_service.packager
        self.origin_os_path_exists = os.path.exists
        self.origin_format = volume.VolumeDevice.format
        self.origin_migrate_data = volume.VolumeDevice.migrate_data
        self.origin_mount = volume.VolumeDevice.mount
        self.origin_mount_points = volume.VolumeDevice.mount_points
        self.origin_stop_db = cass_service.CassandraApp.stop_db
        self.origin_start_db = cass_service.CassandraApp.start_db
        self.origin_install_db = cass_service.CassandraApp._install_db
        self.original_get_ip = netutils.get_my_ipv4
        self.orig_make_host_reachable = (
            cass_service.CassandraApp.apply_initial_guestagent_configuration)

    def tearDown(self):
        super(GuestAgentCassandraDBManagerTest, self).tearDown()
        cass_service.packager = self.pkg
        os.path.exists = self.origin_os_path_exists
        volume.VolumeDevice.format = self.origin_format
        volume.VolumeDevice.migrate_data = self.origin_migrate_data
        volume.VolumeDevice.mount = self.origin_mount
        volume.VolumeDevice.mount_points = self.origin_mount_points
        cass_service.CassandraApp.stop_db = self.origin_stop_db
        cass_service.CassandraApp.start_db = self.origin_start_db
        cass_service.CassandraApp._install_db = self.origin_install_db
        netutils.get_my_ipv4 = self.original_get_ip
        cass_service.CassandraApp.apply_initial_guestagent_configuration = (
            self.orig_make_host_reachable)
        cass_service.CassandraAppStatus.set_status = self.real_status

    def test_update_status(self):
        mock_status = MagicMock()
        mock_status.is_installed = True
        mock_status._is_restarting = False
        self.manager._app.status = mock_status
        self.manager.update_status(self.context)
        self.assertTrue(mock_status.set_status.called)

    def test_prepare_pkg(self):
        self._prepare_dynamic(['cassandra'])

    def test_prepare_no_pkg(self):
        self._prepare_dynamic([])

    def test_prepare_db_not_installed(self):
        self._prepare_dynamic([], is_db_installed=False)

    def test_prepare_db_not_installed_no_package(self):
        self._prepare_dynamic([], is_db_installed=True)

    @patch.object(backup, 'restore')
    def test_prepare_db_restore(self, restore):
        backup_info = {'id': 'backup_id',
                       'instance_id': 'fake-instance-id',
                       'location': 'fake-location',
                       'type': 'InnoBackupEx',
                       'checksum': 'fake-checksum'}

        self._prepare_dynamic(['cassandra'], is_db_installed=False,
                              backup_info=backup_info)
        restore.assert_called_once_with(
            self.context, backup_info, self.__MOUNT_POINT)

    @patch.multiple(operating_system, enable_service_on_boot=DEFAULT,
                    disable_service_on_boot=DEFAULT)
    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_superuser_password_reset(
            self, _, enable_service_on_boot, disable_service_on_boot):
        fake_status = MagicMock()
        fake_status.is_running = False

        test_app = cass_service.CassandraApp()
        test_app.status = fake_status
        with patch.multiple(
                test_app,
                start_db=DEFAULT,
                stop_db=DEFAULT,
                restart=DEFAULT,
                _CassandraApp__disable_remote_access=DEFAULT,
                _CassandraApp__enable_remote_access=DEFAULT,
                _CassandraApp__disable_authentication=DEFAULT,
                _CassandraApp__enable_authentication=DEFAULT,
                _CassandraApp__reset_user_password_to_default=DEFAULT,
                secure=DEFAULT) as calls:

            test_app._reset_admin_password()

            disable_service_on_boot.assert_called_once_with(
                test_app.service_candidates)
            calls[
                '_CassandraApp__disable_remote_access'
            ].assert_called_once_with()
            calls[
                '_CassandraApp__disable_authentication'
            ].assert_called_once_with()
            calls['start_db'].assert_called_once_with(update_db=False,
                                                      enable_on_boot=False)
            calls[
                '_CassandraApp__enable_authentication'
            ].assert_called_once_with()

            pw_reset_mock = calls[
                '_CassandraApp__reset_user_password_to_default'
            ]
            pw_reset_mock.assert_called_once_with(test_app._ADMIN_USER)
            calls['secure'].assert_called_once_with(
                update_user=pw_reset_mock.return_value)
            calls['restart'].assert_called_once_with()
            calls['stop_db'].assert_called_once_with()
            calls[
                '_CassandraApp__enable_remote_access'
            ].assert_called_once_with()
            enable_service_on_boot.assert_called_once_with(
                test_app.service_candidates)
    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_change_cluster_name(self, _):
        fake_status = MagicMock()
        fake_status.is_running = True

        test_app = cass_service.CassandraApp()
        test_app.status = fake_status
        with patch.multiple(
                test_app,
                start_db=DEFAULT,
                stop_db=DEFAULT,
                restart=DEFAULT,
                _update_cluster_name_property=DEFAULT,
                _CassandraApp__reset_cluster_name=DEFAULT) as calls:

            sample_name = NonCallableMagicMock()
            test_app.change_cluster_name(sample_name)
            calls[
                '_CassandraApp__reset_cluster_name'
            ].assert_called_once_with(sample_name)
            calls['_update_cluster_name_property'].assert_called_once_with(
                sample_name)
            calls['restart'].assert_called_once_with()

    @patch.object(cass_service, 'CONF', DEFAULT)
    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_apply_post_restore_updates(self, _, conf_mock):
        fake_status = MagicMock()
        fake_status.is_running = False

        test_app = cass_service.CassandraApp()
        test_app.status = fake_status
        with patch.multiple(
                test_app,
                start_db=DEFAULT,
                stop_db=DEFAULT,
                _update_cluster_name_property=DEFAULT,
                _reset_admin_password=DEFAULT,
                change_cluster_name=DEFAULT) as calls:
            backup_info = {'instance_id': 'old_id'}
            conf_mock.guest_id = 'new_id'
            test_app._apply_post_restore_updates(backup_info)
            calls['_update_cluster_name_property'].assert_called_once_with(
                'old_id')
            calls['_reset_admin_password'].assert_called_once_with()
            calls['start_db'].assert_called_once_with(update_db=False)
            calls['change_cluster_name'].assert_called_once_with('new_id')
            calls['stop_db'].assert_called_once_with()

    def _prepare_dynamic(self, packages,
                         config_content='MockContent',
                         device_path='/dev/vdb',
                         is_db_installed=True, backup_info=None,
                         is_root_enabled=False,
                         overrides=None):
        mock_status = MagicMock()
        mock_app = MagicMock()
        mock_app.status = mock_status
        self.manager._app = mock_app

        mock_status.begin_install = MagicMock(return_value=None)
        mock_app.install_if_needed = MagicMock(return_value=None)
        mock_app.init_storage_structure = MagicMock(return_value=None)
        mock_app.write_config = MagicMock(return_value=None)
        mock_app.apply_initial_guestagent_configuration = MagicMock(
            return_value=None)
        mock_app.restart = MagicMock(return_value=None)
        mock_app.start_db = MagicMock(return_value=None)
        mock_app.stop_db = MagicMock(return_value=None)
        mock_app._remove_system_tables = MagicMock(return_value=None)
        os.path.exists = MagicMock(return_value=True)
        volume.VolumeDevice.format = MagicMock(return_value=None)
        volume.VolumeDevice.migrate_data = MagicMock(return_value=None)
        volume.VolumeDevice.mount = MagicMock(return_value=None)
        volume.VolumeDevice.mount_points = MagicMock(return_value=[])

        with patch.object(pkg.Package, 'pkg_is_installed',
                          return_value=is_db_installed):
            # invocation
            self.manager.prepare(context=self.context, packages=packages,
                                 config_contents=config_content,
                                 databases=None,
                                 memory_mb='2048', users=None,
                                 device_path=device_path,
                                 mount_point=self.__MOUNT_POINT,
                                 backup_info=backup_info,
                                 overrides=None,
                                 cluster_config=None)

        # verification/assertion
        mock_status.begin_install.assert_any_call()
        mock_app.install_if_needed.assert_any_call(packages)
        mock_app._remove_system_tables.assert_any_call()
        mock_app.init_storage_structure.assert_any_call('/var/lib/cassandra')
        mock_app.apply_initial_guestagent_configuration.assert_any_call(
            cluster_name=None)
        mock_app.start_db.assert_any_call(update_db=False)
        mock_app.stop_db.assert_any_call()
        if backup_info:
            mock_app._apply_post_restore_updates.assert_called_once_with(
                backup_info)
    def test_keyspace_validation(self):
        valid_name = self._get_random_name(32)
        db = models.CassandraSchema(valid_name)
        self.assertEqual(valid_name, db.name)
        with ExpectedException(ValueError):
            models.CassandraSchema(self._get_random_name(33))

    def test_user_validation(self):
        valid_name = self._get_random_name(65535)
        usr = models.CassandraUser(valid_name, 'password')
        self.assertEqual(valid_name, usr.name)
        self.assertEqual('password', usr.password)
        with ExpectedException(ValueError):
            models.CassandraUser(self._get_random_name(65536))

    @classmethod
    def _serialize_collection(cls, *collection):
        return [item.serialize() for item in collection]

    @classmethod
    def _get_random_name(cls, size,
                         chars=string.ascii_letters + string.digits):
        return ''.join(random.choice(chars) for _ in range(size))

    def test_create_database(self):
        db1 = models.CassandraSchema('db1')
        db2 = models.CassandraSchema('db2')
        db3 = models.CassandraSchema(self._get_random_name(32))

        self.manager.create_database(self.context,
                                     self._serialize_collection(
                                         db1, db2, db3))
        self.conn.execute.assert_has_calls([
            call(self.__CREATE_DB_FORMAT, (db1.name,)),
            call(self.__CREATE_DB_FORMAT, (db2.name,)),
            call(self.__CREATE_DB_FORMAT, (db3.name,))
        ])

    def test_delete_database(self):
        db = models.CassandraSchema(self._get_random_name(32))
        self.manager.delete_database(self.context, db.serialize())
        self.conn.execute.assert_called_once_with(
            self.__DROP_DB_FORMAT, (db.name,))

    def test_create_user(self):
        usr1 = models.CassandraUser('usr1')
        usr2 = models.CassandraUser('usr2', '')
        usr3 = models.CassandraUser(self._get_random_name(1025), 'password')

        self.manager.create_user(self.context,
                                 self._serialize_collection(
                                     usr1, usr2, usr3))
        self.conn.execute.assert_has_calls([
            call(self.__CREATE_USR_FORMAT, (usr1.name,), (usr1.password,)),
            call(self.__CREATE_USR_FORMAT, (usr2.name,), (usr2.password,)),
            call(self.__CREATE_USR_FORMAT, (usr3.name,), (usr3.password,))
        ])

    def test_delete_user(self):
        usr = models.CassandraUser(self._get_random_name(1025), 'password')
        self.manager.delete_user(self.context, usr.serialize())
        self.conn.execute.assert_called_once_with(
            self.__DROP_USR_FORMAT, (usr.name,))
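
    # NOTE: per the expectations above, the mocked connection's execute()
    # receives the raw CQL format string plus separate tuples: identifiers
    # (user/keyspace names) to be substituted into the '{}' placeholders,
    # and, for the user statements, data values (passwords) bound via the
    # '%s' parameter.  E.g. the delete above amounts to (sketch):
    #
    #     self.conn.execute("DROP USER '{}';", (usr.name,))
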
    def test_change_passwords(self):
        usr1 = models.CassandraUser('usr1')
        usr2 = models.CassandraUser('usr2', '')
        usr3 = models.CassandraUser(self._get_random_name(1025), 'password')

        self.manager.change_passwords(self.context,
                                      self._serialize_collection(
                                          usr1, usr2, usr3))
        self.conn.execute.assert_has_calls([
            call(self.__ALTER_USR_FORMAT, (usr1.name,), (usr1.password,)),
            call(self.__ALTER_USR_FORMAT, (usr2.name,), (usr2.password,)),
            call(self.__ALTER_USR_FORMAT, (usr3.name,), (usr3.password,))
        ])

    def test_alter_user_password(self):
        usr1 = models.CassandraUser('usr1')
        usr2 = models.CassandraUser('usr2', '')
        usr3 = models.CassandraUser(self._get_random_name(1025), 'password')

        self.admin.alter_user_password(usr1)
        self.admin.alter_user_password(usr2)
        self.admin.alter_user_password(usr3)
        self.conn.execute.assert_has_calls([
            call(self.__ALTER_USR_FORMAT, (usr1.name,), (usr1.password,)),
            call(self.__ALTER_USR_FORMAT, (usr2.name,), (usr2.password,)),
            call(self.__ALTER_USR_FORMAT, (usr3.name,), (usr3.password,))
        ])

    def test_grant_access(self):
        usr1 = models.CassandraUser('usr1')
        usr2 = models.CassandraUser('usr1', 'password')
        db1 = models.CassandraSchema('db1')
        db2 = models.CassandraSchema('db2')
        db3 = models.CassandraSchema('db3')

        self.manager.grant_access(self.context, usr1.name, None,
                                  [db1.name, db2.name])
        self.manager.grant_access(self.context, usr2.name, None, [db3.name])

        expected = []
        for modifier in self.__ACCESS_MODIFIERS:
            expected.append(call(self.__GRANT_FORMAT,
                                 (modifier, db1.name, usr1.name)))
            expected.append(call(self.__GRANT_FORMAT,
                                 (modifier, db3.name, usr2.name)))
        self.conn.execute.assert_has_calls(expected, any_order=True)

    def test_revoke_access(self):
        usr1 = models.CassandraUser('usr1')
        usr2 = models.CassandraUser('usr1', 'password')
        db1 = models.CassandraSchema('db1')
        db2 = models.CassandraSchema('db2')

        self.manager.revoke_access(self.context, usr1.name, None, db1.name)
        self.manager.revoke_access(self.context, usr2.name, None, db2.name)
        self.conn.execute.assert_has_calls([
            call(self.__REVOKE_FORMAT, (db1.name, usr1.name)),
            call(self.__REVOKE_FORMAT, (db2.name, usr2.name))
        ])

    def test_get_available_keyspaces(self):
        self.manager.list_databases(self.context)
        self.conn.execute.assert_called_once_with(
            self.__LIST_DB_FORMAT)

    def test_list_databases(self):
        db1 = models.CassandraSchema('db1')
        db2 = models.CassandraSchema('db2')
        db3 = models.CassandraSchema(self._get_random_name(32))

        with patch.object(self.admin, self.__N_GAK,
                          return_value={db1, db2, db3}):
            found = self.manager.list_databases(self.context)
            self.assertEqual(2, len(found))
            self.assertEqual(3, len(found[0]))
            self.assertIsNone(found[1])
            self.assertIn(db1.serialize(), found[0])
            self.assertIn(db2.serialize(), found[0])
            self.assertIn(db3.serialize(), found[0])

        with patch.object(self.admin, self.__N_GAK, return_value=set()):
            found = self.manager.list_databases(self.context)
            self.assertEqual(([], None), found)
    def test_get_acl(self):
        # NOTE: several resource literals in this fixture were reduced to
        # empty strings in this copy; where the expected ACL asserted below
        # pins them down they are restored using the
        # '<all keyspaces>'/'<keyspace NAME>' resource format that
        # CassandraAdmin._get_acl parses.  The r4-r7 rows only need to
        # contribute nothing to the ACL, so their literals are left empty.
        r0 = NonCallableMagicMock(username='user1',
                                  resource='<all keyspaces>',
                                  permission='SELECT')
        r1 = NonCallableMagicMock(username='user2',
                                  resource='<keyspace ks1>',
                                  permission='SELECT')
        r2 = NonCallableMagicMock(username='user2',
                                  resource='<keyspace ks2>',
                                  permission='SELECT')
        r3 = NonCallableMagicMock(username='user2',
                                  resource='<keyspace ks2>',
                                  permission='ALTER')
        r4 = NonCallableMagicMock(username='user3', resource='',
                                  permission='SELECT')
        r5 = NonCallableMagicMock(username='user3', resource='',
                                  permission='ALTER')
        r6 = NonCallableMagicMock(username='user3', resource='',
                                  permission='')
        r7 = NonCallableMagicMock(username='user3', resource='',
                                  permission='')
        r8 = NonCallableMagicMock(username='user3',
                                  resource='<keyspace ks1>',
                                  permission='DELETE')
        r9 = NonCallableMagicMock(username='user4',
                                  resource='<all keyspaces>',
                                  permission='UPDATE')
        r10 = NonCallableMagicMock(username='user4',
                                   resource='<keyspace ks1>',
                                   permission='DELETE')

        available_ks = {models.CassandraSchema('ks1'),
                        models.CassandraSchema('ks2'),
                        models.CassandraSchema('ks3')}

        mock_result_set = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r9, r9,
                           r10]
        execute_mock = MagicMock(return_value=mock_result_set)
        mock_client = MagicMock(execute=execute_mock)

        with patch.object(self.admin, self.__N_GAK,
                          return_value=available_ks) as gak_mock:
            acl = self.admin._get_acl(mock_client)
            execute_mock.assert_called_once_with(
                self.__LIST_PERMISSIONS_FORMAT)
            gak_mock.assert_called_once_with(mock_client)

            self.assertEqual({'user1': {'ks1': {'SELECT'},
                                        'ks2': {'SELECT'},
                                        'ks3': {'SELECT'}},
                              'user2': {'ks1': {'SELECT'},
                                        'ks2': {'SELECT', 'ALTER'}},
                              'user3': {'ks1': {'DELETE'}},
                              'user4': {'ks1': {'UPDATE', 'DELETE'},
                                        'ks2': {'UPDATE'},
                                        'ks3': {'UPDATE'}}
                              }, acl)

        mock_result_set = [r1, r2, r3]
        execute_mock = MagicMock(return_value=mock_result_set)
        mock_client = MagicMock(execute=execute_mock)

        with patch.object(self.admin, self.__N_GAK,
                          return_value=available_ks) as gak_mock:
            acl = self.admin._get_acl(mock_client, username='user2')
            execute_mock.assert_called_once_with(
                self.__LIST_PERMISSIONS_OF_FORMAT.format('user2'))
            gak_mock.assert_not_called()

            self.assertEqual({'user2': {'ks1': {'SELECT'},
                                        'ks2': {'SELECT', 'ALTER'}}}, acl)

        mock_result_set = []
        execute_mock = MagicMock(return_value=mock_result_set)
        mock_client = MagicMock(execute=execute_mock)

        with patch.object(self.admin, self.__N_GAK,
                          return_value=available_ks) as gak_mock:
            acl = self.admin._get_acl(mock_client, username='nonexisting')
            execute_mock.assert_called_once_with(
                self.__LIST_PERMISSIONS_OF_FORMAT.format('nonexisting'))
            gak_mock.assert_not_called()

            self.assertEqual({}, acl)
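
    # NOTE: _get_acl (exercised above) folds the raw LIST PERMISSIONS rows
    # into a nested dict keyed by user, then keyspace, with a set of access
    # modifiers at the leaves, e.g.:
    #
    #     {'user2': {'ks1': {'SELECT'},
    #                'ks2': {'SELECT', 'ALTER'}}}
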
    def test_get_listed_users(self):
        usr1 = models.CassandraUser(self._get_random_name(1025))
        usr2 = models.CassandraUser(self._get_random_name(1025))
        usr3 = models.CassandraUser(self._get_random_name(1025))
        db1 = models.CassandraSchema('db1')
        db2 = models.CassandraSchema('db2')
        usr1.databases.append(db1.serialize())
        usr3.databases.append(db2.serialize())

        rv_1 = NonCallableMagicMock()
        rv_1.configure_mock(name=usr1.name, super=False)
        rv_2 = NonCallableMagicMock()
        rv_2.configure_mock(name=usr2.name, super=False)
        rv_3 = NonCallableMagicMock()
        rv_3.configure_mock(name=usr3.name, super=True)

        with patch.object(self.conn, 'execute', return_value=iter(
                [rv_1, rv_2, rv_3])):
            with patch.object(
                    self.admin, '_get_acl',
                    return_value={usr1.name: {db1.name: {'SELECT'},
                                              db2.name: {}},
                                  usr3.name: {db2.name: {'SELECT'}}}):
                usrs = self.manager.list_users(self.context)
                self.conn.execute.assert_has_calls([
                    call(self.__LIST_USR_FORMAT),
                ], any_order=True)
                self.assertIn(usr1.serialize(), usrs[0])
                self.assertIn(usr2.serialize(), usrs[0])
                self.assertIn(usr3.serialize(), usrs[0])

    def test_list_access(self):
        usr1 = models.CassandraUser('usr1')
        usr2 = models.CassandraUser('usr2')
        usr3 = models.CassandraUser(self._get_random_name(1025), 'password')
        db1 = models.CassandraSchema('db1').serialize()
        db2 = models.CassandraSchema('db2').serialize()
        usr2.databases.append(db1)
        usr3.databases.append(db1)
        usr3.databases.append(db2)

        with patch.object(self.admin, self.__N_GLU,
                          return_value={usr1, usr2, usr3}):
            usr1_dbs = self.manager.list_access(self.context, usr1.name,
                                                None)
            usr2_dbs = self.manager.list_access(self.context, usr2.name,
                                                None)
            usr3_dbs = self.manager.list_access(self.context, usr3.name,
                                                None)
            self.assertEqual([], usr1_dbs)
            self.assertEqual([db1], usr2_dbs)
            self.assertEqual([db1, db2], usr3_dbs)

        with patch.object(self.admin, self.__N_GLU, return_value=set()):
            with ExpectedException(exception.UserNotFound):
                self.manager.list_access(self.context, usr3.name, None)

    def test_list_users(self):
        usr1 = models.CassandraUser('usr1')
        usr2 = models.CassandraUser('usr2')
        usr3 = models.CassandraUser(self._get_random_name(1025), 'password')

        with patch.object(self.admin, self.__N_GLU,
                          return_value={usr1, usr2, usr3}):
            found = self.manager.list_users(self.context)
            self.assertEqual(2, len(found))
            self.assertEqual(3, len(found[0]))
            self.assertIsNone(found[1])
            self.assertIn(usr1.serialize(), found[0])
            self.assertIn(usr2.serialize(), found[0])
            self.assertIn(usr3.serialize(), found[0])

        with patch.object(self.admin, self.__N_GLU, return_value=set()):
            self.assertEqual(([], None),
                             self.manager.list_users(self.context))

    def test_get_user(self):
        usr1 = models.CassandraUser('usr1')
        usr2 = models.CassandraUser('usr2')
        usr3 = models.CassandraUser(self._get_random_name(1025), 'password')

        with patch.object(self.admin, self.__N_GLU,
                          return_value={usr1, usr2, usr3}):
            found = self.manager.get_user(self.context, usr2.name, None)
            self.assertEqual(usr2.serialize(), found)

        with patch.object(self.admin, self.__N_GLU, return_value=set()):
            self.assertIsNone(
                self.manager.get_user(self.context, usr2.name, None))

    @patch.object(cass_service.CassandraAdmin, '_deserialize_keyspace',
                  side_effect=lambda p1: p1)
    def test_rename_user(self, ks_deserializer):
        usr = models.CassandraUser('usr')
        db1 = models.CassandraSchema('db1').serialize()
        db2 = models.CassandraSchema('db2').serialize()
        usr.databases.append(db1)
        usr.databases.append(db2)

        new_user = models.CassandraUser('new_user')
        with patch(self.__N_CAU, return_value=new_user):
            with patch.object(self.admin, self.__N_BU, return_value=usr):
                with patch.object(self.admin, self.__N_CU) as create:
                    with patch.object(self.admin, self.__N_GFA) as grant:
                        with patch.object(self.admin, self.__N_DU) as drop:
                            usr_attrs = {'name': 'user', 'password': 'trove'}
                            self.manager.update_attributes(self.context,
                                                           usr.name, None,
                                                           usr_attrs)
                            create.assert_called_once_with(ANY, new_user)
                            grant.assert_has_calls([call(ANY, db1, ANY),
                                                    call(ANY, db2, ANY)])
                            drop.assert_called_once_with(ANY, usr)

    def test_update_attributes(self):
        usr = models.CassandraUser('usr', 'pwd')

        with patch.object(self.admin, self.__N_BU, return_value=usr):
            usr_attrs = {'name': usr.name, 'password': usr.password}
            with patch.object(self.admin, self.__N_RU) as rename:
                with patch.object(self.admin, self.__N_AUP) as alter:
                    self.manager.update_attributes(self.context, usr.name,
                                                   None, usr_attrs)
                    self.assertEqual(0, rename.call_count)
                    self.assertEqual(0, alter.call_count)

            usr_attrs = {'name': 'user', 'password': 'password'}
            with patch.object(self.admin, self.__N_RU) as rename:
                with patch.object(self.admin, self.__N_AUP) as alter:
                    self.manager.update_attributes(self.context, usr.name,
                                                   None, usr_attrs)
                    rename.assert_called_once_with(ANY, usr,
                                                   usr_attrs['name'],
                                                   usr_attrs['password'])
                    self.assertEqual(0, alter.call_count)
            usr_attrs = {'name': 'user', 'password': usr.password}
            with patch.object(self.admin, self.__N_RU) as rename:
                with patch.object(self.admin, self.__N_AUP) as alter:
                    self.manager.update_attributes(self.context, usr.name,
                                                   None, usr_attrs)
                    rename.assert_called_once_with(ANY, usr,
                                                   usr_attrs['name'],
                                                   usr_attrs['password'])
                    self.assertEqual(0, alter.call_count)

            usr_attrs = {'name': 'user'}
            with patch.object(self.admin, self.__N_RU) as rename:
                with patch.object(self.admin, self.__N_AUP) as alter:
                    with ExpectedException(
                            exception.UnprocessableEntity,
                            "Updating username requires specifying a "
                            "password as well."):
                        self.manager.update_attributes(self.context,
                                                       usr.name, None,
                                                       usr_attrs)
                    self.assertEqual(0, rename.call_count)
                    self.assertEqual(0, alter.call_count)

            usr_attrs = {'name': usr.name, 'password': 'password'}
            with patch.object(self.admin, self.__N_RU) as rename:
                with patch.object(self.admin, self.__N_AUP) as alter:
                    self.manager.update_attributes(self.context, usr.name,
                                                   None, usr_attrs)
                    alter.assert_called_once_with(ANY, usr)
                    self.assertEqual(0, rename.call_count)

            usr_attrs = {'password': usr.password}
            with patch.object(self.admin, self.__N_RU) as rename:
                with patch.object(self.admin, self.__N_AUP) as alter:
                    self.manager.update_attributes(self.context, usr.name,
                                                   None, usr_attrs)
                    self.assertEqual(0, rename.call_count)
                    self.assertEqual(0, alter.call_count)

            usr_attrs = {'password': 'trove'}
            with patch.object(self.admin, self.__N_RU) as rename:
                with patch.object(self.admin, self.__N_AUP) as alter:
                    self.manager.update_attributes(self.context, usr.name,
                                                   None, usr_attrs)
                    alter.assert_called_once_with(ANY, usr)
                    self.assertEqual(0, rename.call_count)

    def test_update_overrides(self):
        cfg_mgr_mock = MagicMock()
        self.manager._app.configuration_manager = cfg_mgr_mock
        overrides = NonCallableMagicMock()
        self.manager.update_overrides(Mock(), overrides)
        cfg_mgr_mock.apply_user_override.assert_called_once_with(overrides)
        cfg_mgr_mock.remove_user_override.assert_not_called()

    def test_remove_overrides(self):
        cfg_mgr_mock = MagicMock()
        self.manager._app.configuration_manager = cfg_mgr_mock
        self.manager.update_overrides(Mock(), {}, remove=True)
        cfg_mgr_mock.remove_user_override.assert_called_once_with()
        cfg_mgr_mock.apply_user_override.assert_not_called()

    def test_apply_overrides(self):
        self.assertIsNone(
            self.manager.apply_overrides(Mock(), NonCallableMagicMock()))

    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    @patch.object(cass_service.CassandraApp, '_run_nodetool_command')
    def test_drain(self, command_runner_mock, _):
        self.manager._app.drain()
        command_runner_mock.assert_called_once_with('drain')

    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    @patch.object(cass_service.CassandraApp, '_run_nodetool_command')
    def test_upgrade_sstables(self, command_runner_mock, _):
        self.manager._app.upgrade_sstables()
        command_runner_mock.assert_called_once_with('upgradesstables')

    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_enable_root(self, _):
        with patch.object(self.manager._app, 'is_root_enabled',
                          return_value=False):
            with patch.object(cass_service.CassandraAdmin,
                              '_create_superuser') as create_mock:
                self.manager.enable_root(self.context)
                create_mock.assert_called_once_with(ANY)

        with patch.object(self.manager._app, 'is_root_enabled',
                          return_value=True):
            with patch.object(cass_service.CassandraAdmin,
                              'alter_user_password') as alter_mock:
                self.manager.enable_root(self.context)
                alter_mock.assert_called_once_with(ANY)
    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_is_root_enabled(self, _):
        trove_admin = Mock()
        trove_admin.configure_mock(name=self.manager._app._ADMIN_USER)
        other_admin = Mock()
        other_admin.configure_mock(name='someuser')

        with patch.object(cass_service.CassandraAdmin, 'list_superusers',
                          return_value=[]):
            self.assertFalse(self.manager.is_root_enabled(self.context))

        with patch.object(cass_service.CassandraAdmin, 'list_superusers',
                          return_value=[trove_admin]):
            self.assertFalse(self.manager.is_root_enabled(self.context))

        with patch.object(cass_service.CassandraAdmin, 'list_superusers',
                          return_value=[other_admin]):
            self.assertTrue(self.manager.is_root_enabled(self.context))

        with patch.object(cass_service.CassandraAdmin, 'list_superusers',
                          return_value=[trove_admin, other_admin]):
            self.assertTrue(self.manager.is_root_enabled(self.context))

    def test_guest_log_enable(self):
        self._assert_guest_log_enable(False, 'INFO')
        self._assert_guest_log_enable(True, 'OFF')

    def _assert_guest_log_enable(self, disable, expected_level):
        with patch.multiple(
                self.manager._app,
                logback_conf_manager=DEFAULT,
                _run_nodetool_command=DEFAULT
        ) as app_mocks:
            self.assertFalse(self.manager.guest_log_enable(
                Mock(), Mock(), disable))

            (app_mocks['logback_conf_manager'].apply_system_override.
             assert_called_once_with(
                 {'configuration': {'root': {'@level': expected_level}}}))
            app_mocks['_run_nodetool_command'].assert_called_once_with(
                'setlogginglevel', 'root', expected_level)


# ---- trove/tests/unittests/guestagent/test_configuration.py ----

# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import getpass

from mock import call
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import patch
import os
import tempfile

from trove.common.stream_codecs import IniCodec
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.tests.unittests import trove_testtools


class TestConfigurationManager(trove_testtools.TestCase):

    @patch.multiple('trove.guestagent.common.operating_system',
                    read_file=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    def test_read_write_configuration(self, read_file, write_file,
                                      chown, chmod):
        sample_path = Mock()
        sample_owner = Mock()
        sample_group = Mock()
        sample_codec = MagicMock()
        sample_requires_root = Mock()
        sample_strategy = MagicMock()
        sample_strategy.configure = Mock()
        sample_strategy.parse_updates = Mock(return_value={})

        manager = ConfigurationManager(
            sample_path, sample_owner, sample_group, sample_codec,
            requires_root=sample_requires_root,
            override_strategy=sample_strategy)

        manager.parse_configuration()
        read_file.assert_called_with(sample_path, codec=sample_codec,
                                     as_root=sample_requires_root)

        with patch.object(manager, 'parse_configuration',
                          return_value={'key1': 'v1', 'key2': 'v2'}):
            self.assertEqual('v1', manager.get_value('key1'))
            self.assertIsNone(manager.get_value('key3'))

        sample_contents = Mock()
        manager.save_configuration(sample_contents)
        write_file.assert_called_with(
            sample_path, sample_contents, as_root=sample_requires_root)
        chown.assert_called_with(sample_path, sample_owner, sample_group,
                                 as_root=sample_requires_root)
        chmod.assert_called_with(
            sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root)

        sample_data = {}
        manager.apply_system_override(sample_data)
        manager.apply_user_override(sample_data)
        manager.apply_system_override(sample_data, change_id='sys1')
        manager.apply_user_override(sample_data, change_id='usr1')
        manager.apply_system_override(sample_data, change_id='sys2',
                                      pre_user=True)
        sample_strategy.apply.has_calls([
            call(manager.SYSTEM_POST_USER_GROUP,
                 manager.DEFAULT_CHANGE_ID, sample_data),
            call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data),
            call(manager.SYSTEM_POST_USER_GROUP, 'sys1', sample_data),
            call(manager.USER_GROUP, 'usr1', sample_data),
            call(manager.SYSTEM_PRE_USER_GROUP, 'sys2', sample_data),
        ])
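
# NOTE: ConfigurationManager (exercised above) layers overrides in named
# groups (system pre-user, user, system post-user), each keyed by a change
# id; removing by the same id later undoes just that layer.  A minimal
# usage sketch, assuming an already-configured manager:
#
#     manager.apply_user_override({'Section_1': {'value': 1}},
#                                 change_id='id1')
#     manager.remove_user_override(change_id='id1')  # back to the base
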

class TestConfigurationOverrideStrategy(trove_testtools.TestCase):

    def setUp(self):
        trove_testtools.TestCase.setUp(self)
        self._temp_files_paths = []
        self.chmod_patch = patch.object(
            operating_system, 'chmod', MagicMock(return_value=None))
        self.chmod_patch_mock = self.chmod_patch.start()
        self.addCleanup(self.chmod_patch.stop)

    def tearDown(self):
        trove_testtools.TestCase.tearDown(self)
        # Remove temporary files in the LIFO order.
        while self._temp_files_paths:
            try:
                os.remove(self._temp_files_paths.pop())
            except Exception:
                pass  # Do not fail in cleanup.

    def _create_temp_dir(self):
        path = tempfile.mkdtemp()
        self._temp_files_paths.append(path)
        return path

    def test_import_override_strategy(self):
        # Data structures representing overrides.
        # ('change id', 'values', 'expected import index',
        #  'expected final import data')

        # Distinct IDs within each group mean that there is one file for
        # each override.
        user_overrides_v1 = ('id1',
                             {'Section_1': {'name': 'sqrt(2)',
                                            'value': '1.4142'}},
                             1,
                             {'Section_1': {'name': 'sqrt(2)',
                                            'value': 1.4142}}
                             )

        user_overrides_v2 = ('id2',
                             {'Section_1': {'is_number': False}},
                             2,
                             {'Section_1': {'is_number': False}}
                             )

        system_overrides_v1 = ('id1',
                               {'Section_1': {'name': 'e',
                                              'value': 2.7183}},
                               1,
                               {'Section_1': {'name': 'e',
                                              'value': 2.7183}}
                               )

        system_overrides_v2 = ('id2',
                               {'Section_2': {'is_number': True}},
                               2,
                               {'Section_2': {'is_number': True}}
                               )

        self._test_import_override_strategy(
            [system_overrides_v1, system_overrides_v2],
            [user_overrides_v1, user_overrides_v2], True)

        # Same IDs within a group mean that the overrides get written into
        # a single file.
        user_overrides_v1 = ('id1',
                             {'Section_1': {'name': 'sqrt(2)',
                                            'value': 1.4142}},
                             1,
                             {'Section_1': {'name': 'sqrt(2)',
                                            'is_number': False,
                                            'value': 1.4142}}
                             )

        user_overrides_v2 = ('id1',
                             {'Section_1': {'is_number': False}},
                             1,
                             {'Section_1': {'name': 'sqrt(2)',
                                            'is_number': False,
                                            'value': 1.4142}}
                             )

        system_overrides_v1 = ('id1',
                               {'Section_1': {'name': 'e',
                                              'value': 2.7183}},
                               1,
                               {'Section_1': {'name': 'e',
                                              'value': 2.7183},
                                'Section_2': {'is_number': True}}
                               )

        system_overrides_v2 = ('id1',
                               {'Section_2': {'is_number': True}},
                               1,
                               {'Section_1': {'name': 'e',
                                              'value': 2.7183},
                                'Section_2': {'is_number': True}}
                               )

        self._test_import_override_strategy(
            [system_overrides_v1, system_overrides_v2],
            [user_overrides_v1, user_overrides_v2], False)

    @patch.multiple(operating_system, chmod=Mock(), chown=Mock())
    def _test_import_override_strategy(
            self, system_overrides, user_overrides, test_multi_rev):
        base_config_contents = {'Section_1': {'name': 'pi',
                                              'is_number': True,
                                              'value': 3.1415}
                                }

        codec = IniCodec()
        current_user = getpass.getuser()
        revision_dir = self._create_temp_dir()

        with tempfile.NamedTemporaryFile() as base_config:

            # Write initial config contents.
            operating_system.write_file(
                base_config.name, base_config_contents, codec)

            strategy = ImportOverrideStrategy(revision_dir, 'ext')
            strategy.configure(
                base_config.name, current_user, current_user, codec, False)

            self._assert_import_override_strategy(
                strategy, system_overrides, user_overrides, test_multi_rev)

    def _assert_import_override_strategy(
            self, strategy, system_overrides, user_overrides,
            test_multi_rev):

        def import_path_builder(
                root, group_name, change_id, file_index, file_ext):
            return os.path.join(
                root, '%s-%03d-%s.%s'
                % (group_name, file_index, change_id, file_ext))

        # Apply and remove overrides sequentially.
        ##########################################

        # Apply the overrides and verify the files as they are created.
        self._apply_import_overrides(
            strategy, 'system', system_overrides, import_path_builder)
        self._apply_import_overrides(
            strategy, 'user', user_overrides, import_path_builder)

        # Verify the files again after applying all overrides.
        self._assert_import_overrides(
            strategy, 'system', system_overrides, import_path_builder)
        self._assert_import_overrides(
            strategy, 'user', user_overrides, import_path_builder)

        # Remove the overrides and verify the files are gone.
        self._remove_import_overrides(
            strategy, 'user', user_overrides, import_path_builder)
        self._remove_import_overrides(
            strategy, 'system', user_overrides, import_path_builder)

        # Remove a whole group.
        ##########################################

        # Apply overrides first.
        self._apply_import_overrides(
            strategy, 'system', system_overrides, import_path_builder)
        self._apply_import_overrides(
            strategy, 'user', user_overrides, import_path_builder)
        # Remove all user overrides and verify the files are gone.
        self._remove_import_overrides(
            strategy, 'user', None, import_path_builder)

        # Assert that the system files are still there intact.
        self._assert_import_overrides(
            strategy, 'system', system_overrides, import_path_builder)

        # Remove all system overrides and verify the files are gone.
        self._remove_import_overrides(
            strategy, 'system', None, import_path_builder)

        if test_multi_rev:
            # Remove at the end (only if we have multiple revision files).
            ##########################################

            # Apply overrides first.
            self._apply_import_overrides(
                strategy, 'system', system_overrides, import_path_builder)
            self._apply_import_overrides(
                strategy, 'user', user_overrides, import_path_builder)

            # Remove the last user and system overrides.
            self._remove_import_overrides(
                strategy, 'user', [user_overrides[-1]],
                import_path_builder)
            self._remove_import_overrides(
                strategy, 'system', [system_overrides[-1]],
                import_path_builder)

            # Assert that the first overrides are still there intact.
            self._assert_import_overrides(
                strategy, 'user', [user_overrides[0]], import_path_builder)
            self._assert_import_overrides(
                strategy, 'system', [system_overrides[0]],
                import_path_builder)

            # Re-apply all overrides.
            self._apply_import_overrides(
                strategy, 'system', system_overrides, import_path_builder)
            self._apply_import_overrides(
                strategy, 'user', user_overrides, import_path_builder)

            # This should overwrite the existing files and resume counting
            # from their indices.
            self._assert_import_overrides(
                strategy, 'user', user_overrides, import_path_builder)
            self._assert_import_overrides(
                strategy, 'system', system_overrides, import_path_builder)

    def _apply_import_overrides(
            self, strategy, group_name, overrides, path_builder):
        # Apply the overrides and immediately check the file and its
        # contents.
        for change_id, contents, index, _ in overrides:
            strategy.apply(group_name, change_id, contents)
            expected_path = path_builder(
                strategy._revision_dir, group_name, change_id, index,
                strategy._revision_ext)
            self._assert_file_exists(expected_path, True)

    def _remove_import_overrides(
            self, strategy, group_name, overrides, path_builder):
        if overrides:
            # Remove the overrides and immediately check that the file was
            # removed.
            for change_id, _, index, _ in overrides:
                strategy.remove(group_name, change_id)
                expected_path = path_builder(
                    strategy._revision_dir, group_name, change_id, index,
                    strategy._revision_ext)
                self._assert_file_exists(expected_path, False)
        else:
            # Remove the entire group.
            strategy.remove(group_name)
            found = operating_system.list_files_in_directory(
                strategy._revision_dir, pattern='^%s-.+$' % group_name)
            self.assertEqual(set(), found,
                             "Some import files from group '%s' "
                             "were not removed." % group_name)

    def _assert_import_overrides(
            self, strategy, group_name, overrides, path_builder):
        # Check all override files and their contents.
        for change_id, _, index, expected in overrides:
            expected_path = path_builder(
                strategy._revision_dir, group_name, change_id, index,
                strategy._revision_ext)
            self._assert_file_exists(expected_path, True)
            # Assert the file contents.
            imported = operating_system.read_file(
                expected_path, codec=strategy._codec)
            self.assertEqual(expected, imported)
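
    # NOTE: as encoded in import_path_builder above, ImportOverrideStrategy
    # is expected to persist each override as
    # '<group>-<index, zero-padded to 3 digits>-<change id>.<ext>' under
    # the revision directory, e.g. 'user-001-id1.ext' for the first user
    # override.
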
    def _assert_file_exists(self, file_path, exists):
        if exists:
            self.assertTrue(os.path.exists(file_path),
                            "Revision import '%s' does not exist."
                            % file_path)
        else:
            self.assertFalse(os.path.exists(file_path),
                             "Revision import '%s' was not removed."
                             % file_path)

    def test_get_value(self):
        revision_dir = self._create_temp_dir()
        self._assert_get_value(ImportOverrideStrategy(revision_dir, 'ext'))
        self._assert_get_value(OneFileOverrideStrategy(revision_dir))

    @patch.multiple(operating_system, chmod=Mock(), chown=Mock())
    def _assert_get_value(self, override_strategy):
        base_config_contents = {'Section_1': {'name': 'pi',
                                              'is_number': True,
                                              'value': 3.1415}
                                }

        config_overrides_v1a = {'Section_1': {'name': 'sqrt(2)',
                                              'value': 1.4142}
                                }

        config_overrides_v2 = {'Section_1': {'name': 'e',
                                             'value': 2.7183},
                               'Section_2': {'foo': 'bar'}
                               }

        config_overrides_v1b = {'Section_1': {'name': 'sqrt(4)',
                                              'value': 2.0}
                                }

        codec = IniCodec()
        current_user = getpass.getuser()

        with tempfile.NamedTemporaryFile() as base_config:

            # Write initial config contents.
            operating_system.write_file(
                base_config.name, base_config_contents, codec)

            manager = ConfigurationManager(
                base_config.name, current_user, current_user, codec,
                requires_root=False, override_strategy=override_strategy)

            # Test default value.
            self.assertIsNone(manager.get_value('Section_2'))
            self.assertEqual('foo', manager.get_value('Section_2', 'foo'))

            # Test value before applying overrides.
            self.assertEqual('pi', manager.get_value('Section_1')['name'])
            self.assertEqual(3.1415,
                             manager.get_value('Section_1')['value'])

            # Test value after applying overrides.
            manager.apply_user_override(config_overrides_v1a,
                                        change_id='id1')
            self.assertEqual('sqrt(2)',
                             manager.get_value('Section_1')['name'])
            self.assertEqual(1.4142,
                             manager.get_value('Section_1')['value'])
            manager.apply_user_override(config_overrides_v2,
                                        change_id='id2')
            self.assertEqual('e', manager.get_value('Section_1')['name'])
            self.assertEqual(2.7183,
                             manager.get_value('Section_1')['value'])
            self.assertEqual('bar', manager.get_value('Section_2')['foo'])

            # Edits to change 'id1' become visible only after removing
            # change 'id2', which overrides 'id1'.
            manager.apply_user_override(config_overrides_v1b,
                                        change_id='id1')
            self.assertEqual('e', manager.get_value('Section_1')['name'])
            self.assertEqual(2.7183,
                             manager.get_value('Section_1')['value'])

            # Test value after removing overrides.

            # The edited values from change 'id1' should be visible after
            # removing 'id2'.
            manager.remove_user_override(change_id='id2')
            self.assertEqual('sqrt(4)',
                             manager.get_value('Section_1')['name'])
            self.assertEqual(2.0, manager.get_value('Section_1')['value'])

            # Back to the base.
            manager.remove_user_override(change_id='id1')
            self.assertEqual('pi', manager.get_value('Section_1')['name'])
            self.assertEqual(3.1415,
                             manager.get_value('Section_1')['value'])
            self.assertIsNone(manager.get_value('Section_2'))

            # Test system overrides.
            manager.apply_system_override(
                config_overrides_v1b, change_id='id1')
            self.assertEqual('sqrt(4)',
                             manager.get_value('Section_1')['name'])
            self.assertEqual(2.0, manager.get_value('Section_1')['value'])

            # The system values should take precedence over the user
            # override.
            manager.apply_user_override(
                config_overrides_v1a, change_id='id1')
            self.assertEqual('sqrt(4)',
                             manager.get_value('Section_1')['name'])
            self.assertEqual(2.0, manager.get_value('Section_1')['value'])

            # The user values should become visible only after removing
            # the system change.
            manager.remove_system_override(change_id='id1')
            self.assertEqual('sqrt(2)',
                             manager.get_value('Section_1')['name'])
            self.assertEqual(1.4142,
                             manager.get_value('Section_1')['value'])

            # Back to the base.
            manager.remove_user_override(change_id='id1')
            self.assertEqual('pi', manager.get_value('Section_1')['name'])
            self.assertEqual(3.1415, manager.get_value('Section_1')['value'])
            self.assertIsNone(manager.get_value('Section_2'))

trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_couchbase_manager.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import stat
import tempfile

import mock
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import patch
from oslo_utils import netutils

from trove.common import utils
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.couchbase import (
    manager as couch_manager)
from trove.guestagent.datastore.experimental.couchbase import (
    service as couch_service)
from trove.guestagent import volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
    DatastoreManagerTest


class GuestAgentCouchbaseManagerTest(DatastoreManagerTest):

    def setUp(self):
        super(GuestAgentCouchbaseManagerTest, self).setUp('couchbase')
        self.manager = couch_manager.Manager()
        self.packages = 'couchbase-server'
        app_patcher = patch.multiple(
            couch_service.CouchbaseApp,
            stop_db=DEFAULT, start_db=DEFAULT, restart=DEFAULT)
        self.addCleanup(app_patcher.stop)
        app_patcher.start()

        netutils_patcher = patch.object(netutils, 'get_my_ipv4')
        self.addCleanup(netutils_patcher.stop)
        netutils_patcher.start()

    def tearDown(self):
        super(GuestAgentCouchbaseManagerTest, self).tearDown()

    def test_update_status(self):
        mock_status = MagicMock()
        mock_status.is_installed = True
        mock_status._is_restarting = False
        self.manager.appStatus = mock_status
        self.manager.update_status(self.context)
        self.assertTrue(mock_status.set_status.called)

    def test_prepare_device_path_true(self):
        self._prepare_dynamic()

    def test_prepare_from_backup(self):
        self._prepare_dynamic(backup_id='backup_id_123abc')

    @patch.multiple(couch_service.CouchbaseApp,
                    install_if_needed=DEFAULT,
                    start_db_with_conf_changes=DEFAULT,
                    initial_setup=DEFAULT)
    @patch.multiple(volume.VolumeDevice,
                    format=DEFAULT, mount=DEFAULT,
                    mount_points=Mock(return_value=[]))
    @patch.object(backup, 'restore')
    def _prepare_dynamic(self, device_path='/dev/vdb', backup_id=None,
                         *mocks, **kwmocks):

        # Mocking the full matrix of prepare() outcomes would be unwieldy;
        # this helper covers only the volume-provisioning path and the
        # optional restore-from-backup path.
        backup_info = {'id': backup_id,
                       'location': 'fake-location',
                       'type': 'CbBackup',
                       'checksum': 'fake-checksum'} if backup_id else None

        mock_status = MagicMock()
        mock_status.begin_install = MagicMock(return_value=None)
        self.manager.appStatus = mock_status

        instance_ram = 2048
        mount_point = '/var/lib/couchbase'

        self.manager.prepare(self.context, self.packages, None,
                             instance_ram, None, device_path=device_path,
                             mount_point=mount_point,
                             backup_info=backup_info,
                             overrides=None,
                             cluster_config=None)

        # verification/assertion
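        # prepare() should have marked the install as started, delegated
        # package installation to CouchbaseApp, and, when backup_info was
        # supplied, restored that backup into the mount point.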
        mock_status.begin_install.assert_any_call()
        kwmocks['install_if_needed'].assert_any_call(self.packages)
        if backup_info:
            backup.restore.assert_any_call(self.context,
                                           backup_info,
                                           mount_point)

    def test_restart(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        couch_service.CouchbaseApp.restart = MagicMock(return_value=None)
        # invocation
        self.manager.restart(self.context)
        # verification/assertion
        couch_service.CouchbaseApp.restart.assert_any_call()

    def test_stop_db(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        couch_service.CouchbaseApp.stop_db = MagicMock(return_value=None)
        # invocation
        self.manager.stop_db(self.context)
        # verification/assertion
        couch_service.CouchbaseApp.stop_db.assert_any_call(
            do_not_start_on_reboot=False)

    def __fake_mkstemp(self):
        self.tempfd, self.tempname = self.original_mkstemp()
        return self.tempfd, self.tempname

    def __fake_mkstemp_raise(self):
        raise OSError(11, 'Resource temporarily unavailable')

    def __cleanup_tempfile(self):
        if self.tempname:
            os.unlink(self.tempname)

    @mock.patch.object(utils, 'execute_with_timeout',
                       Mock(return_value=('0', '')))
    def test_write_password_to_file1(self):
        self.original_mkstemp = tempfile.mkstemp
        self.tempname = None

        with mock.patch.object(tempfile, 'mkstemp', self.__fake_mkstemp):
            self.addCleanup(self.__cleanup_tempfile)
            rootaccess = couch_service.CouchbaseRootAccess()
            rootaccess.write_password_to_file('mypassword')

            filepermissions = os.stat(self.tempname).st_mode
            self.assertEqual(stat.S_IRUSR, filepermissions & 0o777)

    @mock.patch.object(utils, 'execute_with_timeout',
                       Mock(return_value=('0', '')))
    @mock.patch(
        'trove.guestagent.datastore.experimental.couchbase.service.LOG')
    def test_write_password_to_file2(self, mock_logging):
        self.original_mkstemp = tempfile.mkstemp
        self.tempname = None

        with mock.patch.object(tempfile, 'mkstemp',
                               self.__fake_mkstemp_raise):
            rootaccess = couch_service.CouchbaseRootAccess()

            self.assertRaises(RuntimeError,
                              rootaccess.write_password_to_file,
                              'mypassword')

trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_couchdb_manager.py

# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
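# NOTE: unlike the couchbase suite above, which uses patcher.start() with
# addCleanup(patcher.stop), this suite saves the original attributes in
# setUp() and restores them by hand in tearDown().  A sketch of the
# cleanup-based idiom for comparison (illustrative only, not applied here):
#
#     patcher = patch.object(couchdb_service.CouchDBApp, 'stop_db')
#     self.addCleanup(patcher.stop)
#     patcher.start()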
import os from mock import MagicMock from mock import patch from oslo_utils import netutils from testtools.matchers import Is, Equals, Not from trove.common.instance import ServiceStatuses from trove.guestagent import backup from trove.guestagent.datastore.experimental.couchdb import ( manager as couchdb_manager) from trove.guestagent.datastore.experimental.couchdb import ( service as couchdb_service) from trove.guestagent import pkg from trove.guestagent import volume from trove.tests.unittests.guestagent.test_datastore_manager import \ DatastoreManagerTest class GuestAgentCouchDBManagerTest(DatastoreManagerTest): def setUp(self): super(GuestAgentCouchDBManagerTest, self).setUp('couchdb') self.real_status = couchdb_service.CouchDBAppStatus.set_status class FakeInstanceServiceStatus(object): status = ServiceStatuses.NEW def save(self): pass couchdb_service.CouchDBAppStatus.set_status = MagicMock( return_value=FakeInstanceServiceStatus()) self.manager = couchdb_manager.Manager() self.pkg = couchdb_service.packager self.real_db_app_status = couchdb_service.CouchDBAppStatus self.origin_os_path_exists = os.path.exists self.origin_format = volume.VolumeDevice.format self.origin_migrate_data = volume.VolumeDevice.migrate_data self.origin_mount = volume.VolumeDevice.mount self.origin_mount_points = volume.VolumeDevice.mount_points self.origin_stop_db = couchdb_service.CouchDBApp.stop_db self.origin_start_db = couchdb_service.CouchDBApp.start_db self.original_get_ip = netutils.get_my_ipv4 self.orig_make_host_reachable = ( couchdb_service.CouchDBApp.make_host_reachable) self.orig_backup_restore = backup.restore self.orig_create_users = couchdb_service.CouchDBAdmin.create_user self.orig_delete_user = couchdb_service.CouchDBAdmin.delete_user self.orig_list_users = couchdb_service.CouchDBAdmin.list_users self.orig_get_user = couchdb_service.CouchDBAdmin.get_user self.orig_grant_access = couchdb_service.CouchDBAdmin.grant_access self.orig_revoke_access = couchdb_service.CouchDBAdmin.revoke_access self.orig_list_access = couchdb_service.CouchDBAdmin.list_access self.orig_enable_root = couchdb_service.CouchDBAdmin.enable_root self.orig_is_root_enabled = ( couchdb_service.CouchDBAdmin.is_root_enabled) self.orig_create_databases = ( couchdb_service.CouchDBAdmin.create_database) self.orig_list_databases = couchdb_service.CouchDBAdmin.list_databases self.orig_delete_database = ( couchdb_service.CouchDBAdmin.delete_database) def tearDown(self): super(GuestAgentCouchDBManagerTest, self).tearDown() couchdb_service.packager = self.pkg couchdb_service.CouchDBAppStatus.set_status = self.real_db_app_status os.path.exists = self.origin_os_path_exists volume.VolumeDevice.format = self.origin_format volume.VolumeDevice.migrate_data = self.origin_migrate_data volume.VolumeDevice.mount = self.origin_mount volume.VolumeDevice.mount_points = self.origin_mount_points couchdb_service.CouchDBApp.stop_db = self.origin_stop_db couchdb_service.CouchDBApp.start_db = self.origin_start_db netutils.get_my_ipv4 = self.original_get_ip couchdb_service.CouchDBApp.make_host_reachable = ( self.orig_make_host_reachable) backup.restore = self.orig_backup_restore couchdb_service.CouchDBAdmin.create_user = self.orig_create_users couchdb_service.CouchDBAdmin.delete_user = self.orig_delete_user couchdb_service.CouchDBAdmin.list_users = self.orig_list_users couchdb_service.CouchDBAdmin.get_user = self.orig_get_user couchdb_service.CouchDBAdmin.grant_access = self.orig_grant_access couchdb_service.CouchDBAdmin.revoke_access = 
self.orig_revoke_access couchdb_service.CouchDBAdmin.list_access = self.orig_list_access couchdb_service.CouchDBAdmin.enable_root = self.orig_enable_root couchdb_service.CouchDBAdmin.is_root_enabled = ( self.orig_is_root_enabled) couchdb_service.CouchDBAdmin.create_database = ( self.orig_create_databases) couchdb_service.CouchDBAdmin.list_databases = self.orig_list_databases couchdb_service.CouchDBAdmin.delete_database = ( self.orig_delete_database) def test_update_status(self): mock_status = MagicMock() mock_status.is_installed = True mock_status._is_restarting = False self.manager.appStatus = mock_status self.manager.update_status(self.context) self.assertTrue(mock_status.set_status.called) def _prepare_dynamic(self, packages=None, databases=None, config_content=None, device_path='/dev/vdb', is_db_installed=True, backup_id=None, overrides=None): mock_status = MagicMock() mock_app = MagicMock() self.manager.appStatus = mock_status self.manager.app = mock_app mount_point = '/var/lib/couchdb' mock_status.begin_install = MagicMock(return_value=None) mock_app.install_if_needed = MagicMock(return_value=None) mock_app.make_host_reachable = MagicMock(return_value=None) mock_app.restart = MagicMock(return_value=None) mock_app.start_db = MagicMock(return_value=None) mock_app.stop_db = MagicMock(return_value=None) os.path.exists = MagicMock(return_value=True) volume.VolumeDevice.format = MagicMock(return_value=None) volume.VolumeDevice.migrate_data = MagicMock(return_value=None) volume.VolumeDevice.mount = MagicMock(return_value=None) volume.VolumeDevice.mount_points = MagicMock(return_value=[]) backup.restore = MagicMock(return_value=None) backup_info = {'id': backup_id, 'location': 'fake-location', 'type': 'CouchDBBackup', 'checksum': 'fake-checksum'} if backup_id else None couchdb_service.CouchDBAdmin.create_database = MagicMock( return_value=None) couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None) with patch.object(pkg.Package, 'pkg_is_installed', return_value=MagicMock( return_value=is_db_installed)): self.manager.prepare(context=self.context, packages=packages, config_contents=config_content, databases=databases, memory_mb='2048', users=None, device_path=device_path, mount_point=mount_point, backup_info=backup_info, overrides=None, cluster_config=None) # verification/assertion mock_status.begin_install.assert_any_call() mock_app.install_if_needed.assert_any_call(packages) mock_app.make_host_reachable.assert_any_call() mock_app.change_permissions.assert_any_call() if backup_id: backup.restore.assert_any_call(self.context, backup_info, mount_point) def test_prepare_pkg(self): self._prepare_dynamic(['couchdb']) def test_prepare_no_pkg(self): self._prepare_dynamic([]) def test_prepare_from_backup(self): self._prepare_dynamic(['couchdb'], backup_id='123abc456') def test_prepare_database(self): self._prepare_dynamic(databases=['db1']) def test_restart(self): mock_status = MagicMock() self.manager.appStatus = mock_status with patch.object(couchdb_service.CouchDBApp, 'restart', return_value=None): # invocation self.manager.restart(self.context) # verification/assertion couchdb_service.CouchDBApp.restart.assert_any_call() def test_stop_db(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBApp.stop_db = MagicMock(return_value=None) # invocation self.manager.stop_db(self.context) # verification/assertion couchdb_service.CouchDBApp.stop_db.assert_any_call( do_not_start_on_reboot=False) def test_reset_configuration(self): try: configuration = 
{'config_contents': 'some junk'} self.manager.reset_configuration(self.context, configuration) except Exception: self.fail("reset_configuration raised exception unexpectedly.") def test_rpc_ping(self): output = self.manager.rpc_ping(self.context) self.assertTrue(output) def test_create_user(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None) self.manager.create_user(self.context, ['user1']) couchdb_service.CouchDBAdmin.create_user.assert_any_call(['user1']) def test_delete_user(self): user = ['user1'] mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.delete_user = MagicMock(return_value=None) self.manager.delete_user(self.context, user) couchdb_service.CouchDBAdmin.delete_user.assert_any_call(user) def test_list_users(self): couchdb_service.CouchDBAdmin.list_users = MagicMock( return_value=['user1']) users = self.manager.list_users(self.context) self.assertThat(users, Equals(['user1'])) couchdb_service.CouchDBAdmin.list_users.assert_any_call( None, None, False) def test_get_user(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.get_user = MagicMock( return_value=['user1']) self.manager.get_user(self.context, 'user1', None) couchdb_service.CouchDBAdmin.get_user.assert_any_call( 'user1', None) def test_grant_access(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.grant_access = MagicMock( return_value=None) self.manager.grant_access(self.context, 'user1', None, ['db1']) couchdb_service.CouchDBAdmin.grant_access.assert_any_call( 'user1', ['db1']) def test_revoke_access(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.revoke_access = MagicMock( return_value=None) self.manager.revoke_access(self.context, 'user1', None, ['db1']) couchdb_service.CouchDBAdmin.revoke_access.assert_any_call( 'user1', ['db1']) def test_list_access(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.list_access = MagicMock( return_value=['user1']) self.manager.list_access(self.context, 'user1', None) couchdb_service.CouchDBAdmin.list_access.assert_any_call( 'user1', None) def test_enable_root(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.enable_root = MagicMock( return_value=True) result = self.manager.enable_root(self.context) self.assertThat(result, Equals(True)) def test_is_root_enabled(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.is_root_enabled = MagicMock( return_value=True) result = self.manager.is_root_enabled(self.context) self.assertThat(result, Equals(True)) def test_create_databases(self): mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.create_database = MagicMock( return_value=None) self.manager.create_database(self.context, ['db1']) couchdb_service.CouchDBAdmin.create_database.assert_any_call(['db1']) def test_delete_database(self): databases = ['db1'] mock_status = MagicMock() self.manager.appStatus = mock_status couchdb_service.CouchDBAdmin.delete_database = MagicMock( return_value=None) self.manager.delete_database(self.context, databases) couchdb_service.CouchDBAdmin.delete_database.assert_any_call( databases) def test_list_databases(self): mock_status = MagicMock() self.manager.appStatus = mock_status 
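        # Stub the admin layer so this test only verifies that the manager
        # delegates to CouchDBAdmin with the default pagination arguments
        # (limit=None, marker=None, include_marker=False).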
        couchdb_service.CouchDBAdmin.list_databases = MagicMock(
            return_value=['database1'])
        databases = self.manager.list_databases(self.context)
        self.assertThat(databases, Not(Is(None)))
        self.assertThat(databases, Equals(['database1']))
        couchdb_service.CouchDBAdmin.list_databases.assert_any_call(
            None, None, False)

trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_datastore_manager.py

# Copyright 2016 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from trove.tests.unittests import trove_testtools


class DatastoreManagerTest(trove_testtools.TestCase):

    def setUp(self, manager_name):
        super(DatastoreManagerTest, self).setUp()
        self.patch_datastore_manager(manager_name)
        self.context = trove_testtools.TroveTestContext(self)

trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_dbaas.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
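# NOTE: several suites below avoid real waiting by stubbing out the time
# module (see FakeTime/faketime further down), e.g. in MySqlAppTest.setUp:
#
#     time.sleep = Mock()
#     time.time = Mock(side_effect=faketime)  # strictly increasing clock
#
# so that the guestagent's polling loops complete instantly under test.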
import abc import os import subprocess import tempfile import time from uuid import uuid4 from mock import ANY from mock import call from mock import DEFAULT from mock import MagicMock from mock import Mock from mock import patch from mock import PropertyMock from oslo_utils import netutils from six.moves import configparser import sqlalchemy from trove.common import cfg from trove.common import context as trove_context from trove.common.db.models import DatastoreUser from trove.common.db.mysql import models as mysql_models from trove.common.exception import BadRequest from trove.common.exception import GuestError from trove.common.exception import PollTimeOut from trove.common.exception import ProcessExecutionError from trove.common import instance as rd_instance from trove.common import utils from trove.conductor import api as conductor_api from trove.guestagent.common.configuration import ConfigurationManager from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.cassandra import ( service as cass_service) from trove.guestagent.datastore.experimental.couchbase import ( service as couchservice) from trove.guestagent.datastore.experimental.couchdb import ( service as couchdb_service) from trove.guestagent.datastore.experimental.db2 import ( service as db2service) from trove.guestagent.datastore.experimental.mariadb import ( service as mariadb_service) from trove.guestagent.datastore.experimental.mongodb import ( service as mongo_service) from trove.guestagent.datastore.experimental.mongodb import ( system as mongo_system) from trove.guestagent.datastore.experimental.postgresql import ( service as pg_service) from trove.guestagent.datastore.experimental.pxc import ( service as pxc_service) from trove.guestagent.datastore.experimental.redis import service as rservice from trove.guestagent.datastore.experimental.redis.service import RedisApp from trove.guestagent.datastore.experimental.redis import system as RedisSystem from trove.guestagent.datastore.experimental.vertica import ( system as vertica_system) from trove.guestagent.datastore.experimental.vertica.service import ( VerticaAppStatus) from trove.guestagent.datastore.experimental.vertica.service import VerticaApp import trove.guestagent.datastore.mysql.service as dbaas from trove.guestagent.datastore.mysql.service import KeepAliveConnection from trove.guestagent.datastore.mysql.service import MySqlAdmin from trove.guestagent.datastore.mysql.service import MySqlApp from trove.guestagent.datastore.mysql.service import MySqlAppStatus from trove.guestagent.datastore.mysql.service import MySqlRootAccess import trove.guestagent.datastore.mysql_common.service as mysql_common_service import trove.guestagent.datastore.service as base_datastore_service from trove.guestagent.datastore.service import BaseDbStatus from trove.guestagent import dbaas as dbaas_sr from trove.guestagent.dbaas import get_filesystem_volume_stats from trove.guestagent import pkg from trove.guestagent.volume import VolumeDevice from trove.instance.models import InstanceServiceStatus from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util CONF = cfg.CONF """ Unit tests for the classes and functions in dbaas.py. 
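External side effects (subprocess execution, SQL engines, the conductor
API) are replaced with mocks throughout, so these suites exercise only the
control flow and the generated SQL statements and shell commands.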
""" FAKE_DB = {"_name": "testDB", "_character_set": "latin2", "_collate": "latin2_general_ci"} FAKE_DB_2 = {"_name": "testDB2", "_character_set": "latin2", "_collate": "latin2_general_ci"} FAKE_USER = [{"_name": "random", "_password": "guesswhat", "_host": "%", "_databases": [FAKE_DB]}] class FakeTime(object): COUNTER = 0 @classmethod def time(cls): cls.COUNTER += 1 return cls.COUNTER def faketime(*args, **kwargs): return FakeTime.time() class FakeAppStatus(BaseDbStatus): def __init__(self, id, status): self.id = id self.status = status self.next_fake_status = status self._prepare_completed = None self.start_db_service = MagicMock() self.stop_db_service = MagicMock() self.restart_db_service = MagicMock() def _get_actual_db_status(self): return self.next_fake_status def set_next_status(self, next_status): self.next_fake_status = next_status def _is_query_router(self): return False class DbaasTest(trove_testtools.TestCase): def setUp(self): super(DbaasTest, self).setUp() self.orig_utils_execute_with_timeout = \ mysql_common_service.utils.execute_with_timeout self.orig_utils_execute = mysql_common_service.utils.execute def tearDown(self): super(DbaasTest, self).tearDown() mysql_common_service.utils.execute_with_timeout = \ self.orig_utils_execute_with_timeout mysql_common_service.utils.execute = self.orig_utils_execute @patch.object(operating_system, 'remove') def test_clear_expired_password(self, mock_remove): secret_content = ("# The random password set for the " "root user at Wed May 14 14:06:38 2014 " "(local time): somepassword") with patch.object(mysql_common_service.utils, 'execute', return_value=(secret_content, None)): mysql_common_service.clear_expired_password() self.assertEqual(3, mysql_common_service.utils.execute.call_count) self.assertEqual(1, mock_remove.call_count) @patch.object(operating_system, 'remove') def test_no_secret_content_clear_expired_password(self, mock_remove): with patch.object(mysql_common_service.utils, 'execute', return_value=('', None)): mysql_common_service.clear_expired_password() self.assertEqual(2, mysql_common_service.utils.execute.call_count) mock_remove.assert_not_called() @patch.object(operating_system, 'remove') @patch('trove.guestagent.datastore.mysql_common.service.LOG') def test_fail_password_update_content_clear_expired_password(self, mock_logging, mock_remove): secret_content = ("# The random password set for the " "root user at Wed May 14 14:06:38 2014 " "(local time): somepassword") with patch.object(mysql_common_service.utils, 'execute', side_effect=[(secret_content, None), ProcessExecutionError]): mysql_common_service.clear_expired_password() self.assertEqual(2, mysql_common_service.utils.execute.call_count) mock_remove.assert_not_called() @patch.object(operating_system, 'remove') @patch.object(mysql_common_service.utils, 'execute', side_effect=[ProcessExecutionError, (None, None)]) def test_fail_retrieve_secret_content_clear_expired_password(self, mock_execute, mock_remove): mysql_common_service.clear_expired_password() self.assertEqual(2, mock_execute.call_count) mock_remove.assert_not_called() @patch.object(operating_system, 'read_file', return_value={'client': {'password': 'some password'}}) @patch.object(mysql_common_service.BaseMySqlApp.configuration_manager, 'get_value', return_value=MagicMock({'get': 'some password'})) def test_get_auth_password(self, get_cnf_mock, read_file_mock): password = MySqlApp.get_auth_password() read_file_mock.assert_called_once_with(MySqlApp.get_client_auth_file(), codec=MySqlApp.CFG_CODEC) 
self.assertEqual("some password", password) @patch.object(mysql_common_service.BaseMySqlApp.configuration_manager, 'get_value', side_effect=RuntimeError('Error')) @patch.object(operating_system, 'read_file', side_effect=RuntimeError('read_file error')) def test_get_auth_password_error(self, _, get_cnf_mock): self.assertRaisesRegex(RuntimeError, "read_file error", MySqlApp.get_auth_password) def test_service_discovery(self): with patch.object(os.path, 'isfile', return_value=True): mysql_service = mysql_common_service.operating_system.\ service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) def test_load_mysqld_options(self): output = "mysqld would've been started with these args:\n"\ "--user=mysql --port=3306 --basedir=/usr "\ "--tmpdir=/tmp --skip-external-locking" with patch.object(os.path, 'isfile', return_value=True): mysql_common_service.utils.execute = Mock( return_value=(output, None)) options = mysql_common_service.load_mysqld_options() self.assertEqual(5, len(options)) self.assertEqual(["mysql"], options["user"]) self.assertEqual(["3306"], options["port"]) self.assertEqual(["/usr"], options["basedir"]) self.assertEqual(["/tmp"], options["tmpdir"]) self.assertIn("skip-external-locking", options) def test_load_mysqld_options_contains_plugin_loads_options(self): output = ("mysqld would've been started with these args:\n" "--plugin-load=blackhole=ha_blackhole.so " "--plugin-load=federated=ha_federated.so") with patch.object(os.path, 'isfile', return_value=True): mysql_common_service.utils.execute = Mock( return_value=(output, None)) options = mysql_common_service.load_mysqld_options() self.assertEqual(1, len(options)) self.assertEqual(["blackhole=ha_blackhole.so", "federated=ha_federated.so"], options["plugin-load"]) @patch.object(os.path, 'isfile', return_value=True) def test_load_mysqld_options_error(self, mock_exists): mysql_common_service.utils.execute = Mock( side_effect=ProcessExecutionError()) self.assertFalse(mysql_common_service.load_mysqld_options()) class ResultSetStub(object): def __init__(self, rows): self._rows = rows def __iter__(self): return self._rows.__iter__() @property def rowcount(self): return len(self._rows) def __repr__(self): return self._rows.__repr__() class BaseAppTest(object): """A wrapper to inhibit the base test methods from executing during a normal test run. 
""" class AppTestCase(trove_testtools.TestCase): def setUp(self, fake_id, manager_name): super(BaseAppTest.AppTestCase, self).setUp() self.patch_datastore_manager(manager_name) self.FAKE_ID = fake_id util.init_db() InstanceServiceStatus.create( instance_id=self.FAKE_ID, status=rd_instance.ServiceStatuses.NEW) def tearDown(self): InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() super(BaseAppTest.AppTestCase, self).tearDown() @abc.abstractproperty def appStatus(self): pass @abc.abstractproperty def expected_state_change_timeout(self): pass @abc.abstractproperty def expected_service_candidates(self): pass def test_start_db(self): with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.appStatus.set_next_status( rd_instance.ServiceStatuses.RUNNING) self.app.start_db() self.appStatus.start_db_service.assert_called_once_with( self.expected_service_candidates, self.expected_state_change_timeout, enable_on_boot=True, update_db=False) self.assert_reported_status(rd_instance.ServiceStatuses.NEW) def test_stop_db(self): with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.appStatus.set_next_status( rd_instance.ServiceStatuses.SHUTDOWN) self.app.stop_db() self.appStatus.stop_db_service.assert_called_once_with( self.expected_service_candidates, self.expected_state_change_timeout, disable_on_boot=False, update_db=False) self.assert_reported_status(rd_instance.ServiceStatuses.NEW) def test_restart_db(self): self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.app.restart() self.appStatus.restart_db_service.assert_called_once_with( self.expected_service_candidates, self.expected_state_change_timeout) def assert_reported_status(self, expected_status): service_status = InstanceServiceStatus.find_by( instance_id=self.FAKE_ID) self.assertEqual(expected_status, service_status.status) class MySqlAdminMockTest(trove_testtools.TestCase): def setUp(self): super(MySqlAdminMockTest, self).setUp() mysql_app_patcher = patch.multiple(MySqlApp, get_engine=DEFAULT, configuration_manager=DEFAULT) self.addCleanup(mysql_app_patcher.stop) mysql_app_patcher.start() create_engine_patcher = patch.object(sqlalchemy, 'create_engine') self.addCleanup(create_engine_patcher.stop) create_engine_patcher.start() exec_timeout_patcher = patch.object(utils, 'execute_with_timeout') self.addCleanup(exec_timeout_patcher.stop) exec_timeout_patcher.start() self.mock_cli_ctx_mgr = Mock() self.mock_client = MagicMock() self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client) self.mock_cli_ctx_mgr.__exit__ = Mock() local_client_patcher = patch.object(dbaas.MySqlAdmin, 'local_sql_client', return_value=self.mock_cli_ctx_mgr) self.addCleanup(local_client_patcher.stop) local_client_patcher.start() def tearDown(self): super(MySqlAdminMockTest, self).tearDown() @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password', Mock(return_value='some_password')) def test_list_databases(self): with patch.object(self.mock_client, 'execute', return_value=ResultSetStub( [('db1', 'utf8', 'utf8_bin'), ('db2', 'utf8', 'utf8_bin'), ('db3', 'utf8', 'utf8_bin')])): databases, next_marker = MySqlAdmin().list_databases(limit=10) self.assertIsNone(next_marker) self.assertEqual(3, len(databases)) class MySqlAdminTest(trove_testtools.TestCase): def setUp(self): super(MySqlAdminTest, 
self).setUp() self.orig_get_engine = dbaas.get_engine self.mock_cli_ctx_mgr = Mock() self.mock_client = MagicMock() self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client) self.mock_cli_ctx_mgr.__exit__ = Mock() local_client_patcher = patch.object(dbaas.MySqlAdmin, 'local_sql_client', return_value=self.mock_cli_ctx_mgr) self.addCleanup(local_client_patcher.stop) local_client_patcher.start() self.orig_MySQLUser_is_valid_user_name = ( mysql_models.MySQLUser._is_valid_user_name) dbaas.get_engine = MagicMock(name='get_engine') # trove.guestagent.common.configuration import ConfigurationManager dbaas.orig_configuration_manager = dbaas.MySqlApp.configuration_manager dbaas.MySqlApp.configuration_manager = Mock() dbaas.orig_get_auth_password = dbaas.MySqlApp.get_auth_password dbaas.MySqlApp.get_auth_password = Mock(return_value='root_pwd') self.orig_configuration_manager = \ mysql_common_service.BaseMySqlApp.configuration_manager mysql_common_service.BaseMySqlApp.configuration_manager = Mock() self.mySqlAdmin = MySqlAdmin() def tearDown(self): dbaas.get_engine = self.orig_get_engine mysql_models.MySQLUser._is_valid_user_name = ( self.orig_MySQLUser_is_valid_user_name) dbaas.MySqlApp.configuration_manager = \ dbaas.orig_configuration_manager dbaas.MySqlApp.get_auth_password = \ dbaas.orig_get_auth_password mysql_common_service.BaseMySqlApp.configuration_manager = \ self.orig_configuration_manager super(MySqlAdminTest, self).tearDown() def test__associate_dbs(self): db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"}, {"grantee": "'test_user'@'%'", "table_schema": "db2"}, {"grantee": "'test_user'@'%'", "table_schema": "db3"}, {"grantee": "'test_user'@'%'", "table_schema": "db4"}, {"grantee": "'test_user1'@'%'", "table_schema": "db1"}, {"grantee": "'test_user1'@'%'", "table_schema": "db3"}] user = DatastoreUser(name='test_user', host='%') expected = ("SELECT grantee, table_schema FROM " "information_schema.SCHEMA_PRIVILEGES WHERE privilege_type" " != 'USAGE' GROUP BY grantee, table_schema;") with patch.object(self.mock_client, 'execute', return_value=db_result) as mock_execute: self.mySqlAdmin._associate_dbs(user) self.assertEqual(4, len(user.databases)) self._assert_execute_call(expected, mock_execute) def _assert_execute_call(self, expected_query, execute_mock, call_idx=0): args, _ = execute_mock.call_args_list[call_idx] self.assertTrue(execute_mock.called, "The client object was not called.") self.assertEqual(expected_query, args[0].text, "Queries are not the same.") def test_change_passwords(self): user = [{"name": "test_user", "host": "%", "password": "password"}] expected = ("SET PASSWORD FOR 'test_user'@'%' = PASSWORD('password');") with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.change_passwords(user) self._assert_execute_call(expected, mock_execute) def test_update_attributes_password(self): expected = ("SET PASSWORD FOR 'test_user'@'%' = PASSWORD('password');") user = MagicMock() user.name = "test_user" user.host = "%" user_attrs = {"password": "password"} with patch.object(self.mock_client, 'execute') as mock_execute: with patch.object(self.mySqlAdmin, '_get_user', return_value=user): self.mySqlAdmin.update_attributes('test_user', '%', user_attrs) self._assert_execute_call(expected, mock_execute) def test_update_attributes_name(self): user = MagicMock() user.name = "test_user" user.host = "%" user_attrs = {"name": "new_name"} expected = ("RENAME USER 'test_user'@'%' TO 'new_name'@'%';") with patch.object(self.mock_client, 
'execute') as mock_execute: with patch.object(self.mySqlAdmin, '_get_user', return_value=user): self.mySqlAdmin.update_attributes('test_user', '%', user_attrs) self._assert_execute_call(expected, mock_execute) def test_update_attributes_host(self): user = MagicMock() user.name = "test_user" user.host = "%" user_attrs = {"host": "new_host"} expected = ("RENAME USER 'test_user'@'%' TO 'test_user'@'new_host';") with patch.object(self.mock_client, 'execute') as mock_execute: with patch.object(self.mySqlAdmin, '_get_user', return_value=user): self.mySqlAdmin.update_attributes('test_user', '%', user_attrs) self._assert_execute_call(expected, mock_execute) def test_create_database(self): databases = [] databases.append(FAKE_DB) expected = ("CREATE DATABASE IF NOT EXISTS " "`testDB` CHARACTER SET = 'latin2' " "COLLATE = 'latin2_general_ci';") with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.create_database(databases) self._assert_execute_call(expected, mock_execute) def test_create_database_more_than_1(self): databases = [] databases.append(FAKE_DB) databases.append(FAKE_DB_2) expected_1 = ("CREATE DATABASE IF NOT EXISTS " "`testDB` CHARACTER SET = 'latin2' " "COLLATE = 'latin2_general_ci';") expected_2 = ("CREATE DATABASE IF NOT EXISTS " "`testDB2` CHARACTER SET = 'latin2' " "COLLATE = 'latin2_general_ci';") with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.create_database(databases) self._assert_execute_call(expected_1, mock_execute, call_idx=0) self._assert_execute_call(expected_2, mock_execute, call_idx=1) def test_create_database_no_db(self): databases = [] with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.create_database(databases) mock_execute.assert_not_called() def test_delete_database(self): database = {"_name": "testDB"} expected = "DROP DATABASE `testDB`;" with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.delete_database(database) self._assert_execute_call(expected, mock_execute) def test_delete_user(self): user = {"_name": "testUser", "_host": None} expected = "DROP USER `testUser`@`%`;" with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.delete_user(user) self._assert_execute_call(expected, mock_execute) def test_create_user(self): access_grants_expected = ("GRANT ALL PRIVILEGES ON `testDB`.* TO " "`random`@`%`;") with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.create_user(FAKE_USER) mock_execute.assert_any_call(TextClauseMatcher('CREATE USER'), user='random', host='%') self._assert_execute_call(access_grants_expected, mock_execute, call_idx=1) @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password', Mock(return_value='some_password')) def test_list_databases(self): expected = ("SELECT schema_name as name," " default_character_set_name as charset," " default_collation_name as collation" " FROM information_schema.schemata WHERE" " schema_name NOT IN ('" + "', '".join(cfg.get_ignored_dbs()) + "')" " ORDER BY schema_name ASC;" ) with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.list_databases() self._assert_execute_call(expected, mock_execute) def test_list_databases_with_limit(self): limit = 2 expected = ("SELECT schema_name as name," " default_character_set_name as charset," " default_collation_name as collation" " FROM information_schema.schemata WHERE" " schema_name NOT IN ('" + "', '".join(cfg.get_ignored_dbs()) + "')" " ORDER BY schema_name ASC LIMIT " + 
str(limit + 1) + ";" ) with patch.object(self.mock_client, 'execute') as mock_execute: mock_execute.return_value.rowcount = 0 self.mySqlAdmin.list_databases(limit) self._assert_execute_call(expected, mock_execute) def test_list_databases_with_marker(self): marker = "aMarker" expected = ("SELECT schema_name as name," " default_character_set_name as charset," " default_collation_name as collation" " FROM information_schema.schemata WHERE" " schema_name NOT IN ('" + "', '".join(cfg.get_ignored_dbs()) + "')" " AND schema_name > '" + marker + "'" " ORDER BY schema_name ASC;" ) with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.list_databases(marker=marker) self._assert_execute_call(expected, mock_execute) def test_list_databases_with_include_marker(self): marker = "aMarker" expected = ("SELECT schema_name as name," " default_character_set_name as charset," " default_collation_name as collation" " FROM information_schema.schemata WHERE" " schema_name NOT IN ('" + "', '".join(cfg.get_ignored_dbs()) + "')" " AND schema_name >= '" + marker + "'" " ORDER BY schema_name ASC;" ) with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.list_databases(marker=marker, include_marker=True) self._assert_execute_call(expected, mock_execute) def test_list_users(self): expected = ("SELECT User, Host, Marker FROM" " (SELECT User, Host, CONCAT(User, '@', Host) as Marker" " FROM mysql.user ORDER BY User, Host) as innerquery WHERE" " Host != 'localhost' AND User NOT IN ('os_admin', 'root')" " ORDER BY Marker;" ) with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.list_users() self._assert_execute_call(expected, mock_execute) def test_list_users_with_limit(self): limit = 2 expected = ("SELECT User, Host, Marker FROM" " (SELECT User, Host, CONCAT(User, '@', Host) as Marker" " FROM mysql.user ORDER BY User, Host) as innerquery WHERE" " Host != 'localhost' AND User NOT IN ('os_admin', 'root')" " ORDER BY Marker" " LIMIT " + str(limit + 1) + ";" ) with patch.object(self.mock_client, 'execute') as mock_execute: mock_execute.return_value.rowcount = 0 self.mySqlAdmin.list_users(limit) self._assert_execute_call(expected, mock_execute) def test_list_users_with_marker(self): marker = "aMarker" expected = ("SELECT User, Host, Marker FROM" " (SELECT User, Host, CONCAT(User, '@', Host) as Marker" " FROM mysql.user ORDER BY User, Host) as innerquery WHERE" " Host != 'localhost' AND User NOT IN ('os_admin', 'root')" " AND Marker > '" + marker + "'" " ORDER BY Marker;" ) with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.list_users(marker=marker) self._assert_execute_call(expected, mock_execute) def test_list_users_with_include_marker(self): marker = "aMarker" expected = ("SELECT User, Host, Marker FROM" " (SELECT User, Host, CONCAT(User, '@', Host) as Marker" " FROM mysql.user ORDER BY User, Host) as innerquery WHERE" " Host != 'localhost' AND User NOT IN ('os_admin', 'root')" " AND Marker >= '" + marker + "'" " ORDER BY Marker;" ) with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.list_users(marker=marker, include_marker=True) self._assert_execute_call(expected, mock_execute) @patch.object(dbaas.MySqlAdmin, '_associate_dbs') def test_get_user(self, mock_associate_dbs): """ Unit tests for mySqlAdmin.get_user. This test case checks if the sql query formed by the get_user method is correct or not by checking with expected query. 
""" username = "user1" hostname = "%" user = [{"User": "user1", "Host": "%", 'Password': 'some_thing'}] expected = ("SELECT User, Host FROM mysql.user " "WHERE Host != 'localhost' AND User = 'user1' " "AND Host = '%' ORDER BY User, Host;") with patch.object(self.mock_client, 'execute') as mock_execute: fa_mock = Mock(return_value=user) mock_execute.return_value = Mock() mock_execute.return_value.fetchall = fa_mock self.mySqlAdmin.get_user(username, hostname) self.assertEqual(1, mock_associate_dbs.call_count) self._assert_execute_call(expected, mock_execute) @patch('trove.guestagent.datastore.mysql_common.service.LOG') def test_fail_get_user(self, *args): username = "os_admin" hostname = "host" self.assertRaisesRegex(BadRequest, "Username os_admin is not valid", self.mySqlAdmin.get_user, username, hostname) def test_grant_access(self): user = MagicMock() user.name = "test_user" user.host = "%" user.password = 'some_password' databases = ['db1'] expected = ("GRANT ALL PRIVILEGES ON `db1`.* TO `test_user`@`%` " "IDENTIFIED BY PASSWORD 'some_password';") with patch.object(self.mock_client, 'execute') as mock_execute: with patch.object(self.mySqlAdmin, '_get_user', return_value=user): self.mySqlAdmin.grant_access('test_user', '%', databases) self._assert_execute_call(expected, mock_execute) @patch('trove.guestagent.datastore.mysql_common.service.LOG') def test_fail_grant_access(self, *args): user = MagicMock() user.name = "test_user" user.host = "%" user.password = 'some_password' databases = ['mysql'] with patch.object(self.mock_client, 'execute') as mock_execute: with patch.object(self.mySqlAdmin, '_get_user', return_value=user): self.mySqlAdmin.grant_access('test_user', '%', databases) # since mysql is not a database to be provided access to, # testing that executed was not called in grant access. 
mock_execute.assert_not_called() def test_is_root_enabled(self): expected = ("SELECT User FROM mysql.user WHERE " "User = 'root' AND Host != 'localhost';") with patch.object(dbaas.MySqlRootAccess, 'local_sql_client', return_value=self.mock_cli_ctx_mgr): with patch.object(self.mock_client, 'execute') as mock_execute: self.mySqlAdmin.is_root_enabled() self._assert_execute_call(expected, mock_execute) def test_revoke_access(self): user = MagicMock() user.name = "test_user" user.host = "%" user.password = 'some_password' databases = ['db1'] expected = ("REVOKE ALL ON `['db1']`.* FROM `test_user`@`%`;") with patch.object(self.mock_client, 'execute') as mock_execute: with patch.object(self.mySqlAdmin, '_get_user', return_value=user): self.mySqlAdmin.revoke_access('test_usr', '%', databases) self._assert_execute_call(expected, mock_execute) def test_list_access(self): user = MagicMock() user.name = "test_user" user.host = "%" user.databases = ['db1', 'db2'] with patch.object(self.mock_client, 'execute'): with patch.object(self.mySqlAdmin, '_get_user', return_value=user): databases = self.mySqlAdmin.list_access('test_usr', '%') self.assertEqual(2, len(databases), "List access queries are not the same") class MySqlAppTest(trove_testtools.TestCase): def setUp(self): conductor_cli_patcher = patch.object(conductor_api.API, 'get_client') self.addCleanup(conductor_cli_patcher.stop) conductor_cli_patcher.start() super(MySqlAppTest, self).setUp() self.orig_utils_execute_with_timeout = \ mysql_common_service.utils.execute_with_timeout self.orig_time_sleep = time.sleep self.orig_time_time = time.time self.orig_unlink = os.unlink self.orig_service_discovery = operating_system.service_discovery mysql_app_patcher = patch.multiple(mysql_common_service.BaseMySqlApp, get_engine=DEFAULT, get_auth_password=DEFAULT, configuration_manager=DEFAULT) self.addCleanup(mysql_app_patcher.stop) mysql_app_patcher.start() self.FAKE_ID = str(uuid4()) InstanceServiceStatus.create(instance_id=self.FAKE_ID, status=rd_instance.ServiceStatuses.NEW) self.appStatus = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.mySqlApp = MySqlApp(self.appStatus) mysql_service = {'cmd_start': Mock(), 'cmd_stop': Mock(), 'cmd_enable': Mock(), 'cmd_disable': Mock(), 'cmd_bootstrap_galera_cluster': Mock(), 'bin': Mock()} operating_system.service_discovery = Mock( return_value=mysql_service) time.sleep = Mock() time.time = Mock(side_effect=faketime) os.unlink = Mock() self.mock_client = Mock() self.mock_execute = Mock() self.mock_client.__enter__ = Mock() self.mock_client.__exit__ = Mock() self.mock_client.__enter__.return_value.execute = self.mock_execute self.orig_create_engine = sqlalchemy.create_engine def tearDown(self): mysql_common_service.utils.execute_with_timeout = \ self.orig_utils_execute_with_timeout time.sleep = self.orig_time_sleep time.time = self.orig_time_time os.unlink = self.orig_unlink operating_system.service_discovery = self.orig_service_discovery InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() sqlalchemy.create_engine = self.orig_create_engine super(MySqlAppTest, self).tearDown() def assert_reported_status(self, expected_status): service_status = InstanceServiceStatus.find_by( instance_id=self.FAKE_ID) self.assertEqual(expected_status, service_status.status) def mysql_starts_successfully(self): def start(update_db=False): self.appStatus.set_next_status( rd_instance.ServiceStatuses.RUNNING) self.mySqlApp.start_mysql.side_effect = start def mysql_starts_unsuccessfully(self): def start(): raise 
RuntimeError("MySQL failed to start!") self.mySqlApp.start_mysql.side_effect = start def mysql_stops_successfully(self): def stop(): self.appStatus.set_next_status( rd_instance.ServiceStatuses.SHUTDOWN) self.mySqlApp.stop_db.side_effect = stop def mysql_stops_unsuccessfully(self): def stop(): raise RuntimeError("MySQL failed to stop!") self.mySqlApp.stop_db.side_effect = stop def test_stop_mysql(self): mysql_common_service.utils.execute_with_timeout = Mock() self.appStatus.set_next_status( rd_instance.ServiceStatuses.SHUTDOWN) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.mySqlApp.stop_db() self.assert_reported_status(rd_instance.ServiceStatuses.NEW) def test_stop_mysql_with_db_update(self): mysql_common_service.utils.execute_with_timeout = Mock() self.appStatus.set_next_status( rd_instance.ServiceStatuses.SHUTDOWN) self.patch_conf_property('guest_id', self.FAKE_ID) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: with patch.object(conductor_api.API, 'heartbeat') as patch_hb: patch_pc.__get__ = Mock(return_value=True) self.mySqlApp.stop_db(True) patch_hb.assert_called_once_with( self.FAKE_ID, {'service_status': rd_instance.ServiceStatuses.SHUTDOWN.description}, sent=ANY) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test_stop_mysql_do_not_start_on_reboot(self, mock_execute): self.appStatus.set_next_status( rd_instance.ServiceStatuses.SHUTDOWN) self.patch_conf_property('guest_id', self.FAKE_ID) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: with patch.object(conductor_api.API, 'heartbeat') as patch_hb: patch_pc.__get__ = Mock(return_value=True) self.mySqlApp.stop_db(True, True) patch_hb.assert_called_once_with( self.FAKE_ID, {'service_status': rd_instance.ServiceStatuses.SHUTDOWN.description}, sent=ANY) self.assertEqual(2, mock_execute.call_count) @patch('trove.guestagent.datastore.service.LOG') @patch('trove.guestagent.datastore.mysql_common.service.LOG') def test_stop_mysql_error(self, *args): mysql_common_service.utils.execute_with_timeout = Mock() self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) self.mySqlApp.state_change_wait_time = 1 with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.assertRaises(RuntimeError, self.mySqlApp.stop_db) @patch('trove.guestagent.datastore.mysql_common.service.LOG') @patch.object(operating_system, 'service_discovery', side_effect=KeyError('error')) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test_stop_mysql_key_error(self, mock_execute, mock_service, mock_logging): with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.assertRaisesRegex(RuntimeError, 'Service is not discovered.', self.mySqlApp.stop_db) self.assertEqual(0, mock_execute.call_count) def test_restart_is_successful(self): self.mySqlApp.start_mysql = Mock() self.mySqlApp.stop_db = Mock() self.mysql_stops_successfully() self.mysql_starts_successfully() self.patch_conf_property('guest_id', self.FAKE_ID) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: with patch.object(conductor_api.API, 'heartbeat') as patch_hb: patch_pc.__get__ = Mock(return_value=True) self.mySqlApp.restart() self.assertTrue(self.mySqlApp.stop_db.called) self.assertTrue(self.mySqlApp.start_mysql.called) patch_hb.assert_called_once_with( self.FAKE_ID, {'service_status': 
rd_instance.ServiceStatuses.RUNNING.description}, sent=ANY) def test_restart_mysql_wont_start_up(self): self.mySqlApp.start_mysql = Mock() self.mySqlApp.stop_db = Mock() self.mysql_stops_unsuccessfully() self.mysql_starts_unsuccessfully() with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.assertRaises(RuntimeError, self.mySqlApp.restart) self.assertTrue(self.mySqlApp.stop_db.called) self.assertFalse(self.mySqlApp.start_mysql.called) self.assert_reported_status(rd_instance.ServiceStatuses.NEW) @patch('trove.guestagent.datastore.mysql_common.service.LOG') @patch.object(dbaas.MySqlApp, 'get_data_dir', return_value='some path') def test_wipe_ib_logfiles_error(self, get_datadir_mock, mock_logging): mocked = Mock(side_effect=ProcessExecutionError('Error')) mysql_common_service.utils.execute_with_timeout = mocked self.assertRaises(ProcessExecutionError, self.mySqlApp.wipe_ib_logfiles) def test_start_mysql(self): mysql_common_service.utils.execute_with_timeout = Mock() self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) self.mySqlApp._enable_mysql_on_boot = Mock() self.mySqlApp.start_mysql() self.assert_reported_status(rd_instance.ServiceStatuses.NEW) def test_start_mysql_with_db_update(self): mysql_common_service.utils.execute_with_timeout = Mock() self.mySqlApp._enable_mysql_on_boot = Mock() self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) self.patch_conf_property('guest_id', self.FAKE_ID) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: with patch.object(conductor_api.API, 'heartbeat') as patch_hb: patch_pc.__get__ = Mock(return_value=True) self.mySqlApp.start_mysql(update_db=True) patch_hb.assert_called_once_with( self.FAKE_ID, {'service_status': rd_instance.ServiceStatuses.RUNNING.description}, sent=ANY) @patch('trove.guestagent.datastore.mysql_common.service.LOG') @patch('trove.guestagent.datastore.service.LOG') def test_start_mysql_runs_forever(self, *args): mysql_common_service.utils.execute_with_timeout = Mock() self.mySqlApp._enable_mysql_on_boot = Mock() self.mySqlApp.state_change_wait_time = 1 self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN) self.patch_conf_property('guest_id', self.FAKE_ID) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: with patch.object(conductor_api.API, 'heartbeat') as patch_hb: patch_pc.__get__ = Mock(return_value=True) self.assertRaises(RuntimeError, self.mySqlApp.start_mysql) patch_hb.assert_called_once_with( self.FAKE_ID, {'service_status': rd_instance.ServiceStatuses.SHUTDOWN.description}, sent=ANY) @patch('trove.guestagent.datastore.service.LOG') @patch('trove.guestagent.datastore.mysql_common.service.LOG') def test_start_mysql_error(self, *args): self.mySqlApp._enable_mysql_on_boot = Mock() mocked = Mock(side_effect=ProcessExecutionError('Error')) mysql_common_service.utils.execute_with_timeout = mocked with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) self.assertRaises(RuntimeError, self.mySqlApp.start_mysql) def test_start_db_with_conf_changes(self): self.mySqlApp.start_mysql = Mock() self.mysql_starts_successfully() self.appStatus.status = rd_instance.ServiceStatuses.SHUTDOWN with patch.object(self.mySqlApp, '_reset_configuration') as cfg_reset: configuration = 'some junk' self.mySqlApp.start_db_with_conf_changes(configuration) cfg_reset.assert_called_once_with(configuration) self.assertTrue(self.mySqlApp.start_mysql.called) 
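            # mysql_starts_successfully() wired the mocked start_mysql to
            # flip the fake status to RUNNING, which the final assertion
            # below picks up via _get_actual_db_status().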
self.assertEqual(rd_instance.ServiceStatuses.RUNNING, self.appStatus._get_actual_db_status()) @patch('trove.guestagent.datastore.mysql_common.service.LOG') def test_start_db_with_conf_changes_mysql_is_running(self, *args): self.mySqlApp.start_mysql = Mock() self.appStatus.status = rd_instance.ServiceStatuses.RUNNING self.assertRaises(RuntimeError, self.mySqlApp.start_db_with_conf_changes, Mock()) def test_configuration_reset(self): with patch.object(self.mySqlApp, '_reset_configuration') as cfg_reset: configuration = {'config_contents': 'some junk'} self.mySqlApp.reset_configuration(configuration=configuration) cfg_reset.assert_called_once_with('some junk') @patch.object(dbaas.MySqlApp, 'get_auth_password', return_value='some_password') def test_reset_configuration(self, auth_pwd_mock): save_cfg_mock = Mock() save_auth_mock = Mock() wipe_ib_mock = Mock() configuration = {'config_contents': 'some junk'} self.mySqlApp.configuration_manager.save_configuration = save_cfg_mock self.mySqlApp._save_authentication_properties = save_auth_mock self.mySqlApp.wipe_ib_logfiles = wipe_ib_mock self.mySqlApp.reset_configuration(configuration=configuration) save_cfg_mock.assert_called_once_with('some junk') save_auth_mock.assert_called_once_with( auth_pwd_mock.return_value) wipe_ib_mock.assert_called_once_with() @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test__enable_mysql_on_boot(self, mock_execute): mysql_service = \ mysql_common_service.operating_system.service_discovery(["mysql"]) self.mySqlApp._enable_mysql_on_boot() self.assertEqual(1, mock_execute.call_count) mock_execute.assert_called_with(mysql_service['cmd_enable'], shell=True) @patch('trove.guestagent.datastore.mysql_common.service.LOG') @patch.object(operating_system, 'service_discovery', side_effect=KeyError('error')) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test_fail__enable_mysql_on_boot(self, mock_execute, mock_service, mock_logging): self.assertRaisesRegex(RuntimeError, 'Service is not discovered.', self.mySqlApp._enable_mysql_on_boot) self.assertEqual(0, mock_execute.call_count) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test__disable_mysql_on_boot(self, mock_execute): mysql_service = \ mysql_common_service.operating_system.service_discovery(["mysql"]) self.mySqlApp._disable_mysql_on_boot() self.assertEqual(1, mock_execute.call_count) mock_execute.assert_called_with(mysql_service['cmd_disable'], shell=True) @patch('trove.guestagent.datastore.mysql_common.service.LOG') @patch.object(operating_system, 'service_discovery', side_effect=KeyError('error')) @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test_fail__disable_mysql_on_boot(self, mock_execute, mock_service, mock_logging): self.assertRaisesRegex(RuntimeError, 'Service is not discovered.', self.mySqlApp._disable_mysql_on_boot) self.assertEqual(0, mock_execute.call_count) def test_update_overrides(self): override_value = {'key': 'value'} with patch.object(self.mySqlApp.configuration_manager, 'apply_user_override') as apply_usr_mock: self.mySqlApp.update_overrides(override_value) apply_usr_mock.assert_called_once_with({'mysqld': override_value}) def test_remove_override(self): with patch.object(self.mySqlApp.configuration_manager, 'remove_user_override') as remove_usr_mock: self.mySqlApp.remove_overrides() remove_usr_mock.assert_called_once_with() def test_write_replication_source_overrides(self): with patch.object(self.mySqlApp.configuration_manager, 
    def test_write_replication_source_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'apply_system_override') as apply_sys_mock:
            self.mySqlApp.write_replication_source_overrides('something')
            apply_sys_mock.assert_called_once_with(
                'something', mysql_common_service.CNF_MASTER)

    def test_write_replication_replica_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'apply_system_override') as apply_sys_mock:
            self.mySqlApp.write_replication_replica_overrides('something')
            apply_sys_mock.assert_called_once_with(
                'something', mysql_common_service.CNF_SLAVE)

    def test_remove_replication_source_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'remove_system_override') as remove_sys_mock:
            self.mySqlApp.remove_replication_source_overrides()
            remove_sys_mock.assert_called_once_with(
                mysql_common_service.CNF_MASTER)

    def test_remove_replication_replica_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'remove_system_override') as remove_sys_mock:
            self.mySqlApp.remove_replication_replica_overrides()
            remove_sys_mock.assert_called_once_with(
                mysql_common_service.CNF_SLAVE)

    def test_exists_replication_source_overrides(self):
        with patch.object(self.mySqlApp.configuration_manager,
                          'has_system_override',
                          return_value=Mock()) as exists_mock:
            self.assertEqual(
                exists_mock.return_value,
                self.mySqlApp.exists_replication_source_overrides())

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_grant_replication_privilege(self, *args):
        replication_user = {'name': 'testUSr', 'password': 'somePwd'}
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.grant_replication_privilege(replication_user)
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("GRANT REPLICATION SLAVE ON *.* TO `testUSr`@`%` "
                        "IDENTIFIED BY 'somePwd';")
            self.assertEqual(expected, args[0].text,
                             "Replication grant statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_get_port(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.get_port()
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("SELECT @@port")
            self.assertEqual(expected, args[0],
                             "Port queries are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_get_binlog_position(self, *args):
        result = {'File': 'mysql-bin.003', 'Position': '73'}
        self.mock_execute.return_value.first = Mock(return_value=result)
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            found_result = self.mySqlApp.get_binlog_position()
            self.assertEqual(result['File'], found_result['log_file'])
            self.assertEqual(result['Position'], found_result['position'])
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("SHOW MASTER STATUS")
            self.assertEqual(expected, args[0],
                             "Master status queries are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_execute_on_client(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.execute_on_client('show tables')
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("show tables")
            self.assertEqual(expected, args[0],
                             "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    @patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
    def test_start_slave(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.start_slave()
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("START SLAVE")
            self.assertEqual(expected, args[0],
                             "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    @patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
    def test_stop_slave_with_failover(self, *args):
        self.mock_execute.return_value.first = Mock(
            return_value={'Master_User': 'root'})
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            result = self.mySqlApp.stop_slave(True)
            self.assertEqual('root', result['replication_user'])
            expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL"]
            self.assertEqual(len(expected),
                             len(self.mock_execute.call_args_list))
            for i in range(len(self.mock_execute.call_args_list)):
                args, _ = self.mock_execute.call_args_list[i]
                self.assertEqual(expected[i], args[0],
                                 "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    @patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
    def test_stop_slave_without_failover(self, *args):
        self.mock_execute.return_value.first = Mock(
            return_value={'Master_User': 'root'})
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            result = self.mySqlApp.stop_slave(False)
            self.assertEqual('root', result['replication_user'])
            expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL",
                        "DROP USER root"]
            self.assertEqual(len(expected),
                             len(self.mock_execute.call_args_list))
            for i in range(len(self.mock_execute.call_args_list)):
                args, _ = self.mock_execute.call_args_list[i]
                self.assertEqual(expected[i], args[0],
                                 "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_stop_master(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.stop_master()
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("RESET MASTER")
            self.assertEqual(expected, args[0],
                             "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test__wait_for_slave_status(self, *args):
        mock_client = Mock()
        mock_client.execute = Mock()
        result = ['Slave_running', 'on']
        mock_client.execute.return_value.first = Mock(return_value=result)
        self.mySqlApp._wait_for_slave_status('ON', mock_client, 5)
        args, _ = mock_client.execute.call_args_list[0]
        expected = ("SHOW GLOBAL STATUS like 'slave_running'")
        self.assertEqual(expected, args[0],
                         "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    @patch.object(utils, 'poll_until', side_effect=PollTimeOut)
    def test_fail__wait_for_slave_status(self, *args):
        self.assertRaisesRegex(RuntimeError,
                               "Replication is not on after 5 seconds.",
                               self.mySqlApp._wait_for_slave_status, 'ON',
                               Mock(), 5)

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test__get_slave_status(self, *args):
        self.mock_execute.return_value.first = Mock(return_value='some_thing')
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            result = self.mySqlApp._get_slave_status()
            self.assertEqual('some_thing', result)
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("SHOW SLAVE STATUS")
            self.assertEqual(expected, args[0],
                             "Sql statements are not the same")
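    # The GTID-based tests below assume MySQL's @@global.gtid_executed
    # format, e.g. 'b1f3f33a-0789-ee1c-43f3-f8373e12f1ea:1' or 'uuid:1-5';
    # get_txn_count is expected to parse that set and count transactions,
    # so a single ':1' entry yields a count of 1.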
    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_get_latest_txn_id(self, *args):
        self.mock_execute.return_value.first = Mock(
            return_value=['some_thing'])
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            result = self.mySqlApp.get_latest_txn_id()
            self.assertEqual('some_thing', result)
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("SELECT @@global.gtid_executed")
            self.assertEqual(expected, args[0],
                             "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_wait_for_txn(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.wait_for_txn('abcd')
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('abcd')")
            self.assertEqual(expected, args[0],
                             "Sql statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_get_txn_count(self, *args):
        self.mock_execute.return_value.first = Mock(
            return_value=['b1f3f33a-0789-ee1c-43f3-f8373e12f1ea:1'])
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            result = self.mySqlApp.get_txn_count()
            self.assertEqual(1, result)
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("SELECT @@global.gtid_executed")
            self.assertEqual(expected, args[0],
                             "Sql statements are not the same")

    @patch.multiple(pkg.Package,
                    pkg_is_installed=Mock(return_value=False),
                    pkg_install=DEFAULT)
    def test_install(self, pkg_install):
        self.mySqlApp._install_mysql = Mock()
        utils.execute_with_timeout = Mock()
        self.mySqlApp._clear_mysql_config = Mock()
        self.mySqlApp._create_mysql_confd_dir = Mock()
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.install_if_needed(["package"])
        self.assertTrue(pkg_install.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(operating_system, 'write_file')
    def test_save_authentication_properties(self, write_file_mock):
        self.mySqlApp._save_authentication_properties("some_password")
        write_file_mock.assert_called_once_with(
            MySqlApp.get_client_auth_file(),
            {'client': {'host': 'localhost',
                        'password': 'some_password',
                        'user': mysql_common_service.ADMIN_USER_NAME}},
            codec=MySqlApp.CFG_CODEC)

    @patch.object(utils, 'generate_random_password',
                  return_value='some_password')
    @patch.object(mysql_common_service, 'clear_expired_password')
    def test_secure(self, clear_pwd_mock, auth_pwd_mock):
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mySqlApp._reset_configuration = Mock()
        self.mySqlApp._apply_user_overrides = Mock()
        self.mysql_stops_successfully()
        self.mysql_starts_successfully()
        sqlalchemy.create_engine = Mock()
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.mySqlApp.secure('contents')
            self.assertTrue(self.mySqlApp.stop_db.called)
            self.mySqlApp._reset_configuration.assert_has_calls(
                [call('contents', auth_pwd_mock.return_value)])
            self.assertTrue(self.mySqlApp.start_mysql.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(dbaas, 'get_engine')
    @patch.object(utils, 'generate_random_password',
                  return_value='some_password')
    @patch.object(operating_system, 'write_file')
    def test_secure_root(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.secure_root()
            update_root_password, _ = self.mock_execute.call_args_list[0]
            update_expected = ("SET PASSWORD FOR 'root'@'localhost' = "
                               "PASSWORD('some_password');")
            remove_root, _ = self.mock_execute.call_args_list[1]
            remove_expected = ("DELETE FROM mysql.user WHERE "
                               "User = 'root' AND Host != 'localhost';")
            self.assertEqual(update_expected, update_root_password[0].text,
                             "Update root password queries are not the same")
            self.assertEqual(remove_expected, remove_root[0].text,
                             "Remove root queries are not the same")

    @patch.object(operating_system, 'create_directory')
    def test__create_mysql_confd_dir(self, mkdir_mock):
        self.mySqlApp._create_mysql_confd_dir()
        mkdir_mock.assert_called_once_with('/etc/mysql/conf.d', as_root=True)

    @patch.object(operating_system, 'move')
    def test__clear_mysql_config(self, mock_move):
        self.mySqlApp._clear_mysql_config()
        self.assertEqual(3, mock_move.call_count)

    @patch.object(operating_system, 'move',
                  side_effect=ProcessExecutionError)
    def test_exception__clear_mysql_config(self, mock_move):
        self.mySqlApp._clear_mysql_config()
        # The call count must match the normal case: each exception is
        # swallowed so the flow moves on to the next file move.
        self.assertEqual(3, mock_move.call_count)

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_apply_overrides(self, *args):
        overrides = {'sort_buffer_size': 1000000}
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.apply_overrides(overrides)
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("SET GLOBAL sort_buffer_size=1000000")
            self.assertEqual(expected, args[0].text,
                             "Set global statements are not the same")

    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_make_read_only(self, *args):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp.make_read_only('ON')
            args, _ = self.mock_execute.call_args_list[0]
            expected = ("set global read_only = ON")
            self.assertEqual(expected, args[0].text,
                             "Set read_only statements are not the same")

    @patch.multiple(pkg.Package,
                    pkg_is_installed=Mock(return_value=False),
                    pkg_install=Mock(
                        side_effect=pkg.PkgPackageStateError(
                            "Install error")))
    def test_install_install_error(self):
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mySqlApp._clear_mysql_config = Mock()
        self.mySqlApp._create_mysql_confd_dir = Mock()
        self.assertRaises(pkg.PkgPackageStateError,
                          self.mySqlApp.install_if_needed, ["package"])
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(mysql_common_service, 'clear_expired_password')
    def test_secure_write_conf_error(self, clear_pwd_mock):
        self.mySqlApp.start_mysql = Mock()
        self.mySqlApp.stop_db = Mock()
        self.mySqlApp._reset_configuration = Mock(
            side_effect=IOError("Could not write file"))
        self.mySqlApp._apply_user_overrides = Mock()
        self.mysql_stops_successfully()
        self.mysql_starts_successfully()
        sqlalchemy.create_engine = Mock()
        self.assertRaises(IOError, self.mySqlApp.secure, "foo")
        self.assertTrue(self.mySqlApp.stop_db.called)
        self.assertFalse(self.mySqlApp.start_mysql.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(dbaas.MySqlApp, '_save_authentication_properties')
    @patch.object(dbaas, 'get_engine',
                  return_value=MagicMock(name='get_engine'))
    def test_reset_admin_password(self, mock_engine, mock_save_auth):
        with patch.object(dbaas.MySqlApp, 'local_sql_client',
                          return_value=self.mock_client):
            self.mySqlApp._create_admin_user = Mock()
            self.mySqlApp.reset_admin_password("newpassword")
            self.assertEqual(1, self.mySqlApp._create_admin_user.call_count)
            mock_save_auth.assert_called_once_with("newpassword")
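# The matcher class below compares by substring against the .text attribute
# of SQLAlchemy text() clauses, so tests can assert on generated SQL without
# exact string equality. A minimal usage sketch (illustrative only; this
# _example_* helper is not part of the test suite and is never invoked):
def _example_text_clause_matcher_usage():
    conn = MagicMock()
    conn.execute(sqlalchemy.text("CREATE USER 'root'@'%';"))
    # assert_any_call succeeds because TextClauseMatcher.__eq__ checks
    # that 'CREATE USER' occurs in the recorded clause's .text.
    conn.execute.assert_any_call(TextClauseMatcher('CREATE USER'))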
class TextClauseMatcher(object):

    def __init__(self, text):
        self.text = text

    def __repr__(self):
        return "TextClause(%s)" % self.text

    def __eq__(self, arg):
        return self.text in arg.text


class MySqlAppMockTest(trove_testtools.TestCase):

    def setUp(self):
        super(MySqlAppMockTest, self).setUp()
        self.orig_utils_execute_with_timeout = utils.execute_with_timeout
        create_engine_patcher = patch.object(sqlalchemy, 'create_engine')
        self.addCleanup(create_engine_patcher.stop)
        create_engine_patcher.start()
        self.mock_cli_ctx_mgr = Mock()
        self.mock_client = MagicMock()
        self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client)
        self.mock_cli_ctx_mgr.__exit__ = Mock()
        local_client_patcher = patch.object(
            dbaas.MySqlApp, 'local_sql_client',
            return_value=self.mock_cli_ctx_mgr)
        self.addCleanup(local_client_patcher.stop)
        local_client_patcher.start()

    def tearDown(self):
        utils.execute_with_timeout = self.orig_utils_execute_with_timeout
        super(MySqlAppMockTest, self).tearDown()

    @patch('trove.guestagent.common.configuration.ConfigurationManager'
           '.refresh_cache')
    @patch.object(mysql_common_service, 'clear_expired_password')
    @patch.object(utils, 'generate_random_password',
                  return_value='some_password')
    def test_secure_keep_root(self, auth_pwd_mock, clear_pwd_mock, _):
        with patch.object(self.mock_client,
                          'execute', return_value=None) as mock_execute:
            utils.execute_with_timeout = MagicMock(return_value=None)
            # Skip writing the file for now.
            with patch.object(os.path, 'isfile', return_value=False):
                mock_status = MagicMock()
                mock_status.wait_for_real_status_to_change_to = MagicMock(
                    return_value=True)
                app = MySqlApp(mock_status)
                app._reset_configuration = MagicMock()
                app.start_mysql = MagicMock(return_value=None)
                app.stop_db = MagicMock(return_value=None)
                app.secure('foo')
                reset_config_calls = [call('foo', auth_pwd_mock.return_value)]
                app._reset_configuration.assert_has_calls(reset_config_calls)
                self.assertTrue(mock_execute.called)

    @patch('trove.guestagent.common.configuration.ConfigurationManager'
           '.refresh_cache')
    @patch.object(mysql_common_service, 'clear_expired_password')
    @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password',
                  return_value='some_password')
    def test_secure_with_mycnf_error(self, *args):
        with patch.object(self.mock_client,
                          'execute', return_value=None) as mock_execute:
            with patch.object(operating_system, 'service_discovery',
                              return_value={
                                  'cmd_stop': 'service mysql stop'}):
                utils.execute_with_timeout = MagicMock(return_value=None)
                # Skip writing the file for now.
                with patch.object(dbaas.MySqlApp, '_reset_configuration',
                                  side_effect=RuntimeError('Error')):
                    mock_status = MagicMock()
                    mock_status.wait_for_real_status_to_change_to = MagicMock(
                        return_value=True)
                    mysql_common_service.clear_expired_password = \
                        MagicMock(return_value=None)
                    app = MySqlApp(mock_status)
                    mysql_common_service.clear_expired_password = \
                        MagicMock(return_value=None)
                    self.assertRaises(RuntimeError, app.secure, None)
                    self.assertTrue(mock_execute.called)
                    # At least called twice.
                    self.assertGreaterEqual(mock_execute.call_count, 2)
                    (mock_status.wait_for_real_status_to_change_to.
                     assert_called_with(rd_instance.ServiceStatuses.SHUTDOWN,
                                        app.state_change_wait_time, False))
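# A minimal sketch of the context-manager mock wired up in the setUp()
# methods of the neighboring test classes: local_sql_client() is entered
# via "with", so the stand-in needs __enter__/__exit__. This helper is
# illustrative only and never invoked by the suite.
def _example_client_context_manager():
    ctx = Mock()
    client = MagicMock()
    # Assigning magic methods on a Mock instance installs them on its
    # per-instance class, which is what makes "with ctx" work below.
    ctx.__enter__ = Mock(return_value=client)
    ctx.__exit__ = Mock(return_value=False)
    with ctx as c:
        c.execute("SELECT 1")
    client.execute.assert_called_once_with("SELECT 1")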
class MySqlRootStatusTest(trove_testtools.TestCase):

    def setUp(self):
        super(MySqlRootStatusTest, self).setUp()
        self.orig_utils_execute_with_timeout = utils.execute_with_timeout
        create_engine_patcher = patch.object(sqlalchemy, 'create_engine')
        self.addCleanup(create_engine_patcher.stop)
        create_engine_patcher.start()
        mysql_app_patcher = patch.multiple(MySqlApp, get_engine=DEFAULT,
                                           configuration_manager=DEFAULT)
        self.addCleanup(mysql_app_patcher.stop)
        mysql_app_patcher.start()
        self.mock_cli_ctx_mgr = Mock()
        self.mock_client = MagicMock()
        self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client)
        self.mock_cli_ctx_mgr.__exit__ = Mock()
        local_client_patcher = patch.object(
            dbaas.MySqlRootAccess, 'local_sql_client',
            return_value=self.mock_cli_ctx_mgr)
        self.addCleanup(local_client_patcher.stop)
        local_client_patcher.start()

    def tearDown(self):
        utils.execute_with_timeout = self.orig_utils_execute_with_timeout
        super(MySqlRootStatusTest, self).tearDown()

    @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password',
                  return_value='some_password')
    def test_root_is_enabled(self, auth_pwd_mock):
        mock_rs = MagicMock()
        mock_rs.rowcount = 1
        with patch.object(self.mock_client, 'execute', return_value=mock_rs):
            self.assertTrue(MySqlRootAccess().is_root_enabled())

    @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password',
                  return_value='some_password')
    def test_root_is_not_enabled(self, auth_pwd_mock):
        mock_rs = MagicMock()
        mock_rs.rowcount = 0
        with patch.object(self.mock_client, 'execute', return_value=mock_rs):
            self.assertFalse(MySqlRootAccess().is_root_enabled())

    @patch.object(mysql_common_service, 'clear_expired_password')
    @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password',
                  return_value='some_password')
    def test_enable_root(self, auth_pwd_mock, clear_pwd_mock):
        with patch.object(self.mock_client,
                          'execute', return_value=None) as mock_execute:
            # Invocation.
            user_ser = MySqlRootAccess().enable_root()
            # Verification.
            self.assertIsNotNone(user_ser)
            mock_execute.assert_any_call(TextClauseMatcher('CREATE USER'),
                                         user='root', host='%')
            mock_execute.assert_any_call(
                TextClauseMatcher('GRANT ALL PRIVILEGES ON *.*'))
            mock_execute.assert_any_call(TextClauseMatcher('SET PASSWORD'))

    @patch.object(MySqlRootAccess, 'enable_root')
    def test_root_disable(self, enable_root_mock):
        # Invocation.
        MySqlRootAccess().disable_root()
        # Verification.
        enable_root_mock.assert_called_once_with(root_password=None)


class MockStats(object):
    f_blocks = 1024 ** 2
    f_bsize = 4096
    f_bfree = 512 * 1024


class InterrogatorTest(trove_testtools.TestCase):

    def tearDown(self):
        super(InterrogatorTest, self).tearDown()

    def test_get_filesystem_volume_stats(self):
        with patch.object(os, 'statvfs', return_value=MockStats):
            result = get_filesystem_volume_stats('/some/path/')
        self.assertEqual(4096, result['block_size'])
        self.assertEqual(1048576, result['total_blocks'])
        self.assertEqual(524288, result['free_blocks'])
        self.assertEqual(4.0, result['total'])
        self.assertEqual(2147483648, result['free'])
        self.assertEqual(2.0, result['used'])

    @patch('trove.guestagent.dbaas.LOG')
    def test_get_filesystem_volume_stats_error(self, *args):
        with patch.object(os, 'statvfs', side_effect=OSError):
            self.assertRaises(
                RuntimeError,
                get_filesystem_volume_stats, '/nonexistent/path')
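# A worked sketch of the numbers asserted above (local names are
# illustrative, not from the implementation): statvfs-style block counts
# convert to bytes as blocks * block_size, with 'total' and 'used'
# reported in GB. This helper is never invoked by the suite.
def _example_volume_stats_math():
    block_size = 4096            # MockStats.f_bsize
    total_blocks = 1024 ** 2     # MockStats.f_blocks
    free_blocks = 512 * 1024     # MockStats.f_bfree
    total_gb = total_blocks * block_size / (1024.0 ** 3)   # 4.0
    free_bytes = free_blocks * block_size                  # 2147483648
    used_gb = total_gb - free_bytes / (1024.0 ** 3)        # 2.0
    return total_gb, free_bytes, used_gb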
class ServiceRegistryTest(trove_testtools.TestCase):

    def setUp(self):
        super(ServiceRegistryTest, self).setUp()

    def tearDown(self):
        super(ServiceRegistryTest, self).tearDown()

    def test_datastore_registry_with_extra_manager(self):
        datastore_registry_ext_test = {
            'test': 'trove.guestagent.datastore.test.manager.Manager',
        }
        with patch.object(dbaas_sr, 'get_custom_managers',
                          return_value=datastore_registry_ext_test):
            test_dict = dbaas_sr.datastore_registry()
            self.assertEqual(datastore_registry_ext_test.get('test', None),
                             test_dict.get('test'))
            self.assertEqual('trove.guestagent.datastore.mysql.'
                             'manager.Manager',
                             test_dict.get('mysql'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'percona.manager.Manager',
                             test_dict.get('percona'))
            self.assertEqual('trove.guestagent.datastore.experimental.redis.'
                             'manager.Manager',
                             test_dict.get('redis'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'cassandra.manager.Manager',
                             test_dict.get('cassandra'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'couchbase.manager.Manager',
                             test_dict.get('couchbase'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'mongodb.manager.Manager',
                             test_dict.get('mongodb'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'couchdb.manager.Manager',
                             test_dict.get('couchdb'))
            self.assertEqual('trove.guestagent.datastore.experimental.db2.'
                             'manager.Manager',
                             test_dict.get('db2'))

    def test_datastore_registry_with_existing_manager(self):
        datastore_registry_ext_test = {
            'mysql': 'trove.guestagent.datastore.mysql.'
                     'manager.Manager123',
        }
        with patch.object(dbaas_sr, 'get_custom_managers',
                          return_value=datastore_registry_ext_test):
            test_dict = dbaas_sr.datastore_registry()
            self.assertEqual('trove.guestagent.datastore.mysql.'
                             'manager.Manager123',
                             test_dict.get('mysql'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'percona.manager.Manager',
                             test_dict.get('percona'))
            self.assertEqual('trove.guestagent.datastore.experimental.redis.'
                             'manager.Manager',
                             test_dict.get('redis'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'cassandra.manager.Manager',
                             test_dict.get('cassandra'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'couchbase.manager.Manager',
                             test_dict.get('couchbase'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'mongodb.manager.Manager',
                             test_dict.get('mongodb'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'couchdb.manager.Manager',
                             test_dict.get('couchdb'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'vertica.manager.Manager',
                             test_dict.get('vertica'))
            self.assertEqual('trove.guestagent.datastore.experimental.db2.'
                             'manager.Manager',
                             test_dict.get('db2'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'mariadb.manager.Manager',
                             test_dict.get('mariadb'))

    def test_datastore_registry_with_blank_dict(self):
        datastore_registry_ext_test = dict()
        with patch.object(dbaas_sr, 'get_custom_managers',
                          return_value=datastore_registry_ext_test):
            test_dict = dbaas_sr.datastore_registry()
            self.assertEqual('trove.guestagent.datastore.mysql.'
                             'manager.Manager',
                             test_dict.get('mysql'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'percona.manager.Manager',
                             test_dict.get('percona'))
            self.assertEqual('trove.guestagent.datastore.experimental.redis.'
                             'manager.Manager',
                             test_dict.get('redis'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'cassandra.manager.Manager',
                             test_dict.get('cassandra'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'couchbase.manager.Manager',
                             test_dict.get('couchbase'))
            self.assertEqual(
                'trove.guestagent.datastore.experimental.mongodb.'
                'manager.Manager',
                test_dict.get('mongodb'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'couchdb.manager.Manager',
                             test_dict.get('couchdb'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'vertica.manager.Manager',
                             test_dict.get('vertica'))
            self.assertEqual('trove.guestagent.datastore.experimental.db2.'
                             'manager.Manager',
                             test_dict.get('db2'))
            self.assertEqual('trove.guestagent.datastore.experimental.'
                             'mariadb.manager.Manager',
                             test_dict.get('mariadb'))


class KeepAliveConnectionTest(trove_testtools.TestCase):

    class OperationalError(Exception):

        def __init__(self, value):
            # Store the value both directly and in args so __str__ works.
            self.value = value
            self.args = [value]

        def __str__(self):
            return repr(self.value)

    def setUp(self):
        super(KeepAliveConnectionTest, self).setUp()
        self.orig_utils_execute_with_timeout = \
            mysql_common_service.utils.execute_with_timeout
        self.orig_LOG_err = dbaas.LOG

    def tearDown(self):
        super(KeepAliveConnectionTest, self).tearDown()
        mysql_common_service.utils.execute_with_timeout = \
            self.orig_utils_execute_with_timeout
        dbaas.LOG = self.orig_LOG_err

    def test_checkout_type_error(self):
        dbapi_con = Mock()
        dbapi_con.ping = Mock(side_effect=TypeError("Type Error"))
        self.keepAliveConn = KeepAliveConnection()
        self.assertRaises(TypeError, self.keepAliveConn.checkout,
                          dbapi_con, Mock(), Mock())

    def test_checkout_disconnection_error(self):
        dbapi_con = Mock()
        dbapi_con.OperationalError = self.OperationalError
        dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(2013))
        self.keepAliveConn = KeepAliveConnection()
        self.assertRaises(sqlalchemy.exc.DisconnectionError,
                          self.keepAliveConn.checkout,
                          dbapi_con, Mock(), Mock())

    def test_checkout_operation_error(self):
        dbapi_con = Mock()
        dbapi_con.OperationalError = self.OperationalError
        dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(1234))
        self.keepAliveConn = KeepAliveConnection()
        self.assertRaises(self.OperationalError, self.keepAliveConn.checkout,
                          dbapi_con, Mock(), Mock())
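# A rough sketch of the checkout behavior the three tests above pin down
# (inferred from the expectations, not copied from the implementation):
# ping the DBAPI connection on pool checkout and translate "server gone"
# errors such as 2013 into DisconnectionError so the pool discards the
# connection, while other errors (and TypeError) propagate unchanged.
# This helper is illustrative only and never invoked by the suite.
def _example_keepalive_checkout(dbapi_con):
    try:
        dbapi_con.ping(False)
    except dbapi_con.OperationalError as ex:
        if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
            raise sqlalchemy.exc.DisconnectionError()
        raise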
class BaseDbStatusTest(trove_testtools.TestCase):

    def setUp(self):
        super(BaseDbStatusTest, self).setUp()
        util.init_db()
        self.orig_dbaas_time_sleep = time.sleep
        self.orig_time_time = time.time
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        dbaas.CONF.guest_id = self.FAKE_ID
        patcher_log = patch.object(base_datastore_service, 'LOG')
        patcher_context = patch.object(trove_context, 'TroveContext')
        patcher_api = patch.object(conductor_api, 'API')
        patcher_log.start()
        patcher_context.start()
        patcher_api.start()
        self.addCleanup(patcher_log.stop)
        self.addCleanup(patcher_context.stop)
        self.addCleanup(patcher_api.stop)

    def tearDown(self):
        time.sleep = self.orig_dbaas_time_sleep
        time.time = self.orig_time_time
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        dbaas.CONF.guest_id = None
        super(BaseDbStatusTest, self).tearDown()

    @patch.object(operating_system, 'write_file')
    def test_begin_install(self, mock_write_file):
        base_db_status = BaseDbStatus()
        base_db_status.begin_install()
        self.assertEqual(rd_instance.ServiceStatuses.BUILDING,
                         base_db_status.status)

    def test_begin_restart(self):
        base_db_status = BaseDbStatus()
        base_db_status.restart_mode = False
        base_db_status.begin_restart()
        self.assertTrue(base_db_status.restart_mode)

    def test_end_restart(self):
        base_db_status = BaseDbStatus()
        base_db_status._get_actual_db_status = Mock(
            return_value=rd_instance.ServiceStatuses.SHUTDOWN)
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            base_db_status.end_restart()
            self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN,
                             base_db_status.status)
            self.assertFalse(base_db_status.restart_mode)

    def test_is_installed(self):
        base_db_status = BaseDbStatus()
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            self.assertTrue(base_db_status.is_installed)

    def test_is_installed_failed(self):
        base_db_status = BaseDbStatus()
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=False)
            self.assertFalse(base_db_status.is_installed)

    def test_is_restarting(self):
        base_db_status = BaseDbStatus()
        base_db_status.restart_mode = True
        self.assertTrue(base_db_status._is_restarting)

    def test_is_running(self):
        base_db_status = BaseDbStatus()
        base_db_status.status = rd_instance.ServiceStatuses.RUNNING
        self.assertTrue(base_db_status.is_running)

    def test_is_running_not(self):
        base_db_status = BaseDbStatus()
        base_db_status.status = rd_instance.ServiceStatuses.SHUTDOWN
        self.assertFalse(base_db_status.is_running)

    def test_wait_for_real_status_to_change_to(self):
        base_db_status = BaseDbStatus()
        base_db_status._get_actual_db_status = Mock(
            return_value=rd_instance.ServiceStatuses.RUNNING)
        time.sleep = Mock()
        time.time = Mock(side_effect=faketime)
        self.assertTrue(
            base_db_status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.RUNNING, 10))

    def test_wait_for_real_status_to_change_to_timeout(self):
        base_db_status = BaseDbStatus()
        base_db_status._get_actual_db_status = Mock(
            return_value=rd_instance.ServiceStatuses.RUNNING)
        time.sleep = Mock()
        time.time = Mock(side_effect=faketime)
        self.assertFalse(
            base_db_status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.SHUTDOWN, 10))

    def _test_set_status(self, initial_status, new_status,
                         expected_status, install_done=False, force=False):
        base_db_status = BaseDbStatus()
        base_db_status.status = initial_status
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=install_done)
            base_db_status.set_status(new_status, force=force)
        self.assertEqual(expected_status, base_db_status.status)

    def test_set_status_force_heartbeat(self):
        self._test_set_status(rd_instance.ServiceStatuses.BUILDING,
                              rd_instance.ServiceStatuses.RUNNING,
                              rd_instance.ServiceStatuses.RUNNING,
                              force=True)

    def test_set_status_skip_heartbeat_with_building(self):
        self._test_set_status(rd_instance.ServiceStatuses.BUILDING,
                              rd_instance.ServiceStatuses.RUNNING,
                              rd_instance.ServiceStatuses.BUILDING)

    def test_set_status_skip_heartbeat_with_new(self):
        self._test_set_status(rd_instance.ServiceStatuses.NEW,
                              rd_instance.ServiceStatuses.RUNNING,
                              rd_instance.ServiceStatuses.NEW)

    def test_set_status_to_failed(self):
        self._test_set_status(rd_instance.ServiceStatuses.BUILDING,
                              rd_instance.ServiceStatuses.FAILED,
                              rd_instance.ServiceStatuses.FAILED,
                              force=True)

    def test_set_status_to_build_pending(self):
        self._test_set_status(rd_instance.ServiceStatuses.BUILDING,
                              rd_instance.ServiceStatuses.INSTANCE_READY,
                              rd_instance.ServiceStatuses.INSTANCE_READY,
                              force=True)

    def test_set_status_to_shutdown(self):
        self._test_set_status(rd_instance.ServiceStatuses.RUNNING,
                              rd_instance.ServiceStatuses.SHUTDOWN,
                              rd_instance.ServiceStatuses.SHUTDOWN,
                              install_done=True)
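    # The _test_set_status cases above encode the assumed gating rule for
    # guest status updates, roughly:
    #
    #     if force or prepare_completed:
    #         persist new_status (and heartbeat it to the conductor)
    #     else:
    #         keep the current BUILDING/NEW status
    #
    # i.e. before prepare has completed, only forced updates go through.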
    def test_wait_for_database_service_status(self):
        status = BaseDbStatus()
        expected_status = rd_instance.ServiceStatuses.RUNNING
        timeout = 10
        update_db = False

        # Test a successful call.
        with patch.multiple(
                status,
                wait_for_real_status_to_change_to=Mock(return_value=True),
                cleanup_stalled_db_services=DEFAULT):
            self.assertTrue(
                status._wait_for_database_service_status(
                    expected_status, timeout, update_db))
            status.wait_for_real_status_to_change_to.assert_called_once_with(
                expected_status, timeout, update_db)
            self.assertFalse(status.cleanup_stalled_db_services.called)

        # Test a failing call.
        with patch.multiple(
                status,
                wait_for_real_status_to_change_to=Mock(return_value=False),
                cleanup_stalled_db_services=DEFAULT):
            self.assertFalse(
                status._wait_for_database_service_status(
                    expected_status, timeout, update_db))
            status.wait_for_real_status_to_change_to.assert_called_once_with(
                expected_status, timeout, update_db)
            status.cleanup_stalled_db_services.assert_called_once_with()

        # Test a failing call with an error raised from the cleanup code.
        # No exception should propagate out of the cleanup block.
        with patch.multiple(
                status,
                wait_for_real_status_to_change_to=Mock(return_value=False),
                cleanup_stalled_db_services=Mock(
                    side_effect=Exception("Error in cleanup."))):
            self.assertFalse(
                status._wait_for_database_service_status(
                    expected_status, timeout, update_db))
            status.wait_for_real_status_to_change_to.assert_called_once_with(
                expected_status, timeout, update_db)
            status.cleanup_stalled_db_services.assert_called_once_with()

    def test_start_db_service(self):
        status = BaseDbStatus()
        service_candidates = ['name1', 'name2']

        # Test a successful call with setting auto-start enabled.
        with patch.object(
                status, '_wait_for_database_service_status',
                return_value=True) as service_call:
            with patch.multiple(operating_system, start_service=DEFAULT,
                                enable_service_on_boot=DEFAULT) as os_cmd:
                status.start_db_service(
                    service_candidates, 10, enable_on_boot=True)
                service_call.assert_called_once_with(
                    rd_instance.ServiceStatuses.RUNNING, 10, False)
                os_cmd['start_service'].assert_called_once_with(
                    service_candidates, timeout=10)
                os_cmd['enable_service_on_boot'].assert_called_once_with(
                    service_candidates)

        # Test a successful call without auto-start.
        with patch.object(
                status, '_wait_for_database_service_status',
                return_value=True) as service_call:
            with patch.multiple(operating_system, start_service=DEFAULT,
                                enable_service_on_boot=DEFAULT) as os_cmd:
                status.start_db_service(
                    service_candidates, 10, enable_on_boot=False)
                service_call.assert_called_once_with(
                    rd_instance.ServiceStatuses.RUNNING, 10, False)
                os_cmd['start_service'].assert_called_once_with(
                    service_candidates, timeout=10)
                self.assertFalse(os_cmd['enable_service_on_boot'].called)

        # Test a failing call.
        # The auto-start setting should not get updated if the service call
        # fails.
        with patch.object(
                status, '_wait_for_database_service_status',
                return_value=False) as service_call:
            with patch.multiple(operating_system, start_service=DEFAULT,
                                enable_service_on_boot=DEFAULT) as os_cmd:
                self.assertRaisesRegex(
                    RuntimeError, "Database failed to start.",
                    status.start_db_service,
                    service_candidates, 10, enable_on_boot=True)
                os_cmd['start_service'].assert_called_once_with(
                    service_candidates, timeout=10)
                self.assertFalse(os_cmd['enable_service_on_boot'].called)
    def test_stop_db_service(self):
        status = BaseDbStatus()
        service_candidates = ['name1', 'name2']

        # Test a successful call with setting auto-start disabled.
        with patch.object(
                status, '_wait_for_database_service_status',
                return_value=True) as service_call:
            with patch.multiple(operating_system, stop_service=DEFAULT,
                                disable_service_on_boot=DEFAULT) as os_cmd:
                status.stop_db_service(
                    service_candidates, 10, disable_on_boot=True)
                service_call.assert_called_once_with(
                    rd_instance.ServiceStatuses.SHUTDOWN, 10, False)
                os_cmd['stop_service'].assert_called_once_with(
                    service_candidates, timeout=10)
                os_cmd['disable_service_on_boot'].assert_called_once_with(
                    service_candidates)

        # Test a successful call without auto-start.
        with patch.object(
                status, '_wait_for_database_service_status',
                return_value=True) as service_call:
            with patch.multiple(operating_system, stop_service=DEFAULT,
                                disable_service_on_boot=DEFAULT) as os_cmd:
                status.stop_db_service(
                    service_candidates, 10, disable_on_boot=False)
                service_call.assert_called_once_with(
                    rd_instance.ServiceStatuses.SHUTDOWN, 10, False)
                os_cmd['stop_service'].assert_called_once_with(
                    service_candidates, timeout=10)
                self.assertFalse(os_cmd['disable_service_on_boot'].called)

        # Test a failing call.
        # The auto-start setting should not get updated if the service call
        # fails.
        with patch.object(
                status, '_wait_for_database_service_status',
                return_value=False) as service_call:
            with patch.multiple(operating_system, stop_service=DEFAULT,
                                disable_service_on_boot=DEFAULT) as os_cmd:
                self.assertRaisesRegex(
                    RuntimeError, "Database failed to stop.",
                    status.stop_db_service,
                    service_candidates, 10, disable_on_boot=True)
                os_cmd['stop_service'].assert_called_once_with(
                    service_candidates, timeout=10)
                self.assertFalse(os_cmd['disable_service_on_boot'].called)
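    # test_restart_db_service below exercises the assumed stop/start
    # sandwich, with end_restart() expected even on failure so status
    # heartbeats resume; conceptually:
    #
    #     self.begin_restart()
    #     try:
    #         self.stop_db_service(..., update_db=False)
    #         self.start_db_service(..., update_db=False)
    #     finally:
    #         self.end_restart()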
    def test_restart_db_service(self):
        status = BaseDbStatus()
        service_candidates = ['name1', 'name2']

        # Test the restart flow (stop followed by start).
        # Assert that the auto-start setting does not get changed and the
        # Trove instance status updates are suppressed during restart.
        with patch.multiple(
                status, start_db_service=DEFAULT, stop_db_service=DEFAULT,
                begin_restart=DEFAULT, end_restart=DEFAULT):
            status.restart_db_service(service_candidates, 10)
            status.begin_restart.assert_called_once_with()
            status.stop_db_service.assert_called_once_with(
                service_candidates, 10, disable_on_boot=False,
                update_db=False)
            status.start_db_service.assert_called_once_with(
                service_candidates, 10, enable_on_boot=False,
                update_db=False)
            status.end_restart.assert_called_once_with()

        # Test a failing call.
        # Assert the status heartbeat gets re-enabled.
        with patch.multiple(
                status, start_db_service=Mock(
                    side_effect=Exception("Error in database start.")),
                stop_db_service=DEFAULT, begin_restart=DEFAULT,
                end_restart=DEFAULT):
            self.assertRaisesRegex(
                RuntimeError, "Database restart failed.",
                status.restart_db_service, service_candidates, 10)
            status.begin_restart.assert_called_once_with()
            status.end_restart.assert_called_once_with()


class MySqlAppStatusTest(trove_testtools.TestCase):

    def setUp(self):
        super(MySqlAppStatusTest, self).setUp()
        util.init_db()
        self.orig_utils_execute_with_timeout = \
            mysql_common_service.utils.execute_with_timeout
        self.orig_load_mysqld_options = \
            mysql_common_service.load_mysqld_options
        self.orig_mysql_common_service_os_path_exists = \
            mysql_common_service.os.path.exists
        self.orig_dbaas_time_sleep = time.sleep
        self.orig_time_time = time.time
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        dbaas.CONF.guest_id = self.FAKE_ID

    def tearDown(self):
        mysql_common_service.utils.execute_with_timeout = \
            self.orig_utils_execute_with_timeout
        mysql_common_service.load_mysqld_options = \
            self.orig_load_mysqld_options
        mysql_common_service.os.path.exists = \
            self.orig_mysql_common_service_os_path_exists
        time.sleep = self.orig_dbaas_time_sleep
        time.time = self.orig_time_time
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        dbaas.CONF.guest_id = None
        super(MySqlAppStatusTest, self).tearDown()

    def test_get_actual_db_status(self):
        mysql_common_service.utils.execute_with_timeout = \
            Mock(return_value=("111", None))
        self.mySqlAppStatus = MySqlAppStatus.get()
        status = self.mySqlAppStatus._get_actual_db_status()
        self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)

    @patch.object(utils, 'execute_with_timeout',
                  side_effect=ProcessExecutionError())
    @patch.object(os.path, 'exists', return_value=True)
    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    def test_get_actual_db_status_error_crashed(self, mock_logging,
                                                mock_exists,
                                                mock_execute):
        mysql_common_service.load_mysqld_options = Mock(return_value={})
        self.mySqlAppStatus = MySqlAppStatus.get()
        status = self.mySqlAppStatus._get_actual_db_status()
        self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status)

    @patch('trove.guestagent.datastore.mysql_common.service.LOG')
    def test_get_actual_db_status_error_shutdown(self, *args):
        mocked = Mock(side_effect=ProcessExecutionError())
        mysql_common_service.utils.execute_with_timeout = mocked
        mysql_common_service.load_mysqld_options = Mock(return_value={})
        mysql_common_service.os.path.exists = Mock(return_value=False)
        self.mySqlAppStatus = MySqlAppStatus.get()
        status = self.mySqlAppStatus._get_actual_db_status()
        self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)
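# A condensed sketch of the status decision tree the three tests above
# exercise (inferred from the mocked inputs; this helper is illustrative
# only and never invoked by the suite): a successful ping means RUNNING,
# a failed ping with a lingering pid file means CRASHED, and a failed
# ping with no pid file means a clean SHUTDOWN.
def _example_mysql_status_probe(ping_ok, pid_file_exists):
    if ping_ok:
        return rd_instance.ServiceStatuses.RUNNING
    if pid_file_exists:
        return rd_instance.ServiceStatuses.CRASHED
    return rd_instance.ServiceStatuses.SHUTDOWN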
class TestRedisApp(BaseAppTest.AppTestCase):

    def setUp(self):
        super(TestRedisApp, self).setUp(str(uuid4()), 'redis')
        self.orig_os_path_eu = os.path.expanduser
        os.path.expanduser = Mock(return_value='/tmp/.file')
        with patch.object(RedisApp, '_build_admin_client'):
            with patch.object(ImportOverrideStrategy,
                              '_initialize_import_directory'):
                self.redis = RedisApp(state_change_wait_time=0)
                self.redis.status = FakeAppStatus(
                    self.FAKE_ID, rd_instance.ServiceStatuses.NEW)
        self.orig_os_path_isfile = os.path.isfile
        self.orig_utils_execute_with_timeout = utils.execute_with_timeout
        utils.execute_with_timeout = Mock()

    @property
    def app(self):
        return self.redis

    @property
    def appStatus(self):
        return self.redis.status

    @property
    def expected_state_change_timeout(self):
        return self.redis.state_change_wait_time

    @property
    def expected_service_candidates(self):
        return RedisSystem.SERVICE_CANDIDATES

    def tearDown(self):
        os.path.isfile = self.orig_os_path_isfile
        os.path.expanduser = self.orig_os_path_eu
        utils.execute_with_timeout = self.orig_utils_execute_with_timeout
        super(TestRedisApp, self).tearDown()

    def test_install_if_needed_installed(self):
        with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
            with patch.object(RedisApp, '_install_redis', return_value=None):
                self.app.install_if_needed('bar')
                pkg.Package.pkg_is_installed.assert_any_call('bar')
                self.assertEqual(0, RedisApp._install_redis.call_count)

    def test_install_if_needed_not_installed(self):
        with patch.object(pkg.Package, 'pkg_is_installed',
                          return_value=False):
            with patch.object(RedisApp, '_install_redis', return_value=None):
                self.app.install_if_needed('asdf')
                pkg.Package.pkg_is_installed.assert_any_call('asdf')
                RedisApp._install_redis.assert_any_call('asdf')

    def test_install_redis(self):
        with patch.object(utils, 'execute_with_timeout',
                          return_value=('0', '')):
            with patch.object(pkg.Package, 'pkg_install', return_value=None):
                with patch.object(RedisApp, 'start_db', return_value=None):
                    self.app._install_redis('redis')
                    pkg.Package.pkg_install.assert_any_call('redis', {}, 1200)
                    RedisApp.start_db.assert_any_call()
                    self.assertTrue(utils.execute_with_timeout.called)

    @patch.object(utils, 'execute_with_timeout', return_value=('0', ''))
    def test_service_cleanup(self, exec_mock):
        rservice.RedisAppStatus(Mock()).cleanup_stalled_db_services()
        exec_mock.assert_called_once_with('pkill', '-9', 'redis-server',
                                          run_as_root=True,
                                          root_helper='sudo')


class CassandraDBAppTest(BaseAppTest.AppTestCase):

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def setUp(self, mock_logging, _):
        super(CassandraDBAppTest, self).setUp(str(uuid4()), 'cassandra')
        self.sleep = time.sleep
        self.orig_time_time = time.time
        self.pkg_version = cass_service.packager.pkg_version
        self.pkg = cass_service.packager
        util.init_db()
        self.cassandra = cass_service.CassandraApp()
        self.cassandra.status = FakeAppStatus(
            self.FAKE_ID, rd_instance.ServiceStatuses.NEW)
        self.orig_unlink = os.unlink

    @property
    def app(self):
        return self.cassandra

    @property
    def appStatus(self):
        return self.cassandra.status

    @property
    def expected_state_change_timeout(self):
        return self.cassandra.state_change_wait_time

    @property
    def expected_service_candidates(self):
        return self.cassandra.service_candidates

    def tearDown(self):
        time.sleep = self.sleep
        time.time = self.orig_time_time
        cass_service.packager.pkg_version = self.pkg_version
        cass_service.packager = self.pkg
        super(CassandraDBAppTest, self).tearDown()

    def assert_reported_status(self, expected_status):
        service_status = InstanceServiceStatus.find_by(
            instance_id=self.FAKE_ID)
        self.assertEqual(expected_status, service_status.status)

    @patch.object(utils, 'execute_with_timeout')
    def test_service_cleanup(self, exec_mock):
        cass_service.CassandraAppStatus(Mock()).cleanup_stalled_db_services()
        exec_mock.assert_called_once_with(self.cassandra.CASSANDRA_KILL_CMD,
                                          shell=True)

    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_install(self, _):
        self.cassandra._install_db = Mock()
        self.pkg.pkg_is_installed = Mock(return_value=False)
        self.cassandra.install_if_needed(['cassandra'])
        self.assertTrue(self.cassandra._install_db.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG')
    def test_install_install_error(self, _):
        self.cassandra.start_db = Mock()
        self.cassandra.stop_db = Mock()
        self.pkg.pkg_is_installed = Mock(return_value=False)
        self.cassandra._install_db = Mock(
            side_effect=pkg.PkgPackageStateError("Install error"))
        self.assertRaises(pkg.PkgPackageStateError,
                          self.cassandra.install_if_needed,
                          ['cassandra=1.2.10'])
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)


class CouchbaseAppTest(BaseAppTest.AppTestCase):

    def fake_couchbase_service_discovery(self, candidates):
        return {
            'cmd_start': 'start',
            'cmd_stop': 'stop',
            'cmd_enable': 'enable',
            'cmd_disable': 'disable'
        }

    def setUp(self):
        super(CouchbaseAppTest, self).setUp(str(uuid4()), 'couchbase')
        self.orig_utils_execute_with_timeout = (
            couchservice.utils.execute_with_timeout)
        self.orig_time_sleep = time.sleep
        self.orig_time_time = time.time
        time.sleep = Mock()
        time.time = Mock(side_effect=faketime)
        self.orig_service_discovery = operating_system.service_discovery
        self.orig_get_ip = netutils.get_my_ipv4
        operating_system.service_discovery = (
            self.fake_couchbase_service_discovery)
        netutils.get_my_ipv4 = Mock()
        status = FakeAppStatus(self.FAKE_ID,
                               rd_instance.ServiceStatuses.NEW)
        self.couchbaseApp = couchservice.CouchbaseApp(status)
        dbaas.CONF.guest_id = self.FAKE_ID

    @property
    def app(self):
        return self.couchbaseApp

    @property
    def appStatus(self):
        return self.couchbaseApp.status

    @property
    def expected_state_change_timeout(self):
        return self.couchbaseApp.state_change_wait_time

    @property
    def expected_service_candidates(self):
        return couchservice.system.SERVICE_CANDIDATES

    @patch.object(utils, 'execute_with_timeout')
    def test_service_cleanup(self, exec_mock):
        couchservice.CouchbaseAppStatus().cleanup_stalled_db_services()
        exec_mock.assert_called_once_with(couchservice.system.cmd_kill)

    def tearDown(self):
        couchservice.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        netutils.get_my_ipv4 = self.orig_get_ip
        operating_system.service_discovery = self.orig_service_discovery
        time.sleep = self.orig_time_sleep
        time.time = self.orig_time_time
        dbaas.CONF.guest_id = None
        super(CouchbaseAppTest, self).tearDown()

    def test_install_when_couchbase_installed(self):
        couchservice.packager.pkg_is_installed = Mock(return_value=True)
        couchservice.utils.execute_with_timeout = Mock()
        self.couchbaseApp.install_if_needed(["package"])
        self.assertTrue(couchservice.packager.pkg_is_installed.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
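# Several app test classes here stub time.time with faketime (assumed to
# be a helper defined earlier in this module) so wait loops advance
# without real sleeping. A standalone equivalent, illustrative only and
# never invoked by the suite:
def _example_fake_clock():
    state = {'now': 0.0}

    def _tick():
        # Each call advances the simulated clock by one second.
        state['now'] += 1.0
        return state['now']

    return _tick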
class CouchDBAppTest(BaseAppTest.AppTestCase):

    def fake_couchdb_service_discovery(self, candidates):
        return {
            'cmd_start': 'start',
            'cmd_stop': 'stop',
            'cmd_enable': 'enable',
            'cmd_disable': 'disable'
        }

    def setUp(self):
        super(CouchDBAppTest, self).setUp(str(uuid4()), 'couchdb')
        self.orig_utils_execute_with_timeout = (
            couchdb_service.utils.execute_with_timeout)
        self.orig_time_sleep = time.sleep
        self.orig_time_time = time.time
        time.sleep = Mock()
        time.time = Mock(side_effect=faketime)
        self.orig_service_discovery = operating_system.service_discovery
        self.orig_get_ip = netutils.get_my_ipv4
        operating_system.service_discovery = (
            self.fake_couchdb_service_discovery)
        netutils.get_my_ipv4 = Mock()
        util.init_db()
        status = FakeAppStatus(self.FAKE_ID,
                               rd_instance.ServiceStatuses.NEW)
        self.couchdbApp = couchdb_service.CouchDBApp(status)
        dbaas.CONF.guest_id = self.FAKE_ID

    @property
    def app(self):
        return self.couchdbApp

    @property
    def appStatus(self):
        return self.couchdbApp.status

    @property
    def expected_state_change_timeout(self):
        return self.couchdbApp.state_change_wait_time

    @property
    def expected_service_candidates(self):
        return couchdb_service.system.SERVICE_CANDIDATES

    def tearDown(self):
        couchdb_service.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        netutils.get_my_ipv4 = self.orig_get_ip
        operating_system.service_discovery = self.orig_service_discovery
        time.sleep = self.orig_time_sleep
        time.time = self.orig_time_time
        dbaas.CONF.guest_id = None
        super(CouchDBAppTest, self).tearDown()

    def test_install_when_couchdb_installed(self):
        couchdb_service.packager.pkg_is_installed = Mock(return_value=True)
        couchdb_service.utils.execute_with_timeout = Mock()
        self.couchdbApp.install_if_needed(["package"])
        self.assertTrue(couchdb_service.packager.pkg_is_installed.called)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)


class MongoDBAppTest(BaseAppTest.AppTestCase):

    def fake_mongodb_service_discovery(self, candidates):
        return {
            'cmd_start': 'start',
            'cmd_stop': 'stop',
            'cmd_enable': 'enable',
            'cmd_disable': 'disable'
        }

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    def setUp(self, _):
        super(MongoDBAppTest, self).setUp(str(uuid4()), 'mongodb')
        self.orig_utils_execute_with_timeout = (
            mongo_service.utils.execute_with_timeout)
        self.orig_time_sleep = time.sleep
        self.orig_time_time = time.time
        self.orig_packager = mongo_system.PACKAGER
        self.orig_service_discovery = operating_system.service_discovery
        self.orig_os_unlink = os.unlink
        self.orig_os_path_eu = os.path.expanduser
        os.path.expanduser = Mock(return_value='/tmp/.file')
        operating_system.service_discovery = (
            self.fake_mongodb_service_discovery)
        util.init_db()
        self.mongoDbApp = mongo_service.MongoDBApp()
        self.mongoDbApp.status = FakeAppStatus(
            self.FAKE_ID, rd_instance.ServiceStatuses.NEW)
        time.sleep = Mock()
        time.time = Mock(side_effect=faketime)
        os.unlink = Mock()

    @property
    def app(self):
        return self.mongoDbApp

    @property
    def appStatus(self):
        return self.mongoDbApp.status

    @property
    def expected_state_change_timeout(self):
        return self.mongoDbApp.state_change_wait_time

    @property
    def expected_service_candidates(self):
        return mongo_system.MONGOD_SERVICE_CANDIDATES

    @patch.object(utils, 'execute_with_timeout')
    def test_service_cleanup(self, exec_mock):
        self.appStatus.cleanup_stalled_db_services()
        # def cleanup_stalled_db_services(self):
        #     out, err = utils.execute_with_timeout(system.FIND_PID,
        #                                           shell=True)
        #     pid = "".join(out.split(" ")[1:2])
        #     utils.execute_with_timeout(system.MONGODB_KILL % pid,
        #                                shell=True)

    def tearDown(self):
        mongo_service.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        time.sleep = self.orig_time_sleep
        time.time = self.orig_time_time
        mongo_system.PACKAGER = self.orig_packager
        operating_system.service_discovery = self.orig_service_discovery
        os.unlink = self.orig_os_unlink
        os.path.expanduser = self.orig_os_path_eu
        super(MongoDBAppTest, self).tearDown()

    def test_start_db_with_conf_changes_db_is_running(self):
        self.mongoDbApp.start_db = Mock()
        self.mongoDbApp.status.status = rd_instance.ServiceStatuses.RUNNING
        self.assertRaises(RuntimeError,
                          self.mongoDbApp.start_db_with_conf_changes,
                          Mock())

    def test_install_when_db_installed(self):
        packager_mock = MagicMock()
        packager_mock.pkg_is_installed = MagicMock(return_value=True)
        mongo_system.PACKAGER = packager_mock
        self.mongoDbApp.install_if_needed(['package'])
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
    def test_install_when_db_not_installed(self):
        packager_mock = MagicMock()
        packager_mock.pkg_is_installed = MagicMock(return_value=False)
        mongo_system.PACKAGER = packager_mock
        self.mongoDbApp.install_if_needed(['package'])
        packager_mock.pkg_install.assert_any_call(ANY, {}, ANY)
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)


class VerticaAppStatusTest(trove_testtools.TestCase):

    def setUp(self):
        super(VerticaAppStatusTest, self).setUp()
        util.init_db()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)

    def tearDown(self):
        super(VerticaAppStatusTest, self).tearDown()
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()

    def test_get_actual_db_status(self):
        self.verticaAppStatus = VerticaAppStatus()
        with patch.object(vertica_system, 'shell_execute',
                          MagicMock(return_value=['db_srvr', None])):
            status = self.verticaAppStatus._get_actual_db_status()
        self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)

    def test_get_actual_db_status_shutdown(self):
        self.verticaAppStatus = VerticaAppStatus()
        with patch.object(vertica_system, 'shell_execute',
                          MagicMock(side_effect=[['', None],
                                                 ['db_srvr', None]])):
            status = self.verticaAppStatus._get_actual_db_status()
        self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)

    @patch('trove.guestagent.datastore.experimental.vertica.service.LOG')
    def test_get_actual_db_status_error_crashed(self, *args):
        self.verticaAppStatus = VerticaAppStatus()
        with patch.object(vertica_system, 'shell_execute',
                          MagicMock(side_effect=ProcessExecutionError(
                              'problem'))):
            status = self.verticaAppStatus._get_actual_db_status()
        self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status)


class VerticaAppTest(trove_testtools.TestCase):

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    def setUp(self, *args, **kwargs):
        super(VerticaAppTest, self).setUp()
        self.FAKE_ID = 1000
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.app = VerticaApp(self.appStatus)
        self.setread = VolumeDevice.set_readahead_size
        self.Popen = subprocess.Popen
        vertica_system_patcher = patch.multiple(
            vertica_system,
            shell_execute=MagicMock(return_value=('', '')),
            exec_vsql_command=MagicMock(return_value=('', '')))
        self.addCleanup(vertica_system_patcher.stop)
        vertica_system_patcher.start()
        VolumeDevice.set_readahead_size = Mock()
        subprocess.Popen = Mock()
        self.test_config = configparser.ConfigParser()
        self.test_config.add_section('credentials')
        self.test_config.set('credentials', 'dbadmin_password',
                             'some_password')

    def tearDown(self):
        self.app = None
        VolumeDevice.set_readahead_size = self.setread
        subprocess.Popen = self.Popen
        super(VerticaAppTest, self).tearDown()

    def test_enable_root_is_root_not_enabled(self):
        with patch.object(self.app, 'read_config',
                          return_value=self.test_config):
            with patch.object(self.app, 'is_root_enabled',
                              return_value=False):
                with patch.object(vertica_system, 'exec_vsql_command',
                                  MagicMock(side_effect=[['', ''],
                                                         ['', ''],
                                                         ['', '']])):
                    self.app.enable_root('root_password')
                    create_user_arguments = (
                        vertica_system.exec_vsql_command.call_args_list[0])
                    expected_create_user_cmd = (
                        vertica_system.CREATE_USER % ('root',
                                                      'root_password'))
                    create_user_arguments.assert_called_with(
                        'some_password', expected_create_user_cmd)
                    grant_role_arguments = (
                        vertica_system.exec_vsql_command.call_args_list[1])
                    expected_grant_role_cmd = (
                        vertica_system.GRANT_TO_USER % ('pseudosuperuser',
                                                        'root'))
                    grant_role_arguments.assert_called_with(
                        'some_password', expected_grant_role_cmd)
                    enable_user_arguments = (
                        vertica_system.exec_vsql_command.call_args_list[2])
                    expected_enable_user_cmd = (
                        vertica_system.ENABLE_FOR_USER % ('root',
                                                          'pseudosuperuser'))
                    enable_user_arguments.assert_called_with(
                        'some_password', expected_enable_user_cmd)

    @patch('trove.guestagent.datastore.experimental.vertica.service.LOG')
    def test_enable_root_is_root_not_enabled_failed(self, *args):
        with patch.object(self.app, 'read_config',
                          return_value=self.test_config):
            with patch.object(self.app, 'is_root_enabled',
                              return_value=False):
                with patch.object(vertica_system, 'exec_vsql_command',
                                  MagicMock(side_effect=[
                                      ['', vertica_system.VSqlError(
                                          'ERROR 123: Test')]])):
                    self.assertRaises(RuntimeError, self.app.enable_root,
                                      'root_password')

    @patch('trove.guestagent.datastore.experimental.vertica.service.LOG')
    def test_enable_root_is_root_enabled(self, *args):
        with patch.object(self.app, 'read_config',
                          return_value=self.test_config):
            with patch.object(self.app, 'is_root_enabled',
                              return_value=True):
                with patch.object(vertica_system, 'exec_vsql_command',
                                  MagicMock(side_effect=[['', '']])):
                    self.app.enable_root('root_password')
                    alter_user_password_arguments = (
                        vertica_system.exec_vsql_command.call_args_list[0])
                    expected_alter_user_cmd = (
                        vertica_system.ALTER_USER_PASSWORD % (
                            'root', 'root_password'))
                    alter_user_password_arguments.assert_called_with(
                        'some_password', expected_alter_user_cmd)

    @patch('trove.guestagent.datastore.experimental.vertica.service.LOG')
    def test_enable_root_is_root_enabled_failed(self, *arg):
        with patch.object(self.app, 'read_config',
                          return_value=self.test_config):
            with patch.object(self.app, 'is_root_enabled',
                              return_value=True):
                with patch.object(vertica_system, 'exec_vsql_command',
                                  MagicMock(side_effect=[
                                      ['', vertica_system.VSqlError(
                                          'ERROR 123: Test')]])):
                    self.assertRaises(RuntimeError, self.app.enable_root,
                                      'root_password')

    def test_is_root_enable(self):
        with patch.object(self.app, 'read_config',
                          return_value=self.test_config):
            with patch.object(vertica_system, 'shell_execute',
                              MagicMock(side_effect=[['', '']])):
                self.app.is_root_enabled()
                user_exists_args = (
                    vertica_system.shell_execute.call_args_list[0])
                expected_user_exists_cmd = vertica_system.USER_EXISTS % (
                    'some_password', 'root')
                user_exists_args.assert_called_with(expected_user_exists_cmd,
                                                    'dbadmin')

    @patch('trove.guestagent.datastore.experimental.vertica.service.LOG')
    def test_is_root_enable_failed(self, *args):
        with patch.object(self.app, 'read_config',
                          return_value=self.test_config):
            with patch.object(vertica_system, 'shell_execute',
                              MagicMock(side_effect=[
                                  ['', ProcessExecutionError]])):
                self.assertRaises(RuntimeError, self.app.is_root_enabled)

    def test_install_if_needed_installed(self):
        with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
            with patch.object(pkg.Package, 'pkg_install', return_value=None):
                self.app.install_if_needed('vertica')
                pkg.Package.pkg_is_installed.assert_any_call('vertica')
                self.assertEqual(0, pkg.Package.pkg_install.call_count)

    def test_install_if_needed_not_installed(self):
        with patch.object(pkg.Package, 'pkg_is_installed',
                          return_value=False):
            with patch.object(pkg.Package, 'pkg_install', return_value=None):
                self.app.install_if_needed('vertica')
                pkg.Package.pkg_is_installed.assert_any_call('vertica')
                self.assertEqual(1, pkg.Package.pkg_install.call_count)
self.app.prepare_for_install_vertica() arguments = vertica_system.shell_execute.call_args_list[0] self.assertEqual(1, VolumeDevice.set_readahead_size.call_count) expected_command = ( "VERT_DBA_USR=dbadmin VERT_DBA_HOME=/home/dbadmin " "VERT_DBA_GRP=verticadba /opt/vertica/oss/python/bin/python" " -m vertica.local_coerce") arguments.assert_called_with(expected_command) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure_prepare_for_install_vertica(self, *args): with patch.object(vertica_system, 'shell_execute', side_effect=ProcessExecutionError('Error')): self.assertRaises(ProcessExecutionError, self.app.prepare_for_install_vertica) def test_install_vertica(self): with patch.object(self.app, 'write_config', return_value=None): self.app.install_vertica(members='10.0.0.2') arguments = vertica_system.shell_execute.call_args_list[0] expected_command = ( vertica_system.INSTALL_VERTICA % ('10.0.0.2', '/var/lib/vertica')) arguments.assert_called_with(expected_command) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure_install_vertica(self, *args): with patch.object(vertica_system, 'shell_execute', side_effect=ProcessExecutionError('some exception')): self.assertRaisesRegex(RuntimeError, 'install_vertica failed.', self.app.install_vertica, members='10.0.0.2') def test_create_db(self): with patch.object(self.app, 'read_config', return_value=self.test_config): self.app.create_db(members='10.0.0.2') arguments = vertica_system.shell_execute.call_args_list[0] expected_command = (vertica_system.CREATE_DB % ('10.0.0.2', 'db_srvr', '/var/lib/vertica', '/var/lib/vertica', 'some_password')) arguments.assert_called_with(expected_command, 'dbadmin') @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure_create_db(self, *args): with patch.object(self.app, 'read_config', side_effect=RuntimeError('Error')): self.assertRaisesRegex(RuntimeError, 'Vertica database create failed.', self.app.create_db) # Because of an exception in read_config there was no shell execution. 
self.assertEqual(0, vertica_system.shell_execute.call_count) def test_vertica_write_config(self): temp_file_handle = tempfile.NamedTemporaryFile("w", delete=False) mock_mkstemp = MagicMock(return_value=(temp_file_handle)) mock_unlink = Mock(return_value=0) self.app.write_config(config=self.test_config, temp_function=mock_mkstemp, unlink_function=mock_unlink) arguments = vertica_system.shell_execute.call_args_list[0] expected_command = ( ("install -o root -g root -m 644 %(source)s %(target)s" ) % {'source': temp_file_handle.name, 'target': vertica_system.VERTICA_CONF}) arguments.assert_called_with(expected_command) self.assertEqual(1, mock_mkstemp.call_count) configuration_data = configparser.ConfigParser() configuration_data.read(temp_file_handle.name) self.assertEqual( self.test_config.get('credentials', 'dbadmin_password'), configuration_data.get('credentials', 'dbadmin_password')) self.assertEqual(1, mock_unlink.call_count) # delete the temporary_config_file os.unlink(temp_file_handle.name) def test_vertica_error_in_write_config_verify_unlink(self): mock_unlink = Mock(return_value=0) temp_file_handle = tempfile.NamedTemporaryFile("w", delete=False) mock_mkstemp = MagicMock(return_value=temp_file_handle) with patch.object(vertica_system, 'shell_execute', side_effect=ProcessExecutionError('some exception')): self.assertRaises(ProcessExecutionError, self.app.write_config, config=self.test_config, temp_function=mock_mkstemp, unlink_function=mock_unlink) self.assertEqual(1, mock_unlink.call_count) # delete the temporary_config_file os.unlink(temp_file_handle.name) @patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, chown=DEFAULT, chmod=DEFAULT) def test_restart(self, *args, **kwargs): mock_status = MagicMock() app = VerticaApp(mock_status) mock_status.begin_restart = MagicMock(return_value=None) with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: patch_pc.__get__ = Mock(return_value=True) with patch.object(VerticaApp, 'stop_db', return_value=None): with patch.object(VerticaApp, 'start_db', return_value=None): mock_status.end_restart = MagicMock( return_value=None) app.restart() mock_status.begin_restart.assert_any_call() VerticaApp.stop_db.assert_any_call() VerticaApp.start_db.assert_any_call() @patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, chown=DEFAULT, chmod=DEFAULT) def test_start_db(self, *args, **kwargs): mock_status = MagicMock() type(mock_status)._is_restarting = PropertyMock(return_value=False) app = VerticaApp(mock_status) with patch.object(app, '_enable_db_on_boot', return_value=None): with patch.object(app, 'read_config', return_value=self.test_config): mock_status.end_restart = MagicMock( return_value=None) app.start_db() agent_start, db_start = subprocess.Popen.call_args_list agent_expected_command = [ 'sudo', 'su', '-', 'root', '-c', (vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'start')] db_expected_cmd = [ 'sudo', 'su', '-', 'dbadmin', '-c', (vertica_system.START_DB % ('db_srvr', 'some_password'))] self.assertTrue(mock_status.end_restart.called) agent_start.assert_called_with(agent_expected_command) db_start.assert_called_with(db_expected_cmd) def test_start_db_failure(self): with patch.object(self.app, '_enable_db_on_boot', side_effect=RuntimeError()): with patch.object(self.app, 'read_config', return_value=self.test_config): self.assertRaises(RuntimeError, self.app.start_db) def 
test_stop_db(self):
        type(self.appStatus)._is_restarting = PropertyMock(return_value=False)
        with patch.object(self.app, '_disable_db_on_boot', return_value=None):
            with patch.object(self.app, 'read_config',
                              return_value=self.test_config):
                with patch.object(vertica_system, 'shell_execute',
                                  MagicMock(side_effect=[['', ''],
                                                         ['db_srvr', None],
                                                         ['', '']])):
                    self.appStatus.wait_for_real_status_to_change_to = \
                        MagicMock(return_value=True)
                    self.appStatus.end_restart = MagicMock(
                        return_value=None)
                    self.app.stop_db()

                    self.assertEqual(
                        3, vertica_system.shell_execute.call_count)
                    # There are 3 shell executions:
                    # a) stop vertica-agent service
                    # b) check database status
                    # c) stop_db
                    # We are checking that the 3rd command called was stop_db
                    arguments = vertica_system.shell_execute.call_args_list[2]
                    expected_cmd = (vertica_system.STOP_DB % ('db_srvr',
                                                              'some_password'))
                    self.assertTrue(self.appStatus.
                                    wait_for_real_status_to_change_to.called)
                    arguments.assert_called_with(expected_cmd, 'dbadmin')

    def test_stop_db_do_not_start_on_reboot(self):
        type(self.appStatus)._is_restarting = PropertyMock(return_value=True)
        with patch.object(self.app, '_disable_db_on_boot', return_value=None):
            with patch.object(self.app, 'read_config',
                              return_value=self.test_config):
                with patch.object(vertica_system, 'shell_execute',
                                  MagicMock(side_effect=[['', ''],
                                                         ['db_srvr', None],
                                                         ['', '']])):
                    self.app.stop_db(do_not_start_on_reboot=True)

                    self.assertEqual(
                        3, vertica_system.shell_execute.call_count)
                    self.app._disable_db_on_boot.assert_any_call()

    def test_stop_db_database_not_running(self):
        with patch.object(self.app, '_disable_db_on_boot', return_value=None):
            with patch.object(self.app, 'read_config',
                              return_value=self.test_config):
                self.app.stop_db()
                # Since the database stop command does not get executed,
                # only 2 shell calls are made.
self.assertEqual( 2, vertica_system.shell_execute.call_count) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_stop_db_failure(self, *args): type(self.appStatus)._is_restarting = PropertyMock(return_value=False) with patch.object(self.app, '_disable_db_on_boot', return_value=None): with patch.object(self.app, 'read_config', return_value=self.test_config): with patch.object(vertica_system, 'shell_execute', MagicMock(side_effect=[['', ''], ['db_srvr', None], ['', '']])): self.appStatus.wait_for_real_status_to_change_to = \ MagicMock(return_value=None) self.appStatus.end_restart = MagicMock( return_value=None) self.assertRaises(RuntimeError, self.app.stop_db) def test_export_conf_to_members(self): self.app._export_conf_to_members(members=['member1', 'member2']) self.assertEqual(2, vertica_system.shell_execute.call_count) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_fail__export_conf_to_members(self, *args): # app = VerticaApp(MagicMock()) with patch.object(vertica_system, 'shell_execute', side_effect=ProcessExecutionError('Error')): self.assertRaises(ProcessExecutionError, self.app._export_conf_to_members, ['member1', 'member2']) def test_authorize_public_keys(self): user = 'test_user' keys = ['test_key@machine1', 'test_key@machine2'] with patch.object(os.path, 'expanduser', return_value=('/home/' + user)): self.app.authorize_public_keys(user=user, public_keys=keys) self.assertEqual(2, vertica_system.shell_execute.call_count) vertica_system.shell_execute.assert_any_call( 'cat ' + '/home/' + user + '/.ssh/authorized_keys') def test_authorize_public_keys_authorized_file_not_exists(self): user = 'test_user' keys = ['test_key@machine1', 'test_key@machine2'] with patch.object(os.path, 'expanduser', return_value=('/home/' + user)): with patch.object( vertica_system, 'shell_execute', MagicMock(side_effect=[ProcessExecutionError('Some Error'), ['', '']])): self.app.authorize_public_keys(user=user, public_keys=keys) self.assertEqual(2, vertica_system.shell_execute.call_count) vertica_system.shell_execute.assert_any_call( 'cat ' + '/home/' + user + '/.ssh/authorized_keys') @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_fail_authorize_public_keys(self, *args): user = 'test_user' keys = ['test_key@machine1', 'test_key@machine2'] with patch.object(os.path, 'expanduser', return_value=('/home/' + user)): with patch.object( vertica_system, 'shell_execute', MagicMock(side_effect=[ProcessExecutionError('Some Error'), ProcessExecutionError('Some Error') ])): self.assertRaises(ProcessExecutionError, self.app.authorize_public_keys, user, keys) def test_get_public_keys(self): user = 'test_user' with patch.object(os.path, 'expanduser', return_value=('/home/' + user)): self.app.get_public_keys(user=user) self.assertEqual(2, vertica_system.shell_execute.call_count) vertica_system.shell_execute.assert_any_call( (vertica_system.SSH_KEY_GEN % ('/home/' + user)), user) vertica_system.shell_execute.assert_any_call( 'cat ' + '/home/' + user + '/.ssh/id_rsa.pub') def test_get_public_keys_if_key_exists(self): user = 'test_user' with patch.object(os.path, 'expanduser', return_value=('/home/' + user)): with patch.object( vertica_system, 'shell_execute', MagicMock(side_effect=[ProcessExecutionError('Some Error'), ['some_key', None]])): key = self.app.get_public_keys(user=user) self.assertEqual(2, vertica_system.shell_execute.call_count) self.assertEqual('some_key', key) 
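    # Tests such as test_get_public_keys_if_key_exists above rely on
    # mock's side_effect-as-list semantics: each call consumes the next
    # queued value. A minimal self-contained sketch of that behaviour
    # (illustrative only; the names are local to this sketch):
    def test_side_effect_queue_sketch(self):
        fake_shell = MagicMock(side_effect=[['', ''], ['some_key', None]])
        # Each call returns the next queued item, in order.
        self.assertEqual(['', ''], fake_shell())
        self.assertEqual(['some_key', None], fake_shell())
        # Once the queue is exhausted, further calls raise StopIteration.
        self.assertRaises(StopIteration, fake_shell)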
@patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_fail_get_public_keys(self, *args): user = 'test_user' with patch.object(os.path, 'expanduser', return_value=('/home/' + user)): with patch.object( vertica_system, 'shell_execute', MagicMock(side_effect=[ProcessExecutionError('Some Error'), ProcessExecutionError('Some Error') ])): self.assertRaises(ProcessExecutionError, self.app.get_public_keys, user) def test_install_cluster(self): with patch.object(self.app, 'read_config', return_value=self.test_config): self.app.install_cluster(members=['member1', 'member2']) # Verifying the number of shell calls, # as command has already been tested in preceding tests self.assertEqual(5, vertica_system.shell_execute.call_count) def test__enable_db_on_boot(self): self.app._enable_db_on_boot() restart_policy, agent_enable = subprocess.Popen.call_args_list expected_restart_policy = [ 'sudo', 'su', '-', 'dbadmin', '-c', (vertica_system.SET_RESTART_POLICY % ('db_srvr', 'always'))] expected_agent_enable = [ 'sudo', 'su', '-', 'root', '-c', (vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'enable')] self.assertEqual(2, subprocess.Popen.call_count) restart_policy.assert_called_with(expected_restart_policy) agent_enable.assert_called_with(expected_agent_enable) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure__enable_db_on_boot(self, *args): with patch.object(subprocess, 'Popen', side_effect=OSError): self.assertRaisesRegex(RuntimeError, 'Could not enable database on boot.', self.app._enable_db_on_boot) def test__disable_db_on_boot(self): self.app._disable_db_on_boot() restart_policy, agent_disable = ( vertica_system.shell_execute.call_args_list) expected_restart_policy = ( vertica_system.SET_RESTART_POLICY % ('db_srvr', 'never')) expected_agent_disable = ( vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'disable') self.assertEqual(2, vertica_system.shell_execute.call_count) restart_policy.assert_called_with(expected_restart_policy, 'dbadmin') agent_disable.assert_called_with(expected_agent_disable, 'root') @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_failure__disable_db_on_boot(self, *args): with patch.object(vertica_system, 'shell_execute', side_effect=ProcessExecutionError('Error')): self.assertRaisesRegex(RuntimeError, 'Could not disable database on boot.', self.app._disable_db_on_boot) def test_read_config(self): with patch.object(configparser, 'ConfigParser', return_value=self.test_config): test_config = self.app.read_config() self.assertEqual('some_password', test_config.get('credentials', 'dbadmin_password') ) @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') def test_fail_read_config(self, *args): with patch.object(configparser.ConfigParser, 'read', side_effect=configparser.Error()): self.assertRaises(RuntimeError, self.app.read_config) @patch.object(ConfigurationManager, 'save_configuration') def test_start_db_with_conf_changes(self, save_cfg): type(self.appStatus)._is_restarting = PropertyMock(return_value=False) type(self.appStatus).is_running = PropertyMock(return_value=False) with patch.object(self.app, 'read_config', return_value=self.test_config): with patch.object(self.appStatus, 'end_restart') as end_restart: config = 'tst_cfg_contents' self.app.start_db_with_conf_changes(config) save_cfg.assert_called_once_with(config) end_restart.assert_any_call() class DB2AppTest(trove_testtools.TestCase): @patch.object(ImportOverrideStrategy, '_initialize_import_directory') 
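    # Note on the stacked @patch decorators used throughout this class:
    # mock hands the patched objects to the decorated method bottom-up,
    # so the decorator nearest the 'def' maps to the first mock argument
    # (here they are simply swallowed by *args/**kwargs).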
@patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2service, 'run_command')
    @patch.object(db2service.DB2App, 'process_default_dbm_config')
    def setUp(self, *args, **kwargs):
        super(DB2AppTest, self).setUp()
        self.orig_utils_execute_with_timeout = (
            db2service.utils.execute_with_timeout)
        util.init_db()
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.db2App = db2service.DB2App(self.appStatus)
        self.db2App.init_config()
        dbaas.CONF.guest_id = self.FAKE_ID

    def tearDown(self):
        db2service.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        dbaas.CONF.guest_id = None
        self.db2App = None
        super(DB2AppTest, self).tearDown()

    def assert_reported_status(self, expected_status):
        service_status = InstanceServiceStatus.find_by(
            instance_id=self.FAKE_ID)
        self.assertEqual(expected_status, service_status.status)

    def test_stop_db(self):
        db2service.utils.execute_with_timeout = MagicMock(return_value=None)
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
        self.db2App.stop_db()
        self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2service, 'run_command')
    @patch.object(db2service.DB2App, 'process_default_dbm_config')
    def test_restart_server(self, *args, **kwargs):
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        mock_status = MagicMock(return_value=None)
        app = db2service.DB2App(mock_status)
        mock_status.begin_restart = MagicMock(return_value=None)
        app.stop_db = MagicMock(return_value=None)
        app.start_db = MagicMock(return_value=None)
        with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc:
            patch_pc.__get__ = Mock(return_value=True)
            app.restart()
            self.assertTrue(mock_status.begin_restart.called)
            self.assertTrue(app.stop_db.called)
            self.assertTrue(app.start_db.called)

    def test_start_db(self):
        db2service.utils.execute_with_timeout = MagicMock(return_value=None)
        self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
        with patch.object(self.db2App, '_enable_db_on_boot',
                          return_value=None):
            self.db2App.start_db()
            self.assert_reported_status(rd_instance.ServiceStatuses.NEW)

    @patch.object(ConfigurationManager, 'save_configuration')
    @patch.object(db2service.DB2App, 'start_db')
    def test_start_db_with_conf_changes(self, start_db, save_cfg):
        config = {'DIAGSIZE': '10'}
        self.db2App.start_db_with_conf_changes(config)
        start_db.assert_called_once_with(True)
        save_cfg.assert_called_once_with(config)

    @patch.object(ConfigurationManager, 'apply_user_override')
    @patch.object(db2service.DB2App, '_apply_config')
    def test_update_overrides(self, apply_config, apply_user_override):
        # The mocks arrive bottom-up, so '_apply_config' is the first
        # argument and 'apply_user_override' the second.
        overrides = {'DIAGSIZE': 50}
        context = MagicMock()
        self.db2App.update_overrides(context, overrides)
        apply_user_override.assert_called_once_with(overrides)
        apply_config.assert_called_once_with(overrides)

    @patch.object(ConfigurationManager, 'get_user_override')
    @patch.object(ConfigurationManager, 'remove_user_override')
    @patch.object(db2service.DB2App, '_reset_config')
    def test_remove_overrides(self, reset_config, remove_user_override,
                              get_user_override):
        overrides = {'DIAGSIZE': 50}
        get_user_override.return_value = overrides
self.db2App.remove_overrides()
        get_user_override.assert_called_once()
        reset_config.assert_called_once_with(overrides)
        remove_user_override.assert_called_once()


class DB2AdminTest(trove_testtools.TestCase):

    def setUp(self):
        super(DB2AdminTest, self).setUp()
        self.db2Admin = db2service.DB2Admin()
        self.orig_utils_execute_with_timeout = (
            db2service.utils.execute_with_timeout)

    def tearDown(self):
        db2service.utils.execute_with_timeout = (
            self.orig_utils_execute_with_timeout)
        super(DB2AdminTest, self).tearDown()

    @patch('trove.guestagent.datastore.experimental.db2.service.LOG')
    def test_delete_database(self, *args):
        with patch.object(
                db2service, 'run_command',
                MagicMock(
                    return_value=None,
                    side_effect=ProcessExecutionError('Error'))):
            self.assertRaises(GuestError,
                              self.db2Admin.delete_database,
                              FAKE_DB)
            self.assertTrue(db2service.run_command.called)
            args, _ = db2service.run_command.call_args_list[0]
            expected = "db2 drop database testDB"
            self.assertEqual(expected, args[0],
                             "Delete database queries are not the same")

    @patch('trove.guestagent.datastore.experimental.db2.service.LOG')
    def test_list_databases(self, *args):
        with patch.object(db2service, 'run_command', MagicMock(
                side_effect=ProcessExecutionError('Error'))):
            self.db2Admin.list_databases()
            self.assertTrue(db2service.run_command.called)
            args, _ = db2service.run_command.call_args_list[0]
            expected = "db2 list database directory " \
                "| grep -B6 -i indirect | grep 'Database name' | " \
                "sed 's/.*= //'"
            self.assertEqual(expected, args[0],
                             "List database queries are not the same")

    def test_create_users(self):
        with patch.object(db2service, 'run_command', MagicMock(
                return_value=None)):
            db2service.utils.execute_with_timeout = MagicMock(
                return_value=None)
            self.db2Admin.create_user(FAKE_USER)
            self.assertTrue(db2service.utils.execute_with_timeout.called)
            self.assertTrue(db2service.run_command.called)
            args, _ = db2service.run_command.call_args_list[0]
            expected = "db2 connect to testDB; " \
                "db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
                "ON DATABASE TO USER random; db2 connect reset"
            self.assertEqual(
                expected, args[0],
                "Granting database access queries are not the same")
            self.assertEqual(1, db2service.run_command.call_count)

    def test_delete_users_with_db(self):
        with patch.object(db2service, 'run_command',
                          MagicMock(return_value=None)):
            with patch.object(db2service.DB2Admin, 'list_access',
                              MagicMock(return_value=None)):
                utils.execute_with_timeout = MagicMock(return_value=None)
                self.db2Admin.delete_user(FAKE_USER[0])
                self.assertTrue(db2service.run_command.called)
                self.assertTrue(db2service.utils.execute_with_timeout.called)
                self.assertFalse(db2service.DB2Admin.list_access.called)
                args, _ = db2service.run_command.call_args_list[0]
                expected = "db2 connect to testDB; " \
                    "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
                    "ON DATABASE FROM USER random; db2 connect reset"
                self.assertEqual(
                    expected, args[0],
                    "Revoke database access queries are not the same")
                self.assertEqual(1, db2service.run_command.call_count)

    def test_delete_users_without_db(self):
        FAKE_USER.append(
            {"_name": "random2", "_password": "guesswhat",
             "_host": '%', "_databases": []})
        with patch.object(db2service, 'run_command',
                          MagicMock(return_value=None)):
            with patch.object(db2service.DB2Admin, 'list_access',
                              MagicMock(return_value=[FAKE_DB])):
                utils.execute_with_timeout = MagicMock(return_value=None)
                self.db2Admin.delete_user(FAKE_USER[1])
                self.assertTrue(db2service.run_command.called)
                self.assertTrue(db2service.DB2Admin.list_access.called)
                self.assertTrue(
                    db2service.utils.execute_with_timeout.called)
                args, _ = db2service.run_command.call_args_list[0]
                expected = "db2 connect to testDB; " \
                    "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT," \
                    "DATAACCESS ON DATABASE FROM USER random2; " \
                    "db2 connect reset"
                self.assertEqual(
                    expected, args[0],
                    "Revoke database access queries are not the same")
                self.assertEqual(1, db2service.run_command.call_count)
                FAKE_USER.pop()

    def test_list_users(self):
        databases = []
        databases.append(FAKE_DB)
        with patch.object(db2service, 'run_command', MagicMock(
                side_effect=ProcessExecutionError('Error'))):
            with patch.object(self.db2Admin, "list_databases",
                              MagicMock(return_value=(databases, None))):
                self.db2Admin.list_users()
                self.assertTrue(db2service.run_command.called)
                args, _ = db2service.run_command.call_args_list[0]
                expected = "db2 +o connect to testDB; " \
                    "db2 -x select grantee, dataaccessauth " \
                    "from sysibm.sysdbauth; db2 connect reset"
                self.assertEqual(expected, args[0],
                                 "List user queries are not the same")

    def test_get_user(self):
        databases = []
        databases.append(FAKE_DB)
        with patch.object(db2service, 'run_command', MagicMock(
                side_effect=ProcessExecutionError('Error'))):
            with patch.object(self.db2Admin, "list_databases",
                              MagicMock(return_value=(databases, None))):
                self.db2Admin._get_user('random', None)
                self.assertTrue(db2service.run_command.called)
                args, _ = db2service.run_command.call_args_list[0]
                expected = "db2 +o connect to testDB; " \
                    "db2 -x select grantee, dataaccessauth " \
                    "from sysibm.sysdbauth; db2 connect reset"
                self.assertEqual(expected, args[0],
                                 "Get user queries are not the same")


class PXCAppTest(trove_testtools.TestCase):

    def setUp(self):
        super(PXCAppTest, self).setUp()
        self.orig_utils_execute_with_timeout = \
            mysql_common_service.utils.execute_with_timeout
        self.orig_time_sleep = time.sleep
        self.orig_time_time = time.time
        self.orig_unlink = os.unlink
        self.orig_get_auth_password = \
            mysql_common_service.BaseMySqlApp.get_auth_password
        self.FAKE_ID = str(uuid4())
        InstanceServiceStatus.create(instance_id=self.FAKE_ID,
                                     status=rd_instance.ServiceStatuses.NEW)
        self.appStatus = FakeAppStatus(self.FAKE_ID,
                                       rd_instance.ServiceStatuses.NEW)
        self.PXCApp = pxc_service.PXCApp(self.appStatus)
        mysql_service = patch.object(
            pxc_service.PXCApp, 'mysql_service',
            PropertyMock(return_value={
                'cmd_start': Mock(),
                'cmd_stop': Mock(),
                'cmd_enable': Mock(),
                'cmd_disable': Mock(),
                'cmd_bootstrap_galera_cluster': Mock(),
                'bin': Mock()
            }))
        mysql_service.start()
        self.addCleanup(mysql_service.stop)
        time.sleep = Mock()
        time.time = Mock(side_effect=faketime)
        os.unlink = Mock()
        mysql_common_service.BaseMySqlApp.get_auth_password = Mock()
        self.mock_client = Mock()
        self.mock_execute = Mock()
        self.mock_client.__enter__ = Mock()
        self.mock_client.__exit__ = Mock()
        self.mock_client.__enter__.return_value.execute = self.mock_execute
        self.orig_configuration_manager = \
            mysql_common_service.BaseMySqlApp.configuration_manager
        mysql_common_service.BaseMySqlApp.configuration_manager = Mock()
        self.orig_create_engine = sqlalchemy.create_engine

    def tearDown(self):
        self.PXCApp = None
        mysql_common_service.utils.execute_with_timeout = \
            self.orig_utils_execute_with_timeout
        time.sleep = self.orig_time_sleep
        time.time = self.orig_time_time
        os.unlink = self.orig_unlink
        mysql_common_service.BaseMySqlApp.get_auth_password = \
            self.orig_get_auth_password
        InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
        mysql_common_service.BaseMySqlApp.configuration_manager = \
            self.orig_configuration_manager
        sqlalchemy.create_engine = 
self.orig_create_engine super(PXCAppTest, self).tearDown() @patch.object(pxc_service.PXCApp, 'get_engine', return_value=MagicMock(name='get_engine')) def test__grant_cluster_replication_privilege(self, mock_engine): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } with patch.object(pxc_service.PXCApp, 'local_sql_client', return_value=self.mock_client): self.PXCApp._grant_cluster_replication_privilege(repl_user) args, _ = self.mock_execute.call_args_list[0] expected = ("GRANT LOCK TABLES, RELOAD, REPLICATION CLIENT ON *.* " "TO `test-user`@`%` IDENTIFIED BY 'test-user-password';") self.assertEqual(expected, args[0].text, "Sql statements are not the same") @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test__bootstrap_cluster(self, mock_execute): pxc_service_cmds = self.PXCApp.mysql_service self.PXCApp._bootstrap_cluster(timeout=20) self.assertEqual(1, mock_execute.call_count) mock_execute.assert_called_with( pxc_service_cmds['cmd_bootstrap_galera_cluster'], shell=True, timeout=20) def test_install_cluster(self): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } apply_mock = Mock() self.PXCApp.configuration_manager.apply_system_override = apply_mock self.PXCApp.stop_db = Mock() self.PXCApp._grant_cluster_replication_privilege = Mock() self.PXCApp.wipe_ib_logfiles = Mock() self.PXCApp.start_mysql = Mock() self.PXCApp.install_cluster(repl_user, "something") self.assertEqual(1, self.PXCApp.stop_db.call_count) self.assertEqual( 1, self.PXCApp._grant_cluster_replication_privilege.call_count) self.assertEqual(1, apply_mock.call_count) self.assertEqual(1, self.PXCApp.wipe_ib_logfiles.call_count) self.assertEqual(1, self.PXCApp.start_mysql.call_count) def test_install_cluster_with_bootstrap(self): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } apply_mock = Mock() self.PXCApp.configuration_manager.apply_system_override = apply_mock self.PXCApp.stop_db = Mock() self.PXCApp._grant_cluster_replication_privilege = Mock() self.PXCApp.wipe_ib_logfiles = Mock() self.PXCApp._bootstrap_cluster = Mock() self.PXCApp.install_cluster(repl_user, "something", bootstrap=True) self.assertEqual(1, self.PXCApp.stop_db.call_count) self.assertEqual( 1, self.PXCApp._grant_cluster_replication_privilege.call_count) self.assertEqual(1, self.PXCApp.wipe_ib_logfiles.call_count) self.assertEqual(1, apply_mock.call_count) self.assertEqual(1, self.PXCApp._bootstrap_cluster.call_count) class MariaDBAppTest(trove_testtools.TestCase): def setUp(self): super(MariaDBAppTest, self).setUp() self.orig_utils_execute_with_timeout = \ mysql_common_service.utils.execute_with_timeout self.orig_time_sleep = time.sleep self.orig_time_time = time.time self.orig_unlink = os.unlink self.orig_get_auth_password = \ mysql_common_service.BaseMySqlApp.get_auth_password self.FAKE_ID = str(uuid4()) InstanceServiceStatus.create(instance_id=self.FAKE_ID, status=rd_instance.ServiceStatuses.NEW) self.appStatus = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) self.MariaDBApp = mariadb_service.MariaDBApp(self.appStatus) mysql_service = patch.object( mariadb_service.MariaDBApp, 'mysql_service', PropertyMock(return_value={ 'cmd_start': Mock(), 'cmd_stop': Mock(), 'cmd_enable': Mock(), 'cmd_disable': Mock(), 'cmd_bootstrap_galera_cluster': Mock(), 'bin': Mock() })) mysql_service.start() self.addCleanup(mysql_service.stop) time.sleep = Mock() time.time = Mock(side_effect=faketime) os.unlink = Mock() mysql_common_service.BaseMySqlApp.get_auth_password = Mock() 
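        # The lines below hand-build a context-manager mock: when app code
        # runs 'with local_sql_client(...) as client:', the client's
        # execute() is self.mock_execute, so every SQL statement issued can
        # be inspected later through mock_execute.call_args_list.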
self.mock_client = Mock() self.mock_execute = Mock() self.mock_client.__enter__ = Mock() self.mock_client.__exit__ = Mock() self.mock_client.__enter__.return_value.execute = self.mock_execute self.orig_configuration_manager = \ mysql_common_service.BaseMySqlApp.configuration_manager mysql_common_service.BaseMySqlApp.configuration_manager = Mock() self.orig_create_engine = sqlalchemy.create_engine def tearDown(self): self.MariaDBApp = None mysql_common_service.utils.execute_with_timeout = \ self.orig_utils_execute_with_timeout time.sleep = self.orig_time_sleep time.time = self.orig_time_time os.unlink = self.orig_unlink mysql_common_service.BaseMySqlApp.get_auth_password = \ self.orig_get_auth_password InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() mysql_common_service.BaseMySqlApp.configuration_manager = \ self.orig_configuration_manager sqlalchemy.create_engine = self.orig_create_engine super(MariaDBAppTest, self).tearDown() @patch.object(mariadb_service.MariaDBApp, 'get_engine', return_value=MagicMock(name='get_engine')) def test__grant_cluster_replication_privilege(self, mock_engine): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } with patch.object(mariadb_service.MariaDBApp, 'local_sql_client', return_value=self.mock_client): self.MariaDBApp._grant_cluster_replication_privilege(repl_user) args, _ = self.mock_execute.call_args_list[0] expected = ("GRANT LOCK TABLES, RELOAD, REPLICATION CLIENT ON *.* " "TO `test-user`@`%` IDENTIFIED BY 'test-user-password';") self.assertEqual(expected, args[0].text, "Sql statements are not the same") @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) def test__bootstrap_cluster(self, mock_execute): mariadb_service_cmds = self.MariaDBApp.mysql_service self.MariaDBApp._bootstrap_cluster(timeout=20) self.assertEqual(1, mock_execute.call_count) mock_execute.assert_called_with( mariadb_service_cmds['cmd_bootstrap_galera_cluster'], shell=True, timeout=20) def test_install_cluster(self): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } apply_mock = Mock() self.MariaDBApp.configuration_manager.apply_system_override = \ apply_mock self.MariaDBApp.stop_db = Mock() self.MariaDBApp._grant_cluster_replication_privilege = Mock() self.MariaDBApp.wipe_ib_logfiles = Mock() self.MariaDBApp.start_mysql = Mock() self.MariaDBApp.install_cluster(repl_user, "something") self.assertEqual(1, self.MariaDBApp.stop_db.call_count) self.assertEqual( 1, self.MariaDBApp._grant_cluster_replication_privilege.call_count) self.assertEqual(1, apply_mock.call_count) self.assertEqual(1, self.MariaDBApp.wipe_ib_logfiles.call_count) self.assertEqual(1, self.MariaDBApp.start_mysql.call_count) def test_install_cluster_with_bootstrap(self): repl_user = { 'name': 'test-user', 'password': 'test-user-password', } apply_mock = Mock() self.MariaDBApp.configuration_manager.apply_system_override = \ apply_mock self.MariaDBApp.stop_db = Mock() self.MariaDBApp._grant_cluster_replication_privilege = Mock() self.MariaDBApp.wipe_ib_logfiles = Mock() self.MariaDBApp._bootstrap_cluster = Mock() self.MariaDBApp.install_cluster(repl_user, "something", bootstrap=True) self.assertEqual(1, self.MariaDBApp.stop_db.call_count) self.assertEqual( 1, self.MariaDBApp._grant_cluster_replication_privilege.call_count) self.assertEqual(1, self.MariaDBApp.wipe_ib_logfiles.call_count) self.assertEqual(1, apply_mock.call_count) self.assertEqual(1, self.MariaDBApp._bootstrap_cluster.call_count) class PostgresAppTest(BaseAppTest.AppTestCase): 
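    # This case plugs PostgreSQL into the shared BaseAppTest.AppTestCase
    # harness: the properties below supply the fixtures (app, appStatus,
    # timeout and service candidates) that the inherited assertions
    # consume.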
@patch.object(utils, 'execute_with_timeout', return_value=('0', '')) @patch.object(pg_service.PgSqlApp, '_find_config_file', return_value='') @patch.object(pg_service.PgSqlApp, 'pgsql_extra_bin_dir', PropertyMock(return_value='')) def setUp(self, mock_cfg, mock_exec): super(PostgresAppTest, self).setUp(str(uuid4()), 'postgresql') self.orig_time_sleep = time.sleep self.orig_time_time = time.time time.sleep = Mock() time.time = Mock(side_effect=faketime) self.postgres = pg_service.PgSqlApp() self.postgres.status = FakeAppStatus(self.FAKE_ID, rd_instance.ServiceStatuses.NEW) @property def app(self): return self.postgres @property def appStatus(self): return self.postgres.status @property def expected_state_change_timeout(self): return CONF.state_change_wait_time @property def expected_service_candidates(self): return self.postgres.service_candidates def tearDown(self): time.sleep = self.orig_time_sleep time.time = self.orig_time_time super(PostgresAppTest, self).tearDown() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_galera_cluster_api.py0000644000175000017500000001336400000000000030125 0ustar00coreycorey00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import Timeout import mock import trove.common.context as context from trove.common import exception from trove.common.rpc.version import RPC_API_VERSION from trove.common.strategies.cluster.experimental.galera_common.guestagent \ import GaleraCommonGuestAgentStrategy from trove import rpc from trove.tests.unittests import trove_testtools def _mock_call(cmd, timeout, version=None, user=None, public_keys=None, members=None): # To check get_public_keys, authorize_public_keys, # install_cluster, cluster_complete in cmd. 
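    # Returning True short-circuits the real RPC round trip; any command
    # outside the expected set fails the test immediately.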
if cmd in ('get_public_keys', 'authorize_public_keys', 'install_cluster', 'cluster_complete'): return True else: raise BaseException("Test Failed") class ApiTest(trove_testtools.TestCase): @mock.patch.object(rpc, 'get_client') @mock.patch('trove.instance.models.get_instance_encryption_key', return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08') def setUp(self, mock_get_encryption_key, *args): super(ApiTest, self).setUp() cluster_guest_api = (GaleraCommonGuestAgentStrategy() .guest_client_class) self.context = context.TroveContext() self.guest = cluster_guest_api(self.context, 0) self.guest._call = _mock_call self.api = cluster_guest_api(self.context, "instance-id-x23d2d") self._mock_rpc_client() mock_get_encryption_key.assert_called() def test_get_routing_key(self): self.assertEqual('guestagent.instance-id-x23d2d', self.api._get_routing_key()) @mock.patch('trove.guestagent.api.LOG') def test_api_cast_exception(self, mock_logging): self.call_context.cast.side_effect = IOError('host down') self.assertRaises(exception.GuestError, self.api.create_user, 'test_user') @mock.patch('trove.guestagent.api.LOG') def test_api_call_exception(self, mock_logging): self.call_context.call.side_effect = IOError('host_down') self.assertRaises(exception.GuestError, self.api.list_users) def test_api_call_timeout(self): self.call_context.call.side_effect = Timeout() self.assertRaises(exception.GuestTimeout, self.api.restart) def _verify_rpc_prepare_before_call(self): self.api.client.prepare.assert_called_once_with( version=RPC_API_VERSION, timeout=mock.ANY) def _verify_rpc_prepare_before_cast(self): self.api.client.prepare.assert_called_once_with( version=RPC_API_VERSION) def _verify_cast(self, *args, **kwargs): self.call_context.cast.assert_called_once_with(self.context, *args, **kwargs) def _verify_call(self, *args, **kwargs): self.call_context.call.assert_called_once_with(self.context, *args, **kwargs) def _mock_rpc_client(self): self.call_context = mock.Mock() self.api.client.prepare = mock.Mock(return_value=self.call_context) self.call_context.call = mock.Mock() self.call_context.cast = mock.Mock() def test_install_cluster(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.install_cluster( replication_user="repuser", cluster_configuration="cluster-configuration", bootstrap=False) self._verify_rpc_prepare_before_call() self._verify_call('install_cluster', replication_user="repuser", cluster_configuration="cluster-configuration", bootstrap=False) self.assertEqual(exp_resp, resp) def test_reset_admin_password(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.reset_admin_password( admin_password="admin_password") self._verify_rpc_prepare_before_call() self._verify_call('reset_admin_password', admin_password="admin_password") self.assertEqual(exp_resp, resp) def test_cluster_complete(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.cluster_complete() self._verify_rpc_prepare_before_call() self._verify_call('cluster_complete') self.assertEqual(exp_resp, resp) def test_get_cluster_context(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.get_cluster_context() self._verify_rpc_prepare_before_call() self._verify_call('get_cluster_context') self.assertEqual(exp_resp, resp) def test_write_cluster_configuration_overrides(self): exp_resp = None self.call_context.call.return_value = exp_resp resp = self.api.write_cluster_configuration_overrides( 
cluster_configuration="cluster-configuration") self._verify_rpc_prepare_before_call() self._verify_call('write_cluster_configuration_overrides', cluster_configuration="cluster-configuration",) self.assertEqual(exp_resp, resp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_galera_manager.py0000644000175000017500000001207600000000000027224 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import MagicMock from mock import patch from trove.common.context import TroveContext from trove.guestagent.datastore.galera_common import manager as galera_manager from trove.guestagent.datastore.galera_common import service as galera_service from trove.guestagent.datastore.mysql_common import service as mysql_service from trove.tests.unittests import trove_testtools class GaleraTestApp(galera_service.GaleraApp): def __init__(self, status): super(GaleraTestApp, self).__init__( status, mysql_service.BaseLocalSqlClient, mysql_service.BaseKeepAliveConnection) @property def cluster_configuration(self): return self.configuration_manager.get_value('mysqld') class GaleraTestRootAccess(mysql_service.BaseMySqlRootAccess): def __init__(self): super(GaleraTestRootAccess, self).__init__( mysql_service.BaseLocalSqlClient, GaleraTestApp(mysql_service.BaseMySqlAppStatus.get())) class GaleraTestAdmin(mysql_service.BaseMySqlAdmin): def __init__(self): super(GaleraTestAdmin, self).__init__( mysql_service.BaseLocalSqlClient, GaleraTestRootAccess(), GaleraTestApp) class GuestAgentManagerTest(trove_testtools.TestCase): def setUp(self): super(GuestAgentManagerTest, self).setUp() self.manager = galera_manager.GaleraManager( GaleraTestApp, mysql_service.BaseMySqlAppStatus, GaleraTestAdmin) self.context = TroveContext() patcher_rs = patch( 'trove.guestagent.strategies.replication.get_instance') patcher_rs.start() self.addCleanup(patcher_rs.stop) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(galera_service.GaleraApp, 'install_cluster', new_callable=MagicMock) def test_install_cluster(self, install_cluster, app_status_get): install_cluster.return_value = MagicMock() app_status_get.return_value = None replication_user = "repuser" configuration = "configuration" bootstrap = True self.manager.install_cluster(self.context, replication_user, configuration, bootstrap) app_status_get.assert_any_call() install_cluster.assert_called_with( replication_user, configuration, bootstrap) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(galera_service.GaleraApp, 'reset_admin_password', new_callable=MagicMock) def test_reset_admin_password(self, reset_admin_password, app_status_get): reset_admin_password.return_value = None app_status_get.return_value = MagicMock() admin_password = "password" self.manager.reset_admin_password(self.context, admin_password) 
app_status_get.assert_any_call() reset_admin_password.assert_called_with(admin_password) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(galera_service.GaleraApp, 'get_cluster_context') def test_get_cluster_context(self, get_cluster_ctxt, app_status_get): get_cluster_ctxt.return_value = {'cluster': 'info'} self.manager.get_cluster_context(self.context) app_status_get.assert_any_call() get_cluster_ctxt.assert_any_call() @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(galera_service.GaleraApp, 'write_cluster_configuration_overrides') def test_write_cluster_configuration_overrides(self, conf_overries, app_status_get): cluster_configuration = "cluster_configuration" self.manager.write_cluster_configuration_overrides( self.context, cluster_configuration) app_status_get.assert_any_call() conf_overries.assert_called_with(cluster_configuration) @patch.object(mysql_service.BaseMySqlAppStatus, 'get', new_callable=MagicMock) @patch.object(mysql_service.BaseMySqlAdmin, 'enable_root') def test_enable_root_with_password(self, reset_admin_pwd, app_status_get): admin_password = "password" self.manager.enable_root_with_password(self.context, admin_password) reset_admin_pwd.assert_called_with(admin_password) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_guestagent_utils.py0000644000175000017500000001703300000000000027663 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
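# A quick orientation for the update_dict() cases below: judging from the
# expected values, update_dict(update, target) deep-merges 'update' into
# 'target' and returns the mutated target. Nested dicts are merged key by
# key, while non-dict values (lists included) are replaced wholesale, e.g.
#
#     update_dict({'d1': {'k2': 'v2'}}, {'d1': {'k1': 'v1'}})
#     # => {'d1': {'k1': 'v1', 'k2': 'v2'}}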
from mock import Mock from mock import patch from trove.common import pagination from trove.guestagent.common import guestagent_utils from trove.tests.unittests import trove_testtools class TestGuestagentUtils(trove_testtools.TestCase): def test_update_dict(self): data = [{ 'dict': {}, 'update': {}, 'expected': {}, }, { 'dict': None, 'update': {}, 'expected': {}, }, { 'dict': {}, 'update': None, 'expected': {}, }, { 'dict': {}, 'update': None, 'expected': {}, }, { 'dict': None, 'update': {'name': 'Tom'}, 'expected': {'name': 'Tom'}, }, { 'dict': {}, 'update': {'name': 'Tom'}, 'expected': {'name': 'Tom'}, }, { 'dict': {'name': 'Tom'}, 'update': {}, 'expected': {'name': 'Tom'}, }, { 'dict': {'key1': 'value1', 'dict1': {'key1': 'value1', 'key2': 'value2'}}, 'update': {'key1': 'value1+', 'key2': 'value2', 'dict1': {'key3': 'value3'}}, 'expected': {'key1': 'value1+', 'key2': 'value2', 'dict1': {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}}, }, { 'dict': {'d1': {'d2': {'d3': {'k1': 'v1'}}}}, 'update': {'d1': {'d2': {'d3': {'k2': 'v2'}}}}, 'expected': {'d1': {'d2': {'d3': {'k1': 'v1', 'k2': 'v2'}}}}, }, { 'dict': {'timeout': 0, 'save': [[900, 1], [300, 10]]}, 'update': {'save': [[300, 20], [60, 10000]]}, 'expected': {'timeout': 0, 'save': [[300, 20], [60, 10000]]}, }, { 'dict': {'rpc_address': '0.0.0.0', 'broadcast_rpc_address': '0.0.0.0', 'listen_address': '0.0.0.0', 'seed_provider': [{ 'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider', 'parameters': [{'seeds': '0.0.0.0'}]}] }, 'update': {'rpc_address': '127.0.0.1', 'seed_provider': {'parameters': { 'seeds': '127.0.0.1'}} }, 'expected': {'rpc_address': '127.0.0.1', 'broadcast_rpc_address': '0.0.0.0', 'listen_address': '0.0.0.0', 'seed_provider': [{ 'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider', 'parameters': [{'seeds': '127.0.0.1'}]}] }, }, { 'dict': {'rpc_address': '127.0.0.1', 'broadcast_rpc_address': '0.0.0.0', 'listen_address': '0.0.0.0', 'seed_provider': [{ 'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider', 'parameters': [{'seeds': '0.0.0.0'}]}] }, 'update': {'seed_provider': [{'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider'}] }, 'expected': {'rpc_address': '127.0.0.1', 'broadcast_rpc_address': '0.0.0.0', 'listen_address': '0.0.0.0', 'seed_provider': [{ 'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider' }]}, }] count = 0 for record in data: count += 1 target = record['dict'] update = record['update'] expected = record['expected'] result = guestagent_utils.update_dict(update, target) msg = 'Unexpected result for test %s' % str(count) self.assertEqual(expected, result, msg) def test_build_file_path(self): self.assertEqual( 'base_dir/base_name', guestagent_utils.build_file_path('base_dir', 'base_name')) self.assertEqual( 'base_dir/base_name.ext1', guestagent_utils.build_file_path('base_dir', 'base_name', 'ext1')) self.assertEqual( 'base_dir/base_name.ext1.ext2', guestagent_utils.build_file_path( 'base_dir', 'base_name', 'ext1', 'ext2')) def test_flatten_expand_dict(self): self._assert_flatten_expand_dict({}, {}) self._assert_flatten_expand_dict({'ns1': 1}, {'ns1': 1}) self._assert_flatten_expand_dict( {'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}}, {'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10}) def _assert_flatten_expand_dict(self, nested_dict, flattened_dict): self.assertEqual( flattened_dict, guestagent_utils.flatten_dict(nested_dict)) self.assertEqual( nested_dict, guestagent_utils.expand_dict(flattened_dict)) def 
test_to_bytes(self): self.assertEqual('1024', guestagent_utils.to_bytes('1024')) self.assertEqual(1048576, guestagent_utils.to_bytes('1024K')) self.assertEqual(1073741824, guestagent_utils.to_bytes('1024M')) self.assertEqual(1099511627776, guestagent_utils.to_bytes('1024G')) self.assertEqual('1024T', guestagent_utils.to_bytes('1024T')) self.assertEqual(1024, guestagent_utils.to_bytes(1024)) self.assertEqual('Hello!', guestagent_utils.to_bytes('Hello!')) self.assertEqual('', guestagent_utils.to_bytes('')) self.assertIsNone(guestagent_utils.to_bytes(None)) @patch.object(pagination, 'paginate_object_list') def test_paginate_list(self, paginate_obj_mock): limit = Mock() marker = Mock() include_marker = Mock() test_list = [Mock(), Mock(), Mock()] guestagent_utils.paginate_list( test_list, limit=limit, marker=marker, include_marker=include_marker) paginate_obj_mock.assert_called_once_with( test_list, 'name', limit=limit, marker=marker, include_marker=include_marker) def test_serialize_list(self): test_list = [Mock(), Mock(), Mock()] with patch.object(guestagent_utils, 'paginate_list', return_value=(test_list[:2], test_list[-2]) ) as paginate_lst_mock: _, next_name = guestagent_utils.serialize_list(test_list) paginate_lst_mock.assert_called_once_with( test_list, limit=None, marker=None, include_marker=False) for item in paginate_lst_mock.return_value[0]: item.serialize.assert_called_once_with() self.assertEqual(paginate_lst_mock.return_value[1], next_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_manager.py0000644000175000017500000006126400000000000025714 0ustar00coreycorey00000000000000# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
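# The ManagerTest cases below exercise a MockManager whose app, status and
# configuration_manager properties are plain MagicMocks, so the assertions
# check the base Manager's orchestration rather than any real datastore
# behaviour.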
import getpass import os from mock import ANY from mock import DEFAULT from mock import MagicMock from mock import Mock from mock import patch from oslo_utils import encodeutils from proboscis.asserts import assert_equal from proboscis.asserts import assert_is_none from proboscis.asserts import assert_true from trove.common.context import TroveContext from trove.common import exception from trove.guestagent.common import operating_system from trove.guestagent.datastore import manager from trove.guestagent import guest_log from trove.guestagent.module import module_manager from trove import rpc from trove.tests.unittests import trove_testtools class MockManager(manager.Manager): def __init__(self): super(MockManager, self).__init__('mysql') self._app = MagicMock() self._status = MagicMock() self._configuration_manager = MagicMock() @property def app(self): return self._app @property def status(self): return self._status @property def configuration_manager(self): return self._configuration_manager def prepare(self, *args): args[0].notification = MagicMock() with patch.object(rpc, 'get_client'): return super(MockManager, self).prepare(*args) class ManagerTest(trove_testtools.TestCase): def setUp(self): super(ManagerTest, self).setUp() self.chmod_patch = patch.object(operating_system, 'chmod') self.chmod_mock = self.chmod_patch.start() self.addCleanup(self.chmod_patch.stop) self.manager = MockManager() self.context = TroveContext() self.log_name_sys = 'guest' self.log_name_user = 'general' self.prefix = 'log_prefix' self.container = 'log_container' self.size = 1024 self.published = 128 self.guest_log_user = guest_log.GuestLog( self.context, self.log_name_user, guest_log.LogType.USER, None, '/tmp/gen.log', True) self.guest_log_sys = guest_log.GuestLog( self.context, self.log_name_sys, guest_log.LogType.SYS, None, '/tmp/guest.log', True) for gl in [self.guest_log_user, self.guest_log_sys]: gl._container_name = self.container gl._refresh_details = MagicMock() gl._log_rotated = MagicMock(return_value=False) gl._publish_to_container = MagicMock() gl._delete_log_components = MagicMock() gl._object_prefix = MagicMock(return_value=self.prefix) gl._size = self.size gl._published_size = self.published self.manager._guest_log_cache = { self.log_name_user: self.guest_log_user, self.log_name_sys: self.guest_log_sys} self.expected_details_user = { 'status': 'Disabled', 'prefix': self.prefix, 'container': self.container, 'name': self.log_name_user, 'published': self.published, 'metafile': self.prefix + '_metafile', 'type': 'USER', 'pending': self.size - self.published} self.expected_details_sys = dict(self.expected_details_user) self.expected_details_sys['type'] = 'SYS' self.expected_details_sys['status'] = 'Enabled' self.expected_details_sys['name'] = self.log_name_sys self.expected_module_details = { 'name': 'mymod', 'type': 'ping', 'contents': 'e262cfe36134' } self.manager.module_manager = Mock() def tearDown(self): super(ManagerTest, self).tearDown() def test_update_status(self): self.manager._status.is_installed = True self.manager._status._is_restarting = False self.manager.update_status(self.context) self.assertTrue(self.manager.status.set_status.called) def test_guest_log_list(self): log_list = self.manager.guest_log_list(self.context) expected = [self.expected_details_sys, self.expected_details_user] assert_equal(self._flatten_list_of_dicts(expected), self._flatten_list_of_dicts(log_list), "Wrong list: %s (Expected: %s)" % ( self._flatten_list_of_dicts(log_list), 
self._flatten_list_of_dicts(expected))) def _flatten_list_of_dicts(self, lod): value = sorted("".join("%s%s" % (k, d[k]) for k in sorted(d.keys())) for d in lod) return "".join(sorted(value)) def test_guest_log_action_enable_disable(self): self.assertRaisesRegex(exception.BadRequest, "Cannot enable and disable", self.manager.guest_log_action, self.context, self.log_name_sys, True, True, False, False) def test_guest_log_action_enable_sys(self): self.assertRaisesRegex(exception.BadRequest, "Cannot enable a SYSTEM log", self.manager.guest_log_action, self.context, self.log_name_sys, True, False, False, False) def test_guest_log_action_disable_sys(self): self.assertRaisesRegex(exception.BadRequest, "Cannot disable a SYSTEM log", self.manager.guest_log_action, self.context, self.log_name_sys, False, True, False, False) def test_guest_log_action_publish_sys(self): with patch.object(os.path, 'isfile', return_value=True): log_details = self.manager.guest_log_action(self.context, self.log_name_sys, False, False, True, False) assert_equal(log_details, self.expected_details_sys, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_sys)) assert_equal( 1, self.guest_log_sys._publish_to_container.call_count) def test_guest_log_action_discard_sys(self): log_details = self.manager.guest_log_action(self.context, self.log_name_sys, False, False, False, True) assert_equal(log_details, self.expected_details_sys, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_sys)) assert_equal( 1, self.guest_log_sys._delete_log_components.call_count) def test_guest_log_action_enable_user(self): with patch.object(manager.Manager, 'guest_log_enable', return_value=False) as mock_enable: log_details = self.manager.guest_log_action(self.context, self.log_name_user, True, False, False, False) assert_equal(log_details, self.expected_details_user, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_user)) assert_equal(1, mock_enable.call_count) def test_guest_log_action_disable_user(self): with patch.object(manager.Manager, 'guest_log_enable', return_value=False) as mock_enable: self.guest_log_user._enabled = True log_details = self.manager.guest_log_action(self.context, self.log_name_user, False, True, False, False) assert_equal(log_details, self.expected_details_user, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_user)) assert_equal(1, mock_enable.call_count) def test_guest_log_action_publish_user(self): with patch.object(manager.Manager, 'guest_log_enable', return_value=False) as mock_enable: with patch.object(os.path, 'isfile', return_value=True): log_details = self.manager.guest_log_action(self.context, self.log_name_user, False, False, True, False) assert_equal(log_details, self.expected_details_user, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_user)) assert_equal(1, mock_enable.call_count) def test_guest_log_action_discard_user(self): log_details = self.manager.guest_log_action(self.context, self.log_name_user, False, False, False, True) assert_equal(log_details, self.expected_details_user, "Wrong details: %s (expected %s)" % (log_details, self.expected_details_user)) assert_equal(1, self.guest_log_user._delete_log_components.call_count) def test_set_guest_log_status_disabled(self): data = [ {'orig': guest_log.LogStatus.Enabled, 'new': guest_log.LogStatus.Disabled, 'expect': guest_log.LogStatus.Disabled}, {'orig': guest_log.LogStatus.Restart_Required, 'new': guest_log.LogStatus.Enabled, 'expect': 
guest_log.LogStatus.Restart_Required}, {'orig': guest_log.LogStatus.Restart_Required, 'new': guest_log.LogStatus.Restart_Completed, 'expect': guest_log.LogStatus.Restart_Completed}, {'orig': guest_log.LogStatus.Published, 'new': guest_log.LogStatus.Partial, 'expect': guest_log.LogStatus.Partial}, ] for datum in data: self.assert_guest_log_status(datum['orig'], datum['new'], datum['expect']) def assert_guest_log_status(self, original_status, new_status, expected_final_status): gl_cache = self.manager.get_guest_log_cache() gl_cache[self.log_name_sys]._status = original_status self.manager.set_guest_log_status(new_status, self.log_name_sys) assert_equal(gl_cache[self.log_name_sys].status, expected_final_status, "Unexpected status for '%s': %s' (Expected %s)" % (self.log_name_sys, gl_cache[self.log_name_sys].status, expected_final_status)) def test_build_log_file_name(self): current_owner = getpass.getuser() with patch.multiple(operating_system, exists=MagicMock(return_value=False), write_file=DEFAULT, create_directory=DEFAULT, chown=DEFAULT, chmod=DEFAULT) as os_mocks: log_file = self.manager.build_log_file_name(self.log_name_sys, current_owner) expected_filename = '%s/%s/%s-%s.log' % ( self.manager.GUEST_LOG_BASE_DIR, self.manager.GUEST_LOG_DATASTORE_DIRNAME, self.manager.manager, self.log_name_sys) expected_call_counts = {'exists': 1, 'write_file': 1, 'create_directory': 2, 'chown': 1, 'chmod': 1} self.assert_build_log_file_name(expected_filename, log_file, os_mocks, expected_call_counts) def assert_build_log_file_name(self, expected_filename, filename, mocks, call_counts): assert_equal(expected_filename, filename, "Unexpected filename: %s (expected %s)" % (filename, expected_filename)) for key in mocks.keys(): assert_true( mocks[key].call_count == call_counts[key], "%s called %d time(s)" % (key, mocks[key].call_count)) def test_build_log_file_name_with_dir(self): current_owner = getpass.getuser() log_dir = '/tmp' with patch.multiple(operating_system, exists=MagicMock(return_value=False), write_file=DEFAULT, create_directory=DEFAULT, chown=DEFAULT, chmod=DEFAULT) as os_mocks: log_file = self.manager.build_log_file_name(self.log_name_sys, current_owner, datastore_dir=log_dir) expected_filename = '%s/%s-%s.log' % ( log_dir, self.manager.manager, self.log_name_sys) expected_call_counts = {'exists': 1, 'write_file': 1, 'create_directory': 1, 'chown': 1, 'chmod': 1} self.assert_build_log_file_name(expected_filename, log_file, os_mocks, expected_call_counts) def test_validate_log_file(self): file_name = '/tmp/non-existent-file' current_owner = getpass.getuser() with patch.multiple(operating_system, exists=MagicMock(return_value=False), write_file=DEFAULT, chown=DEFAULT, chmod=DEFAULT) as os_mocks: log_file = self.manager.validate_log_file(file_name, current_owner) assert_equal(file_name, log_file, "Unexpected filename") for key in os_mocks.keys(): assert_true(os_mocks[key].call_count == 1, "%s not called" % key) def test_prepare_single(self): self.run_prepare_test(cluster_config=None) def test_prepare_single_no_users(self): self.run_prepare_test(cluster_config=None, users=None) def test_prepare_single_no_databases(self): self.run_prepare_test(cluster_config=None, databases=None) def test_prepare_single_no_root_password(self): self.run_prepare_test(cluster_config=None, root_password=None) def test_prepare_cluster(self): self.run_prepare_test() def run_prepare_test(self, packages=Mock(), databases=Mock(), memory_mb=Mock(), users=Mock(), device_path=Mock(), mount_point=Mock(), backup_info=Mock(), 
config_contents=Mock(), root_password=Mock(), overrides=Mock(), cluster_config=Mock(), snapshot=Mock()): self._assert_prepare(self.context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) def _assert_prepare(self, context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot): is_error_expected = False is_post_process_expected = cluster_config is not None with patch.multiple(self.manager, do_prepare=DEFAULT, post_prepare=DEFAULT, apply_overrides_on_prepare=DEFAULT, enable_root_on_prepare=DEFAULT, create_database=DEFAULT, create_user=DEFAULT): self.manager.prepare( context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) self.manager.status.begin_install.assert_called_once_with() self.manager.do_prepare.assert_called_once_with( context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) self.manager.apply_overrides_on_prepare.assert_called_once_with( context, overrides) self.manager.status.end_install( error_occurred=is_error_expected, post_processing=is_post_process_expected) self.manager.post_prepare.assert_called_once_with( context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) if not is_post_process_expected: if databases: self.manager.create_database.assert_called_once_with( context, databases) else: self.assertEqual( 0, self.manager.create_database.call_count) if users: self.manager.create_user.assert_called_once_with( context, users) else: self.assertEqual(0, self.manager.create_user.call_count) if not backup_info and root_password: (self.manager.enable_root_on_prepare. 
assert_called_once_with(context, root_password)) else: self.assertEqual( 0, self.manager.enable_root_on_prepare.call_count) else: self.assertEqual(0, self.manager.create_database.call_count) self.assertEqual(0, self.manager.create_user.call_count) self.assertEqual( 0, self.manager.enable_root_on_prepare.call_count) def test_apply_overrides_on_prepare(self): overrides = Mock() with patch.multiple(self.manager, update_overrides=DEFAULT, restart=DEFAULT): self.manager.apply_overrides_on_prepare(self.context, overrides) self.manager.update_overrides.assert_called_once_with( self.context, overrides) self.manager.restart.assert_called_once_with(self.context) @patch('trove.guestagent.datastore.manager.LOG') def test_apply_overrides_on_prepare_failure(self, mock_logging): packages = Mock() databases = Mock() memory_mb = Mock() users = Mock() device_path = Mock() mount_point = Mock() backup_info = Mock() config_contents = Mock() root_password = Mock() overrides = Mock() cluster_config = Mock() snapshot = Mock() expected_failure = Exception("Error in 'apply_overrides_on_prepare'.") with patch.multiple( self.manager, do_prepare=DEFAULT, apply_overrides_on_prepare=MagicMock( side_effect=expected_failure )): expected_msg = encodeutils.exception_to_unicode(expected_failure) self.assertRaisesRegex( Exception, expected_msg, self.manager.prepare, self.context, packages, databases, memory_mb, users, device_path, mount_point, backup_info, config_contents, root_password, overrides, cluster_config, snapshot) self.manager.status.begin_install.assert_called_once_with() self.manager.status.end_install( error_occurred=True, post_processing=ANY) @patch.object(operating_system, 'copy') @patch.object(operating_system, 'chown') def test_restore_directory_with_owner(self, chown_mock, copy_mock): restore_dir = '/restore_directory' restore_files = '/restore_directory/.' target_dir = '/target_directory' owner = 'owner' self.manager._restore_directory(restore_dir, target_dir, owner) copy_mock.assert_called_once_with(restore_files, target_dir, preserve=True, as_root=True) chown_mock.assert_called_once_with(path=target_dir, user=owner, group=owner, recursive=True, as_root=True) @patch.object(operating_system, 'copy') @patch.object(operating_system, 'chown') def test_restore_directory_without_owner(self, chown_mock, copy_mock): restore_dir = '/restore_directory' restore_files = '/restore_directory/.' 
        target_dir = '/target_directory'

        self.manager._restore_directory(restore_dir, target_dir)

        copy_mock.assert_called_once_with(restore_files, target_dir,
                                          preserve=True, as_root=True)
        chown_mock.assert_not_called()

    @patch.object(manager.Manager, '_restore_directory')
    @patch.object(operating_system, 'get_current_user', return_value='trove')
    def test_restore_home_directory(self, os_mock, restore_mock):
        saved_home_dir = '/old_home'
        with patch.object(os.path, 'expanduser',
                          return_value='/home/trove'):
            self.manager._restore_home_directory(saved_home_dir)

            os_mock.assert_any_call()
            restore_mock.assert_called_once_with(restore_dir=saved_home_dir,
                                                 target_dir='/home/trove',
                                                 owner='trove')

    def test_module_list(self):
        with patch.object(
                module_manager.ModuleManager, 'read_module_results',
                return_value=[self.expected_module_details]) as mock_rmr:
            module_list = self.manager.module_list(self.context)
            expected = [self.expected_module_details]
            assert_equal(self._flatten_list_of_dicts(expected),
                         self._flatten_list_of_dicts(module_list),
                         "Wrong list: %s (Expected: %s)" % (
                             self._flatten_list_of_dicts(module_list),
                             self._flatten_list_of_dicts(expected)))
            assert_equal(1, mock_rmr.call_count)

    def test_module_apply(self):
        with patch.object(
                module_manager.ModuleManager, 'apply_module',
                return_value=[self.expected_module_details]) as mock_am:
            module_details = self.manager.module_apply(
                self.context,
                [{'module': self.expected_module_details}])
            assert_equal([[self.expected_module_details]], module_details)
            assert_equal(1, mock_am.call_count)

    def test_module_remove(self):
        with patch.object(
                module_manager.ModuleManager, 'remove_module',
                return_value=[self.expected_module_details]) as mock_rm:
            module_details = self.manager.module_remove(
                self.context,
                {'module': self.expected_module_details})
            assert_is_none(module_details)
            assert_equal(1, mock_rm.call_count)


# File: trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_mariadb_manager.py

# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
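# The setUp() below starts a mock patcher by hand and registers its stop()
# with addCleanup(), so the patch is always undone even if setUp() or a test
# fails part-way. A minimal, self-contained sketch of the same lifecycle
# (standalone illustration; class and values are hypothetical, not part of
# the original module):

import unittest
from unittest import mock


class PatcherLifecycleSketch(unittest.TestCase):

    def setUp(self):
        patcher = mock.patch('os.getpid', return_value=4242)
        self.mock_getpid = patcher.start()
        self.addCleanup(patcher.stop)  # guaranteed to run after each test

    def test_patched_value(self):
        import os
        self.assertEqual(4242, os.getpid())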
from mock import MagicMock
from mock import patch

from trove.guestagent.datastore.experimental.mariadb import (
    manager as mariadb_manager)
from trove.guestagent.datastore.experimental.mariadb import (
    service as mariadb_service)
from trove.guestagent.datastore.mysql_common import service as mysql_service
from trove.tests.unittests.guestagent.test_datastore_manager import \
    DatastoreManagerTest


class GuestAgentManagerTest(DatastoreManagerTest):

    def setUp(self):
        super(GuestAgentManagerTest, self).setUp('mariadb')
        self.manager = mariadb_manager.Manager()
        patcher_rs = patch(
            'trove.guestagent.strategies.replication.get_instance')
        patcher_rs.start()
        self.addCleanup(patcher_rs.stop)

    @patch.object(mysql_service.BaseMySqlAppStatus, 'get',
                  new_callable=MagicMock)
    @patch.object(mariadb_service.MariaDBApp, 'install_cluster',
                  new_callable=MagicMock)
    def test_install_cluster(self, install_cluster, app_status_get):
        install_cluster.return_value = MagicMock()
        app_status_get.return_value = None

        replication_user = "repuser"
        configuration = "configuration"
        bootstrap = True
        self.manager.install_cluster(self.context, replication_user,
                                     configuration, bootstrap)
        app_status_get.assert_any_call()
        install_cluster.assert_called_with(
            replication_user, configuration, bootstrap)

    @patch.object(mysql_service.BaseMySqlAppStatus, 'get',
                  new_callable=MagicMock)
    @patch.object(mariadb_service.MariaDBApp, 'reset_admin_password',
                  new_callable=MagicMock)
    def test_reset_admin_password(self, reset_admin_password,
                                  app_status_get):
        reset_admin_password.return_value = None
        app_status_get.return_value = MagicMock()

        admin_password = "password"
        self.manager.reset_admin_password(self.context, admin_password)
        app_status_get.assert_any_call()
        reset_admin_password.assert_called_with(admin_password)


# File: trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_models.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
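# AgentHeartBeat.is_active (exercised at the end of this module) is, in
# essence, a timestamp-freshness check: a heartbeat counts as active while
# its 'updated_at' is younger than the AGENT_HEARTBEAT window. A minimal
# sketch of that logic (function name hypothetical, not the model's actual
# implementation):

from datetime import datetime, timedelta


def _is_heartbeat_active_sketch(updated_at, window_seconds):
    """Return True while the heartbeat is younger than the window."""
    return datetime.now() - updated_at < timedelta(seconds=window_seconds)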
from datetime import datetime

from mock import Mock, MagicMock, patch

from trove.common import timeutils
from trove.common import utils
from trove.db import models as dbmodels
from trove.db.sqlalchemy import api as dbapi
from trove.guestagent import models
from trove.tests.unittests import trove_testtools


class AgentHeartBeatTest(trove_testtools.TestCase):

    def setUp(self):
        super(AgentHeartBeatTest, self).setUp()
        self.origin_get_db_api = dbmodels.get_db_api
        self.origin_utcnow = timeutils.utcnow
        self.origin_db_api_save = dbapi.save
        self.origin_is_valid = dbmodels.DatabaseModelBase.is_valid
        self.origin_generate_uuid = utils.generate_uuid

    def tearDown(self):
        super(AgentHeartBeatTest, self).tearDown()
        dbmodels.get_db_api = self.origin_get_db_api
        timeutils.utcnow = self.origin_utcnow
        dbapi.save = self.origin_db_api_save
        dbmodels.DatabaseModelBase.is_valid = self.origin_is_valid
        utils.generate_uuid = self.origin_generate_uuid

    def test_create(self):
        utils.generate_uuid = Mock()
        dbapi.save = MagicMock(
            return_value=dbmodels.DatabaseModelBase)
        dbmodels.DatabaseModelBase.is_valid = Mock(return_value=True)
        models.AgentHeartBeat.create()
        self.assertEqual(1, utils.generate_uuid.call_count)
        self.assertEqual(3,
                         dbmodels.DatabaseModelBase.is_valid.call_count)

    @patch('trove.db.models.DatabaseModelBase')
    def test_save(self, dmb_mock):
        timeutils.utcnow = Mock()
        dbmodels.get_db_api = MagicMock(
            return_value=dbmodels.DatabaseModelBase)
        dbapi.save = Mock()
        dbmodels.DatabaseModelBase.is_valid = Mock(return_value=True)
        self.heartBeat = models.AgentHeartBeat()
        self.heartBeat.save()
        self.assertEqual(1, timeutils.utcnow.call_count)

    def test_is_active(self):
        models.AGENT_HEARTBEAT = 10000000000
        mock = models.AgentHeartBeat()
        models.AgentHeartBeat.__setitem__(
            mock, 'updated_at', datetime.now())
        self.assertTrue(models.AgentHeartBeat.is_active(mock))


# File: trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_mysql_manager.py

# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from mock import DEFAULT
from mock import MagicMock
from mock import patch
from proboscis.asserts import assert_equal
from testtools.matchers import Is, Equals, Not

from trove.common.db.mysql import models
from trove.common.exception import InsufficientSpaceForReplica
from trove.common.exception import ProcessExecutionError
from trove.common import instance as rd_instance
from trove.guestagent import backup
from trove.guestagent.common import operating_system

# TODO(atomic77) The test cases should be made configurable
# to make it easier to test the various derived datastores.
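# One lightweight shape such configurability could take (a sketch only;
# the table and class below are illustrative, not the project's actual
# mechanism): drive a shared test body from a table of datastore
# parameters and use subTest() to keep per-datastore failures separate.

import unittest

DATASTORE_CASES_SKETCH = [
    ('mysql', '5.7'),
    ('mariadb', '10.4'),
]


class DataDrivenDatastoreSketch(unittest.TestCase):

    def test_each_datastore_case(self):
        for datastore, version in DATASTORE_CASES_SKETCH:
            with self.subTest(datastore=datastore):
                # A real test would invoke the shared assertions here;
                # this sketch only checks the table's shape.
                self.assertIsInstance(version, str)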
from trove.guestagent.datastore.mysql.manager import Manager import trove.guestagent.datastore.mysql.service as dbaas from trove.guestagent import dbaas as base_dbaas from trove.guestagent import pkg from trove.guestagent import volume from trove.guestagent.volume import VolumeDevice from trove.tests.unittests.guestagent.test_datastore_manager import \ DatastoreManagerTest from trove.tests.unittests import trove_testtools class GuestAgentManagerTest(DatastoreManagerTest): def setUp(self): super(GuestAgentManagerTest, self).setUp('mysql') self.context = trove_testtools.TroveTestContext(self) self.replication_strategy = 'MysqlGTIDReplication' self.patch_rs = patch( 'trove.guestagent.strategies.replication.get_strategy', return_value=self.replication_strategy) self.mock_rs = self.patch_rs.start() self.addCleanup(self.patch_rs.stop) self.manager = Manager() self.origin_MySqlAppStatus = dbaas.MySqlAppStatus.get self.origin_os_path_exists = os.path.exists self.origin_format = volume.VolumeDevice.format self.origin_migrate_data = volume.VolumeDevice.migrate_data self.origin_mount = volume.VolumeDevice.mount self.origin_unmount = volume.VolumeDevice.unmount self.origin_mount_points = volume.VolumeDevice.mount_points self.origin_stop_mysql = dbaas.MySqlApp.stop_db self.origin_start_mysql = dbaas.MySqlApp.start_mysql self.origin_update_overrides = dbaas.MySqlApp.update_overrides self.origin_install_if_needed = dbaas.MySqlApp.install_if_needed self.origin_secure = dbaas.MySqlApp.secure self.origin_secure_root = dbaas.MySqlApp.secure_root self.origin_pkg_is_installed = pkg.Package.pkg_is_installed self.origin_os_path_exists = os.path.exists self.origin_chown = operating_system.chown # set up common mock objects, etc. for replication testing self.patcher_gfvs = patch( 'trove.guestagent.dbaas.get_filesystem_volume_stats') self.patcher_rs = patch( 'trove.guestagent.strategies.replication.get_instance') self.mock_gfvs_class = self.patcher_gfvs.start() self.mock_rs_class = self.patcher_rs.start() def tearDown(self): super(GuestAgentManagerTest, self).tearDown() dbaas.MySqlAppStatus.get = self.origin_MySqlAppStatus os.path.exists = self.origin_os_path_exists volume.VolumeDevice.format = self.origin_format volume.VolumeDevice.migrate_data = self.origin_migrate_data volume.VolumeDevice.mount = self.origin_mount volume.VolumeDevice.unmount = self.origin_unmount volume.VolumeDevice.mount_points = self.origin_mount_points dbaas.MySqlApp.stop_db = self.origin_stop_mysql dbaas.MySqlApp.start_mysql = self.origin_start_mysql dbaas.MySqlApp.update_overrides = self.origin_update_overrides dbaas.MySqlApp.install_if_needed = self.origin_install_if_needed dbaas.MySqlApp.secure = self.origin_secure dbaas.MySqlApp.secure_root = self.origin_secure_root operating_system.chown = self.origin_chown pkg.Package.pkg_is_installed = self.origin_pkg_is_installed os.path.exists = self.origin_os_path_exists # teardown the replication mock objects self.patcher_gfvs.stop() self.patcher_rs.stop() def test_update_status(self): mock_status = MagicMock() mock_status.is_installed = True mock_status._is_restarting = False dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.update_status(self.context) self.assertTrue(mock_status.set_status.called) def _empty_user(self): return models.MySQLUser(deserializing=True) def test_valid_host_name(self): test_host = "192.58.197.0/255.255.255.0" user = self._empty_user() user.host = test_host self.assertEqual(test_host, user.host) @patch.object(dbaas.MySqlAdmin, 'create_database') 
def test_create_database(self, create_db_mock): self.manager.create_database(self.context, ['db1']) create_db_mock.assert_any_call(['db1']) @patch.object(dbaas.MySqlAdmin, 'create_user') def test_create_user(self, create_user_mock): self.manager.create_user(self.context, ['user1']) create_user_mock.assert_any_call(['user1']) @patch.object(dbaas.MySqlAdmin, 'delete_database') def test_delete_database(self, delete_database_mock): databases = ['db1'] self.manager.delete_database(self.context, databases) delete_database_mock.assert_any_call(databases) @patch.object(dbaas.MySqlAdmin, 'delete_user') def test_delete_user(self, delete_user_mock): user = ['user1'] self.manager.delete_user(self.context, user) delete_user_mock.assert_any_call(user) @patch.object(dbaas.MySqlAdmin, 'grant_access') def test_grant_access(self, grant_access_mock): username = "test_user" hostname = "test_host" databases = ["test_database"] self.manager.grant_access(self.context, username, hostname, databases) grant_access_mock.assert_any_call(username, hostname, databases) @patch.object(dbaas.MySqlAdmin, 'list_databases', return_value=['database1']) def test_list_databases(self, list_databases_mock): databases = self.manager.list_databases(self.context) self.assertThat(databases, Not(Is(None))) self.assertThat(databases, Equals(list_databases_mock.return_value)) list_databases_mock.assert_any_call(None, None, False) @patch.object(dbaas.MySqlAdmin, 'list_users', return_value=['user1']) def test_list_users(self, list_users_mock): users = self.manager.list_users(self.context) self.assertThat(users, Equals(list_users_mock.return_value)) dbaas.MySqlAdmin.list_users.assert_any_call(None, None, False) @patch.object(dbaas.MySqlAdmin, 'get_user', return_value=['user1']) def test_get_users(self, get_user_mock): username = ['user1'] hostname = ['host'] users = self.manager.get_user(self.context, username, hostname) self.assertThat(users, Equals(get_user_mock.return_value)) get_user_mock.assert_any_call(username, hostname) @patch.object(dbaas.MySqlAdmin, 'enable_root', return_value='user_id_stuff') def test_enable_root(self, enable_root_mock): user_id = self.manager.enable_root(self.context) self.assertThat(user_id, Is(enable_root_mock.return_value)) enable_root_mock.assert_any_call() @patch.object(dbaas.MySqlAdmin, 'disable_root') def test_disable_root(self, disable_root_mock): self.manager.disable_root(self.context) disable_root_mock.assert_any_call() @patch.object(dbaas.MySqlAdmin, 'is_root_enabled', return_value=True) def test_is_root_enabled(self, is_root_enabled_mock): is_enabled = self.manager.is_root_enabled(self.context) self.assertThat(is_enabled, Is(is_root_enabled_mock.return_value)) is_root_enabled_mock.assert_any_call() @patch.object(backup, 'backup') def test_create_backup(self, backup_mock): # entry point Manager().create_backup(self.context, 'backup_id_123') # assertions backup_mock.assert_any_call(self.context, 'backup_id_123') def test_prepare_device_path_true(self): self._prepare_dynamic() def test_prepare_device_path_false(self): self._prepare_dynamic(device_path=None) def test_prepare_device_path_mounted(self): self._prepare_dynamic(is_mounted=True) def test_prepare_mysql_not_installed(self): self._prepare_dynamic(is_mysql_installed=False) def test_prepare_mysql_from_backup(self): self._prepare_dynamic(backup_id='backup_id_123abc') def test_prepare_mysql_from_backup_with_root(self): self._prepare_dynamic(backup_id='backup_id_123abc', is_root_enabled=True) def test_prepare_mysql_with_root_password(self): 
self._prepare_dynamic(root_password='some_password') def test_prepare_mysql_with_users_and_databases(self): self._prepare_dynamic(databases=['db1'], users=['user1']) def test_prepare_mysql_with_snapshot(self): snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': 1.0}, 'config': None} total_size = snapshot['dataset']['dataset_size'] + 1 self.mock_gfvs_class.return_value = {'total': total_size} self._prepare_dynamic(snapshot=snapshot) @patch.multiple(dbaas.MySqlAdmin, create_user=DEFAULT, create_database=DEFAULT, enable_root=DEFAULT) @patch.object(backup, 'restore') def _prepare_dynamic(self, restore_mock, create_user, create_database, enable_root, device_path='/dev/vdb', is_mysql_installed=True, backup_id=None, is_root_enabled=False, root_password=None, overrides=None, is_mounted=False, databases=None, users=None, snapshot=None): # covering all outcomes is starting to cause trouble here COUNT = 1 if device_path else 0 backup_info = None if backup_id is not None: backup_info = {'id': backup_id, 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', } # TODO(juice): this should stub an instance of the MySqlAppStatus mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) mock_status.begin_install = MagicMock(return_value=None) VolumeDevice.format = MagicMock(return_value=None) VolumeDevice.migrate_data = MagicMock(return_value=None) VolumeDevice.mount = MagicMock(return_value=None) mount_points = [] if is_mounted: mount_points = ['/mnt'] VolumeDevice.mount_points = MagicMock(return_value=mount_points) VolumeDevice.unmount = MagicMock(return_value=None) set_data_dir_patcher = patch.object(dbaas.MySqlApp, 'set_data_dir', return_value='/var/lib/mysql') self.addCleanup(set_data_dir_patcher.stop) set_data_dir_patcher.start() dbaas.MySqlApp.stop_db = MagicMock(return_value=None) dbaas.MySqlApp.start_mysql = MagicMock(return_value=None) dbaas.MySqlApp.update_overrides = MagicMock(return_value=None) dbaas.MySqlApp.install_if_needed = MagicMock(return_value=None) dbaas.MySqlApp.secure = MagicMock(return_value=None) dbaas.MySqlApp.secure_root = MagicMock(return_value=None) pkg.Package.pkg_is_installed = MagicMock( return_value=is_mysql_installed) operating_system.chown = MagicMock(return_value=None) os.path.exists = MagicMock(return_value=True) mock_replication = MagicMock() mock_replication.enable_as_slave = MagicMock() self.mock_rs_class.return_value = mock_replication with patch.object(dbaas.MySqlAdmin, 'is_root_enabled', return_value=is_root_enabled): self.manager.prepare(context=self.context, packages=None, memory_mb='2048', databases=databases, users=users, device_path=device_path, mount_point='/var/lib/mysql', backup_info=backup_info, root_password=root_password, overrides=overrides, cluster_config=None, snapshot=snapshot) # verification/assertion mock_status.begin_install.assert_any_call() self.assertEqual(COUNT, VolumeDevice.format.call_count) self.assertEqual(COUNT, VolumeDevice.migrate_data.call_count) self.assertEqual(COUNT, VolumeDevice.mount_points.call_count) self.assertEqual(COUNT, dbaas.MySqlApp.stop_db.call_count) if is_mounted: self.assertEqual(1, VolumeDevice.unmount.call_count) else: self.assertEqual(0, VolumeDevice.unmount.call_count) if backup_info: restore_mock.assert_any_call(self.context, backup_info, '/var/lib/mysql/data') dbaas.MySqlApp.install_if_needed.assert_any_call(None) # We don't need to make sure the exact contents are there dbaas.MySqlApp.secure.assert_any_call(None) 
dbaas.MySqlApp.secure_root.assert_any_call( secure_remote_root=not is_root_enabled) if root_password: dbaas.MySqlAdmin.enable_root.assert_any_call(root_password) if databases: dbaas.MySqlAdmin.create_database.assert_any_call(databases) else: self.assertFalse(dbaas.MySqlAdmin.create_database.called) if users: dbaas.MySqlAdmin.create_user.assert_any_call(users) else: self.assertFalse(dbaas.MySqlAdmin.create_user.called) if snapshot: self.assertEqual(1, mock_replication.enable_as_slave.call_count) else: self.assertEqual(0, mock_replication.enable_as_slave.call_count) def test_get_replication_snapshot(self): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) snapshot_id = 'my_snapshot_id' log_position = 123456789 master_ref = 'my_master' used_size = 1.0 total_size = 2.0 mock_replication = MagicMock() mock_replication.enable_as_master = MagicMock() mock_replication.snapshot_for_replication = MagicMock( return_value=(snapshot_id, log_position)) mock_replication.get_master_ref = MagicMock( return_value=master_ref) self.mock_rs_class.return_value = mock_replication self.mock_gfvs_class.return_value = ( {'used': used_size, 'total': total_size}) expected_replication_snapshot = { 'dataset': { 'datastore_manager': self.manager.manager, 'dataset_size': used_size, 'volume_size': total_size, 'snapshot_id': snapshot_id }, 'replication_strategy': self.replication_strategy, 'master': master_ref, 'log_position': log_position } snapshot_info = None replica_source_config = None # entry point replication_snapshot = ( self.manager.get_replication_snapshot(self.context, snapshot_info, replica_source_config)) # assertions self.assertEqual(expected_replication_snapshot, replication_snapshot) self.assertEqual(1, mock_replication.enable_as_master.call_count) self.assertEqual( 1, mock_replication.snapshot_for_replication.call_count) self.assertEqual(1, mock_replication.get_master_ref.call_count) def test_attach_replication_slave_valid(self): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) total_size = 2.0 dataset_size = 1.0 mock_replication = MagicMock() mock_replication.enable_as_slave = MagicMock() self.mock_rs_class.return_value = mock_replication self.mock_gfvs_class.return_value = {'total': total_size} snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': dataset_size}} # entry point self.manager.attach_replica(self.context, snapshot, None) # assertions self.assertEqual(1, mock_replication.enable_as_slave.call_count) @patch('trove.guestagent.datastore.mysql_common.manager.LOG') def test_attach_replication_slave_invalid(self, *args): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) total_size = 2.0 dataset_size = 3.0 mock_replication = MagicMock() mock_replication.enable_as_slave = MagicMock() self.mock_rs_class.return_value = mock_replication self.mock_gfvs_class.return_value = {'total': total_size} snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': dataset_size}} # entry point self.assertRaises(InsufficientSpaceForReplica, self.manager.attach_replica, self.context, snapshot, None) # assertions self.assertEqual(0, mock_replication.enable_as_slave.call_count) def test_detach_replica(self): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) mock_replication = MagicMock() mock_replication.detach_slave = MagicMock() self.mock_rs_class.return_value = mock_replication # entry point 
self.manager.detach_replica(self.context) # assertions self.assertEqual(1, mock_replication.detach_slave.call_count) def test_demote_replication_master(self): mock_status = MagicMock() dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) mock_replication = MagicMock() mock_replication.demote_master = MagicMock() self.mock_rs_class.return_value = mock_replication # entry point self.manager.demote_replication_master(self.context) # assertions self.assertEqual(1, mock_replication.demote_master.call_count) def test_get_master_UUID(self): app = dbaas.MySqlApp(None) def test_case(slave_status, expected_value): with patch.object(dbaas.MySqlApp, '_get_slave_status', return_value=slave_status): assert_equal(app._get_master_UUID(), expected_value) test_case({'Master_UUID': '2a5b-2064-32fb'}, '2a5b-2064-32fb') test_case({'Master_UUID': ''}, None) test_case({}, None) def test_get_last_txn(self): def test_case(gtid_list, expected_value): with patch.object(dbaas.MySqlApp, '_get_gtid_executed', return_value=gtid_list): txn = self.manager.get_last_txn(self.context) assert_equal(txn, expected_value) with patch.object(dbaas.MySqlApp, '_get_slave_status', return_value={'Master_UUID': '2a5b-2064-32fb'}): test_case('2a5b-2064-32fb:1', ('2a5b-2064-32fb', 1)) test_case('2a5b-2064-32fb:1-5', ('2a5b-2064-32fb', 5)) test_case('2a5b-2064-32fb:1,4b4-23:5', ('2a5b-2064-32fb', 1)) test_case('4b4-23:5,2a5b-2064-32fb:1', ('2a5b-2064-32fb', 1)) test_case('4b-23:5,2a5b-2064-32fb:1,25:3-4', ('2a5b-2064-32fb', 1)) test_case('4b4-23:1-5,2a5b-2064-32fb:1-10', ('2a5b-2064-32fb', 10)) with patch.object(dbaas.MySqlApp, '_get_slave_status', return_value={'Master_UUID': ''}): test_case('2a5b-2064-32fb:1', (None, 0)) with patch.object(dbaas.MySqlApp, '_get_slave_status', return_value={}): test_case('2a5b-2064-32fb:1', (None, 0)) def test_rpc_ping(self): self.assertTrue(self.manager.rpc_ping(self.context)) @patch.object(dbaas.MySqlAdmin, 'change_passwords') def test_change_passwords(self, change_passwords_mock): self.manager.change_passwords( self.context, [{'name': 'test_user', 'password': 'testpwd'}]) change_passwords_mock.assert_any_call( [{'name': 'test_user', 'password': 'testpwd'}]) @patch.object(dbaas.MySqlAdmin, 'update_attributes') def test_update_attributes(self, update_attr_mock): self.manager.update_attributes(self.context, 'test_user', '%', {'password': 'testpwd'}) update_attr_mock.assert_any_call('test_user', '%', {'password': 'testpwd'}) @patch.object(dbaas.MySqlApp, 'reset_configuration') def test_reset_configuration(self, reset_config_mock): dbaas.MySqlAppStatus.get = MagicMock(return_value=MagicMock()) configuration = {'config_contents': 'some junk'} self.manager.reset_configuration(self.context, configuration) dbaas.MySqlAppStatus.get.assert_any_call() reset_config_mock.assert_any_call({'config_contents': 'some junk'}) @patch.object(dbaas.MySqlAdmin, 'revoke_access') def test_revoke_access(self, revoke_access_mock): self.manager.revoke_access(self.context, 'test_user', '%', 'test_db') revoke_access_mock.assert_any_call('test_user', '%', 'test_db') @patch.object(dbaas.MySqlAdmin, 'list_access', return_value=['database1']) def test_list_access(self, list_access_mock): access = self.manager.list_access(self.context, 'test_user', '%') self.assertEqual(list_access_mock.return_value, access) list_access_mock.assert_any_call('test_user', '%') @patch.object(dbaas.MySqlApp, 'restart') def test_restart(self, restart_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = 
MagicMock(return_value=mock_status) self.manager.restart(self.context) dbaas.MySqlAppStatus.get.assert_any_call() restart_mock.assert_any_call() @patch.object(dbaas.MySqlApp, 'start_db_with_conf_changes') def test_start_db_with_conf_changes(self, start_db_mock): mock_status = MagicMock() configuration = {'config_contents': 'some junk'} self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.start_db_with_conf_changes(self.context, configuration) dbaas.MySqlAppStatus.get.assert_any_call() start_db_mock.assert_any_call({'config_contents': 'some junk'}) def test_stop_db(self): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) dbaas.MySqlApp.stop_db = MagicMock(return_value=None) self.manager.stop_db(self.context) dbaas.MySqlAppStatus.get.assert_any_call() dbaas.MySqlApp.stop_db.assert_any_call(do_not_start_on_reboot=False) def test_get_filesystem_stats(self): with patch.object(base_dbaas, 'get_filesystem_volume_stats'): self.manager.get_filesystem_stats(self.context, '/var/lib/mysql') base_dbaas.get_filesystem_volume_stats.assert_any_call( '/var/lib/mysql') def test_mount_volume(self): with patch.object(volume.VolumeDevice, 'mount', return_value=None): self.manager.mount_volume(self.context, device_path='/dev/vdb', mount_point='/var/lib/mysql') test_mount = volume.VolumeDevice.mount.call_args_list[0] test_mount.assert_called_with('/var/lib/mysql', False) def test_unmount_volume(self): with patch.object(volume.VolumeDevice, 'unmount', return_value=None): self.manager.unmount_volume(self.context, device_path='/dev/vdb') test_unmount = volume.VolumeDevice.unmount.call_args_list[0] test_unmount.assert_called_with('/var/lib/mysql') def test_resize_fs(self): with patch.object(volume.VolumeDevice, 'resize_fs', return_value=None): self.manager.resize_fs(self.context, device_path='/dev/vdb') test_resize_fs = volume.VolumeDevice.resize_fs.call_args_list[0] test_resize_fs.assert_called_with('/var/lib/mysql') @patch.object(dbaas.MySqlApp, 'remove_overrides') def test_update_overrides(self, remove_config_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) dbaas.MySqlApp.update_overrides = MagicMock(return_value=None) self.manager.update_overrides(self.context, 'something_overrides') dbaas.MySqlAppStatus.get.assert_any_call() remove_config_mock.assert_not_called() dbaas.MySqlApp.update_overrides.assert_any_call('something_overrides') @patch.object(dbaas.MySqlApp, 'remove_overrides') def test_update_overrides_with_remove(self, remove_overrides_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) dbaas.MySqlApp.update_overrides = MagicMock(return_value=None) self.manager.update_overrides(self.context, 'something_overrides', True) dbaas.MySqlAppStatus.get.assert_any_call() remove_overrides_mock.assert_any_call() dbaas.MySqlApp.update_overrides.assert_any_call('something_overrides') @patch.object(dbaas.MySqlApp, 'apply_overrides') def test_apply_overrides(self, apply_overrides_mock): mock_status = MagicMock() override = {'some_key': 'some value'} self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.apply_overrides(self.context, override) dbaas.MySqlAppStatus.get.assert_any_call() apply_overrides_mock.assert_any_call({'some_key': 'some value'}) 
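    # The tests in this class repeat one delegation pattern: patch the
    # dbaas-layer call, invoke the manager entry point, then assert the
    # arguments were forwarded unchanged. A condensed, self-contained
    # sketch of that pattern (names hypothetical, not part of the
    # original suite):
    def test_delegation_pattern_sketch(self):
        class _Facade(object):
            def __init__(self, service):
                self._service = service

            def do_work(self, payload):
                # Forward the payload to the underlying service verbatim.
                return self._service.do_work(payload)

        service = MagicMock()
        service.do_work.return_value = 'ok'
        self.assertEqual('ok', _Facade(service).do_work({'k': 'v'}))
        service.do_work.assert_called_once_with({'k': 'v'})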
@patch.object(dbaas.MySqlApp, 'get_txn_count', return_value=(9879)) def test_get_txn_count(self, get_txn_count_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) txn_count = self.manager.get_txn_count(self.context) self.assertEqual(get_txn_count_mock.return_value, txn_count) dbaas.MySqlAppStatus.get.assert_any_call() get_txn_count_mock.assert_any_call() @patch.object(dbaas.MySqlApp, 'get_latest_txn_id', return_value=('2a5b-2064-32fb:1')) def test_get_latest_txn_id(self, get_latest_txn_id_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) latest_txn_id = self.manager.get_latest_txn_id(self.context) self.assertEqual(get_latest_txn_id_mock.return_value, latest_txn_id) dbaas.MySqlAppStatus.get.assert_any_call() get_latest_txn_id_mock.assert_any_call() @patch.object(dbaas.MySqlApp, 'wait_for_txn') def test_wait_for_txn(self, wait_for_txn_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.wait_for_txn(self.context, '4b4-23:5,2a5b-2064-32fb:1') dbaas.MySqlAppStatus.get.assert_any_call() wait_for_txn_mock.assert_any_call('4b4-23:5,2a5b-2064-32fb:1') @patch.object(dbaas.MySqlApp, 'make_read_only') def test_make_read_only(self, make_read_only_mock): mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) self.manager.make_read_only(self.context, 'ON') dbaas.MySqlAppStatus.get.assert_any_call() make_read_only_mock.assert_any_call('ON') def test_cleanup_source_on_replica_detach(self): mock_replication = MagicMock() mock_replication.cleanup_source_on_replica_detach = MagicMock() self.mock_rs_class.return_value = mock_replication snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': '1.0'}} # entry point self.manager.cleanup_source_on_replica_detach(self.context, snapshot) # assertions self.assertEqual( 1, mock_replication.cleanup_source_on_replica_detach.call_count) def test_get_replica_context(self): replication_user = { 'name': 'repl_user', 'password': 'repl_pwd' } master_ref = { 'host': '1.2.3.4', 'port': 3306 } rep_info = { 'master': master_ref, 'log_position': { 'replication_user': replication_user } } mock_replication = MagicMock() mock_replication.get_replica_context = MagicMock(return_value=rep_info) self.mock_rs_class.return_value = mock_replication # entry point replica_info = self.manager.get_replica_context(self.context) # assertions self.assertEqual(1, mock_replication.get_replica_context.call_count) self.assertEqual(rep_info, replica_info) def test_enable_as_master(self): mock_replication = MagicMock() mock_replication.enable_as_master = MagicMock() self.mock_rs_class.return_value = mock_replication # entry point self.manager.enable_as_master(self.context, None) # assertions self.assertEqual(mock_replication.enable_as_master.call_count, 1) @patch('trove.guestagent.datastore.mysql_common.manager.LOG') def test__perform_restore(self, *args): backup_info = {'id': 'backup_id_123abc', 'location': 'fake-location', 'type': 'InnoBackupEx', 'checksum': 'fake-checksum', } mock_status = MagicMock() self.manager.appStatus = mock_status dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) with patch.object(backup, 'restore', side_effect=ProcessExecutionError): 
            self.assertRaises(ProcessExecutionError,
                              self.manager._perform_restore,
                              backup_info, self.context,
                              '/var/lib/mysql', app)
            app.status.set_status.assert_called_with(
                rd_instance.ServiceStatuses.FAILED)


# File: trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_operating_system.py

# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import re
import stat
import tempfile

from mock import call, patch, mock_open
from oslo_concurrency.processutils import UnknownArgumentError
import six
from testtools import ExpectedException

from trove.common import exception
from trove.common.stream_codecs import (
    Base64Codec, IdentityCodec, IniCodec, JsonCodec, KeyValueCodec,
    PropertiesCodec, XmlCodec, YamlCodec)
from trove.common import utils
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.tests.unittests import trove_testtools


class TestOperatingSystem(trove_testtools.TestCase):

    def test_base64_codec(self):
        data = "Line 1\nLine 2\n"
        # Base64Codec.deserialize returns bytes instead of string.
        self._test_file_codec(data, Base64Codec(),
                              expected_data=data.encode('utf-8'))

        # when encoding is reversed for Base64Codec, reading from files
        # will call Base64Codec.serialize which returns string.
        data = "TGluZSAxCkxpbmUgMgo="
        self._test_file_codec(data, Base64Codec(), reverse_encoding=True)

        data = "5Am9+y0wTwqUx39sMMBg3611FWg="
        self._test_file_codec(data, Base64Codec(), reverse_encoding=True)

    def test_identity_file_codec(self):
        data = ("Lorem Ipsum, Lorem Ipsum\n"
                "Lorem Ipsum, Lorem Ipsum\n"
                "Lorem Ipsum, Lorem Ipsum\n")
        self._test_file_codec(data, IdentityCodec())

    def test_ini_file_codec(self):
        data_no_none = {"Section1": {"s1k1": 's1v1',
                                     "s1k2": 3.1415926535},
                        "Section2": {"s2k1": 1,
                                     "s2k2": True}}
        self._test_file_codec(data_no_none, IniCodec())

        data_with_none = {"Section1": {"s1k1": 's1v1',
                                       "s1k2": 3.1415926535},
                          "Section2": {"s2k1": 1,
                                       "s2k2": True,
                                       "s2k3": None}}

        # Keys with None values will be written without value.
        self._test_file_codec(data_with_none, IniCodec())

        # None will be replaced with 'default_value'.
default_value = 1 expected_data = guestagent_utils.update_dict( {"Section2": {"s2k3": default_value}}, dict(data_with_none)) self._test_file_codec(data_with_none, IniCodec(default_value=default_value), expected_data=expected_data) def test_yaml_file_codec(self): data = {"Section1": 's1v1', "Section2": {"s2k1": '1', "s2k2": 'True'}, "Section3": {"Section4": {"s4k1": '3.1415926535', "s4k2": None}}, "Section5": {"s5k1": 1, "s5k2": True}, "Section6": {"Section7": {"s7k1": 3.1415926535, "s7k2": None}} } self._test_file_codec(data, YamlCodec()) self._test_file_codec(data, YamlCodec(default_flow_style=True)) def test_properties_file_codec(self): data = {'key1': [1, "str1", '127.0.0.1', 3.1415926535, True, None], 'key2': [2.0, 3, 0, "str1 str2"], 'key3': ['str1', 'str2'], 'key4': [], 'key5': 5000, 'key6': 'str1', 'key7': 0, 'key8': None, 'key9': [['str1', 'str2'], ['str3', 'str4']], 'key10': [['str1', 'str2', 'str3'], ['str3', 'str4'], 'str5'], 'key11': True } self._test_file_codec(data, PropertiesCodec()) self._test_file_codec(data, PropertiesCodec( string_mappings={'yes': True, 'no': False, "''": None})) def test_key_value_file_codec(self): data = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} self._test_file_codec(data, KeyValueCodec()) def test_json_file_codec(self): data = {"Section1": 's1v1', "Section2": {"s2k1": '1', "s2k2": 'True'}, "Section3": {"Section4": {"s4k1": '3.1415926535', "s4k2": None}}, "Section5": {"s5k1": 1, "s5k2": True}, "Section6": {"Section7": {"s7k1": 3.1415926535, "s7k2": None}} } self._test_file_codec(data, JsonCodec()) def test_xml_file_codec(self): data = {'document': {'@name': 'mydocument', '@ttl': '10', 'author': {'@name': 'Jycll ;-)'}, 'page': [{'@number': '1', 'paragraph': ['lorem ipsum', 'more lorem ipsum']}, {'@number': '1', 'paragraph': ['lorem ipsum', 'more lorem ipsum']}] } } self._test_file_codec(data, XmlCodec()) def _test_file_codec(self, data, read_codec, write_codec=None, expected_data=None, expected_exception=None, reverse_encoding=False): write_codec = write_codec or read_codec with tempfile.NamedTemporaryFile() as test_file: encode = True decode = True if reverse_encoding: encode = False decode = False if expected_exception: with expected_exception: operating_system.write_file(test_file.name, data, codec=write_codec, encode=encode) operating_system.read_file(test_file.name, codec=read_codec, decode=decode) else: operating_system.write_file(test_file.name, data, codec=write_codec, encode=encode) read = operating_system.read_file(test_file.name, codec=read_codec, decode=decode) if expected_data is not None: self.assertEqual(expected_data, read) else: self.assertEqual(data, read) def test_read_write_file_input_validation(self): with ExpectedException(exception.UnprocessableEntity, "File does not exist: None"): operating_system.read_file(None) with ExpectedException(exception.UnprocessableEntity, "File does not exist: /__DOES_NOT_EXIST__"): operating_system.read_file('/__DOES_NOT_EXIST__') with ExpectedException(exception.UnprocessableEntity, "Invalid path: None"): operating_system.write_file(None, {}) @patch.object(operating_system, 'copy') def test_write_file_as_root(self, copy_mock): target_file = tempfile.NamedTemporaryFile() temp_file = tempfile.NamedTemporaryFile('w') with patch('tempfile.NamedTemporaryFile', return_value=temp_file): operating_system.write_file( target_file.name, "Lorem Ipsum", as_root=True) copy_mock.assert_called_once_with( temp_file.name, target_file.name, force=True, as_root=True) 
self.assertFalse(os.path.exists(temp_file.name)) @patch.object(operating_system, 'copy', side_effect=Exception("Error while executing 'copy'.")) def test_write_file_as_root_with_error(self, copy_mock): target_file = tempfile.NamedTemporaryFile() temp_file = tempfile.NamedTemporaryFile('w') with patch('tempfile.NamedTemporaryFile', return_value=temp_file): with ExpectedException(Exception, "Error while executing 'copy'."): operating_system.write_file(target_file.name, "Lorem Ipsum", as_root=True) self.assertFalse(os.path.exists(temp_file.name)) @patch.object(operating_system, 'exists', return_value=True) @patch.object(operating_system, 'copy') @patch.object(operating_system, 'chmod') @patch.object(IdentityCodec, 'deserialize') @patch.object(IdentityCodec, 'serialize') @patch.object(operating_system, 'open', mock_open(read_data='MockingRead')) def test_read_file_with_flags_and_conv_func(self, mock_serialize, mock_deserialize, mock_chmod, mock_copy, *args): test_path = '/path/of/file' test_data = 'MockingRead' # use getattr to avoid pylint 'no-member' warning mock_file = getattr(operating_system, 'open') # simple read operating_system.read_file(test_path) mock_file.assert_called_once_with(test_path, 'r') mock_file().read.assert_called_once() mock_deserialize.called_once_with(test_data) mock_file.reset_mock() mock_deserialize.reset_mock() # read with decode=False operating_system.read_file(test_path, decode=False) mock_file.assert_called_once_with(test_path, 'rb') mock_file().read.assert_called_once() mock_serialize.called_once_with(test_data) mock_file.reset_mock() mock_serialize.reset_mock() # checking _read_file_as_root arguments with patch.object(operating_system, '_read_file_as_root') as mock_read_file_as_root: # simple read as root, operating_system.read_file(test_path, as_root=True) mock_read_file_as_root.assert_called_once_with( test_path, 'r', mock_deserialize) mock_deserialize.assert_not_called() mock_read_file_as_root.reset_mock() # read as root with decode=False, operating_system.read_file(test_path, as_root=True, decode=False) mock_read_file_as_root.assert_called_once_with( test_path, 'rb', mock_serialize) mock_serialize.assert_not_called() # simple read as root temp_file = tempfile.NamedTemporaryFile('r') with patch.object(tempfile, 'NamedTemporaryFile', return_value=temp_file) as mock_temp_file: operating_system.read_file(test_path, as_root=True) mock_temp_file.assert_called_once_with('r') mock_copy.called_once_with(test_path, temp_file.name, force=True, dereference=True, as_root=True) mock_chmod.called_once_with(temp_file.name, FileMode.ADD_READ_ALL(), as_root=True) mock_deserialize.assert_called_once_with('') self.assertFalse(os.path.exists(temp_file.name)) mock_copy.reset_mock() mock_chmod.reset_mock() mock_deserialize.reset_mock() # read as root with decode=False temp_file = tempfile.NamedTemporaryFile('rb') with patch.object(tempfile, 'NamedTemporaryFile', return_value=temp_file) as mock_temp_file: operating_system.read_file(test_path, as_root=True, decode=False) mock_temp_file.assert_called_once_with('rb') mock_copy.called_once_with(test_path, temp_file.name, force=True, dereference=True, as_root=True) mock_chmod.called_once_with(temp_file.name, FileMode.ADD_READ_ALL(), as_root=True) mock_serialize.assert_called_once_with(b'') self.assertFalse(os.path.exists(temp_file.name)) @patch.object(operating_system, 'copy') @patch.object(operating_system, 'chmod') @patch.object(IdentityCodec, 'deserialize', return_value=b'DeseiralizedData') @patch.object(IdentityCodec, 'serialize', 
return_value='SerializedData') @patch.object(operating_system, 'open', mock_open()) def test_write_file_with_flags_and_conv_func(self, mock_serialize, mock_deserialize, mock_chmod, mock_copy): test_path = '/path/of/file' test_data = 'MockingWrite' test_serialize = 'SerializedData' test_deserialize = b'DeseiralizedData' mock_file = getattr(operating_system, 'open') # simple write operating_system.write_file(test_path, test_data) mock_file.assert_called_once_with(test_path, 'w') mock_serialize.called_once_with(test_data) mock_file().write.assert_called_once_with(test_serialize) mock_file().flush.assert_called_once() mock_file.reset_mock() mock_serialize.reset_mock() # write with encode=False operating_system.write_file(test_path, test_data, encode=False) mock_file.assert_called_once_with(test_path, 'wb') mock_deserialize.called_once_with(test_data) mock_file().write.assert_called_once_with(test_deserialize) mock_file().flush.assert_called_once() mock_file.reset_mock() mock_deserialize.reset_mock() # checking _write_file_as_root arguments with patch.object(operating_system, '_write_file_as_root') as mock_write_file_as_root: # simple write as root, operating_system.write_file(test_path, test_data, as_root=True) mock_write_file_as_root.assert_called_once_with( test_path, test_data, 'w', mock_serialize) mock_serialize.assert_not_called() mock_write_file_as_root.reset_mock() # read as root with encode=False, operating_system.write_file(test_path, test_data, as_root=True, encode=False) mock_write_file_as_root.assert_called_once_with( test_path, test_data, 'wb', mock_deserialize) mock_deserialize.assert_not_called() # simple write as root temp_file = tempfile.NamedTemporaryFile('w') with patch.object(tempfile, 'NamedTemporaryFile', return_value=temp_file) as mock_temp_file: operating_system.write_file(test_path, test_data, as_root=True) mock_temp_file.assert_called_once_with('w', delete=False) mock_serialize.assert_called_once_with(test_data) mock_copy.called_once_with(temp_file.name, test_path, force=True, as_root=True) self.assertFalse(os.path.exists(temp_file.name)) mock_copy.reset_mock() mock_chmod.reset_mock() mock_serialize.reset_mock() # write as root with decode=False temp_file = tempfile.NamedTemporaryFile('wb') with patch.object(tempfile, 'NamedTemporaryFile', return_value=temp_file) as mock_temp_file: operating_system.write_file(test_path, test_data, as_root=True, encode=False) mock_temp_file.assert_called_once_with('wb', delete=False) mock_deserialize.assert_called_once_with(test_data) mock_copy.called_once_with(temp_file.name, test_path, force=True, as_root=True) self.assertFalse(os.path.exists(temp_file.name)) def test_start_service(self): self._assert_service_call(operating_system.start_service, 'cmd_start') def test_stop_service(self): self._assert_service_call(operating_system.stop_service, 'cmd_stop') def test_enable_service_on_boot(self): self._assert_service_call(operating_system.enable_service_on_boot, 'cmd_enable') def test_disable_service_on_boot(self): self._assert_service_call(operating_system.disable_service_on_boot, 'cmd_disable') @patch.object(operating_system, '_execute_service_command') def _assert_service_call(self, fun, expected_cmd_key, exec_service_cmd_mock): test_candidate_names = ['test_service_1', 'test_service_2'] fun(test_candidate_names) exec_service_cmd_mock.assert_called_once_with(test_candidate_names, expected_cmd_key) @patch.object(operating_system, 'service_discovery', return_value={'cmd_start': 'start', 'cmd_stop': 'stop', 'cmd_enable': 'enable', 
'cmd_disable': 'disable'}) def test_execute_service_command(self, discovery_mock): test_service_candidates = ['service_name'] self._assert_execute_call([['start']], [{'shell': True}], operating_system._execute_service_command, None, test_service_candidates, 'cmd_start') discovery_mock.assert_called_once_with(test_service_candidates) with ExpectedException(exception.UnprocessableEntity, "Candidate service names not specified."): operating_system._execute_service_command([], 'cmd_disable') with ExpectedException(exception.UnprocessableEntity, "Candidate service names not specified."): operating_system._execute_service_command(None, 'cmd_start') with ExpectedException(RuntimeError, "Service control command not " "available: unknown"): operating_system._execute_service_command(test_service_candidates, 'unknown') def test_modes(self): self._assert_modes(None, None, None, operating_system.FileMode()) self._assert_modes(None, None, None, operating_system.FileMode([], [], [])) self._assert_modes(0o770, 0o4, 0o3, operating_system.FileMode( [stat.S_IRWXU, stat.S_IRWXG], [stat.S_IROTH], [stat.S_IWOTH | stat.S_IXOTH]) ) self._assert_modes(0o777, None, None, operating_system.FileMode( [stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO]) ) self._assert_modes(0o777, None, None, operating_system.FileMode( reset=[stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO]) ) self._assert_modes(None, 0o777, None, operating_system.FileMode( add=[stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO]) ) self._assert_modes(None, None, 0o777, operating_system.FileMode( remove=[stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO]) ) self.assertEqual( operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR]), operating_system.FileMode(add=[stat.S_IWUSR, stat.S_IRUSR])) self.assertEqual( hash(operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR])), hash(operating_system.FileMode(add=[stat.S_IWUSR, stat.S_IRUSR]))) self.assertNotEqual( operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR]), operating_system.FileMode(reset=[stat.S_IRUSR, stat.S_IWUSR])) self.assertNotEqual( hash(operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR])), hash(operating_system.FileMode(reset=[stat.S_IRUSR, stat.S_IWUSR])) ) def _assert_modes(self, ex_reset, ex_add, ex_remove, actual): self.assertEqual(bool(ex_reset or ex_add or ex_remove), actual.has_any()) self.assertEqual(ex_reset, actual.get_reset_mode()) self.assertEqual(ex_add, actual.get_add_mode()) self.assertEqual(ex_remove, actual.get_remove_mode()) def test_chmod(self): self._assert_execute_call( [['chmod', '-R', '=064', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.SET_GRP_RW_OTH_R, as_root=True) self._assert_execute_call( [['chmod', '-R', '+444', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.ADD_READ_ALL, as_root=True) self._assert_execute_call( [['chmod', '-R', '+060', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.ADD_GRP_RW, as_root=True) self._assert_execute_call( [['chmod', '-R', '=777', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.SET_FULL, as_root=True) self._assert_execute_call( [['chmod', '-f', '=777', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chmod, None, 'path', FileMode.SET_FULL, as_root=True, recursive=False, force=True) self._assert_execute_call( [['chmod', '-R', '=777', 'path']], [{'timeout': 100}], operating_system.chmod, None, 
'path', FileMode.SET_FULL, timeout=100) self._assert_execute_call( [['chmod', '-R', '=777', 'path']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.chmod, None, 'path', FileMode.SET_FULL, as_root=True, timeout=None) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(exception.UnprocessableEntity, "No file mode specified."), 'path', FileMode()) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(exception.UnprocessableEntity, "No file mode specified."), 'path', None) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(exception.UnprocessableEntity, "Cannot change mode of a blank file."), '', FileMode.SET_FULL) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(exception.UnprocessableEntity, "Cannot change mode of a blank file."), None, FileMode.SET_FULL) self._assert_execute_call( None, None, operating_system.chmod, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'path', FileMode.SET_FULL, _unknown_kw=0) def test_remove(self): self._assert_execute_call( [['rm', '-R', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.remove, None, 'path', as_root=True) self._assert_execute_call( [['rm', '-f', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.remove, None, 'path', recursive=False, force=True, as_root=True) self._assert_execute_call( [['rm', '-R', 'path']], [{'timeout': 100}], operating_system.remove, None, 'path', timeout=100) self._assert_execute_call( [['rm', '-R', 'path']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.remove, None, 'path', timeout=None, as_root=True) self._assert_execute_call( None, None, operating_system.remove, ExpectedException(exception.UnprocessableEntity, "Cannot remove a blank file."), '') self._assert_execute_call( None, None, operating_system.remove, ExpectedException(exception.UnprocessableEntity, "Cannot remove a blank file."), None) self._assert_execute_call( None, None, operating_system.remove, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'path', _unknown_kw=0) def test_move(self): self._assert_execute_call( [['mv', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.move, None, 'source', 'destination', as_root=True) self._assert_execute_call( [['mv', '-f', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.move, None, 'source', 'destination', force=True, as_root=True) self._assert_execute_call( [['mv', 'source', 'destination']], [{'timeout': 100}], operating_system.move, None, 'source', 'destination', timeout=100) self._assert_execute_call( [['mv', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.move, None, 'source', 'destination', timeout=None, as_root=True) self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing source path."), '', 'destination') self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing source path."), None, 'destination') self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing destination path."), 'source', '') self._assert_execute_call( None, None, operating_system.move, 
ExpectedException(exception.UnprocessableEntity, "Missing destination path."), 'source', None) self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing source path."), '', '') self._assert_execute_call( None, None, operating_system.move, ExpectedException(exception.UnprocessableEntity, "Missing source path."), None, None) self._assert_execute_call( None, None, operating_system.move, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'source', 'destination', _unknown_kw=0) def test_copy(self): self._assert_execute_call( [['cp', '-R', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.copy, None, 'source', 'destination', as_root=True) self._assert_execute_call( [['cp', '-f', '-p', 'source', 'destination']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.copy, None, 'source', 'destination', force=True, preserve=True, recursive=False, as_root=True) self._assert_execute_call( [['cp', '-R', 'source', 'destination']], [{'timeout': 100}], operating_system.copy, None, 'source', 'destination', timeout=100) self._assert_execute_call( [['cp', '-R', 'source', 'destination']], [{'run_as_root': True, 'root_helper': "sudo", 'timeout': None}], operating_system.copy, None, 'source', 'destination', timeout=None, as_root=True) self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing source path."), '', 'destination') self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing source path."), None, 'destination') self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing destination path."), 'source', '') self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing destination path."), 'source', None) self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing source path."), '', '') self._assert_execute_call( None, None, operating_system.copy, ExpectedException(exception.UnprocessableEntity, "Missing source path."), None, None) self._assert_execute_call( None, None, operating_system.copy, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'source', 'destination', _unknown_kw=0) def test_chown(self): self._assert_execute_call( [['chown', '-R', 'usr:grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 'usr', 'grp', as_root=True) self._assert_execute_call( [['chown', 'usr:grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 'usr', 'grp', recursive=False, as_root=True) self._assert_execute_call( [['chown', '-f', '-R', 'usr:grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 'usr', 'grp', force=True, as_root=True) self._assert_execute_call( [['chown', '-R', ':grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', '', 'grp', as_root=True) self._assert_execute_call( [['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 'usr', '', as_root=True) self._assert_execute_call( [['chown', '-R', ':grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], 
operating_system.chown, None, 'path', None, 'grp', as_root=True) self._assert_execute_call( [['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.chown, None, 'path', 'usr', None, as_root=True) self._assert_execute_call( [['chown', '-R', 'usr:', 'path']], [{'timeout': 100}], operating_system.chown, None, 'path', 'usr', None, timeout=100) self._assert_execute_call( [['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.chown, None, 'path', 'usr', None, timeout=None, as_root=True) self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Cannot change ownership of a blank file."), '', 'usr', 'grp') self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Cannot change ownership of a blank file."), None, 'usr', 'grp') self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Please specify owner or group, or both."), 'path', '', '') self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Please specify owner or group, or both."), 'path', None, None) self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Cannot change ownership of a blank file."), None, None, None) self._assert_execute_call( None, None, operating_system.chown, ExpectedException(exception.UnprocessableEntity, "Cannot change ownership of a blank file."), '', '', '') self._assert_execute_call( None, None, operating_system.chown, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'path', 'usr', None, _unknown_kw=0) def test_change_user_group(self): self._assert_execute_call( [['usermod', '-a', '-G', 'user', 'group']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.change_user_group, None, 'group', 'user', as_root=True) self._assert_execute_call( [['usermod', '-a', '-G', 'user', 'group']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.change_user_group, None, 'group', 'user', append=True, add_group=True, as_root=True) self._assert_execute_call( [['usermod', '-a', '-G', 'user', 'group']], [{'timeout': 100}], operating_system.change_user_group, None, 'group', 'user', timeout=100) self._assert_execute_call( [['usermod', '-a', '-G', 'user', 'group']], [{'run_as_root': True, 'root_helper': "sudo", 'timeout': None}], operating_system.change_user_group, None, 'group', 'user', timeout=None, as_root=True) self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing user."), '', 'group') self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing user."), None, 'group') self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing group."), 'user', '') self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing group."), 'user', None) self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, "Missing user."), '', '') self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(exception.UnprocessableEntity, 
"Missing user."), None, None) self._assert_execute_call( None, None, operating_system.change_user_group, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'user', 'add_group', _unknown_kw=0) def test_create_directory(self): self._assert_execute_call( [['mkdir', '-p', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', as_root=True) self._assert_execute_call( [['mkdir', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', force=False, as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='usr', group='grp', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', ':grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', group='grp', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='usr', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']], [{'timeout': 100}, {'timeout': 100}], operating_system.create_directory, None, 'path', user='usr', timeout=100) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}, {'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}], operating_system.create_directory, None, 'path', user='usr', timeout=None, as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='usr', group='', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path'], ['chown', '-R', ':grp', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}, {'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='', group='grp', as_root=True) self._assert_execute_call( [['mkdir', '-p', 'path']], [{'run_as_root': True, 'root_helper': 'sudo'}], operating_system.create_directory, None, 'path', user='', group='', as_root=True) self._assert_execute_call( None, None, operating_system.create_directory, ExpectedException(exception.UnprocessableEntity, "Cannot create a blank directory."), '', user='usr', group='grp') self._assert_execute_call( None, None, operating_system.create_directory, ExpectedException(exception.UnprocessableEntity, "Cannot create a blank directory."), None) self._assert_execute_call( None, None, operating_system.create_directory, ExpectedException(UnknownArgumentError, "Got unknown keyword args: {'_unknown_kw': 0}"), 'path', _unknown_kw=0) def test_find_executable_without_path(self): command = "command" self._delegate_assert_find_executable(command=command, path=None, isfile=True, access=True, expected_return_value=( "/usr/bin/command")) self._delegate_assert_find_executable(command=command, path=None, isfile=True, access=False, expected_return_value=None) self._delegate_assert_find_executable(command=command, path=None, isfile=False, 
access=True, expected_return_value=None)
        self._delegate_assert_find_executable(command=command, path=None,
                                              isfile=False, access=False,
                                              expected_return_value=None)

    def test_find_executable_with_path(self):
        command = "command"
        path = "/home"
        self._delegate_assert_find_executable(
            command=command, path=path, isfile=True, access=True,
            expected_return_value="/home/command")
        self._delegate_assert_find_executable(
            command=command, path=path, isfile=True, access=False,
            expected_return_value=None)
        self._delegate_assert_find_executable(
            command=command, path=path, isfile=False, access=True,
            expected_return_value=None)
        self._delegate_assert_find_executable(
            command=command, path=path, isfile=False, access=False,
            expected_return_value=None)

    def _delegate_assert_find_executable(self, command, path, isfile, access,
                                         expected_return_value):
        self._assert_find_executable(command, path, isfile, access,
                                     expected_return_value)

    @patch.object(os, 'access')
    @patch.object(os.path, 'isfile')
    @patch.object(os.environ, 'get', return_value="/usr/bin")
    def _assert_find_executable(self, command, path, isfile, access,
                                expected_return_value, mock_environ,
                                mock_isfile, mock_access):
        mock_access.return_value = access
        mock_isfile.return_value = isfile
        actual_result = operating_system.find_executable(command, path)
        self.assertEqual(expected_return_value, actual_result)
        if path is None:
            mock_environ.assert_called_once()
        else:
            mock_environ.assert_not_called()

    def test_exists(self):
        self.assertFalse(
            operating_system.exists(tempfile.gettempdir(),
                                    is_directory=False))
        self.assertTrue(
            operating_system.exists(tempfile.gettempdir(),
                                    is_directory=True))
        with tempfile.NamedTemporaryFile() as test_file:
            self.assertTrue(
                operating_system.exists(test_file.name, is_directory=False))
            self.assertFalse(
                operating_system.exists(test_file.name, is_directory=True))
        self._assert_execute_call(
            [['test -f path && echo 1 || echo 0']],
            [{'shell': True, 'check_exit_code': False,
              'run_as_root': True, 'root_helper': 'sudo'}],
            operating_system.exists, None, 'path', is_directory=False,
            as_root=True)
        self._assert_execute_call(
            [['test -d path && echo 1 || echo 0']],
            [{'shell': True, 'check_exit_code': False,
              'run_as_root': True, 'root_helper': 'sudo'}],
            operating_system.exists, None, 'path', is_directory=True,
            as_root=True)

    def _assert_execute_call(self, exec_args, exec_kwargs, func, return_value,
                             *args, **kwargs):
        """Execute a function with the given arguments and assert both its
        return value and the resulting sequence of calls made to the
        'utils.execute_with_timeout' interface.

        :param exec_args: Expected arguments to the execute calls.
                          This is a list of lists, where each sub-list
                          represents a single call to
                          'utils.execute_with_timeout'.
        :type exec_args: list-of-lists

        :param exec_kwargs: Expected keywords to the execute calls.
                            This is a list of dicts, where each dict
                            represents a single call to
                            'utils.execute_with_timeout'.
        :type exec_kwargs: list-of-dicts

        :param func: Tested function call.
        :type func: callable

        :param return_value: Expected return value or exception from the
                             tested call, if any.
        :type return_value: object

        :param args: Arguments passed to the tested call.
        :type args: list

        :param kwargs: Keywords passed to the tested call.
:type kwargs: dict """ with patch.object(utils, 'execute_with_timeout', return_value=('0', '')) as exec_call: if isinstance(return_value, ExpectedException): with return_value: func(*args, **kwargs) else: actual_value = func(*args, **kwargs) if return_value is not None: self.assertEqual(return_value, actual_value, "Return value mismatch.") expected_calls = [] for arg, kw in six.moves.zip(exec_args, exec_kwargs): expected_calls.append(call(*arg, **kw)) self.assertEqual(expected_calls, exec_call.mock_calls, "Mismatch in calls to " "'execute_with_timeout'.") def test_get_os_redhat(self): with patch.object(os.path, 'isfile', side_effect=[True]): find_os = operating_system.get_os() self.assertEqual('redhat', find_os) def test_get_os_suse(self): with patch.object(os.path, 'isfile', side_effect=[False, True]): find_os = operating_system.get_os() self.assertEqual('suse', find_os) def test_get_os_debian(self): with patch.object(os.path, 'isfile', side_effect=[False, False]): find_os = operating_system.get_os() self.assertEqual('debian', find_os) def test_upstart_type_service_discovery(self): with patch.object(os.path, 'isfile', side_effect=[True]): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) def test_sysvinit_type_service_discovery(self): with patch.object(os.path, 'isfile', side_effect=[False, True, True]): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) def test_sysvinit_chkconfig_type_service_discovery(self): with patch.object(os.path, 'isfile', side_effect=[False, True, False, True]): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) @patch.object(os.path, 'islink', return_value=True) @patch.object(os.path, 'realpath') @patch.object(os.path, 'basename') def test_systemd_symlinked_type_service_discovery(self, mock_base, mock_path, mock_islink): with patch.object(os.path, 'isfile', side_effect=[False, False, True]): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) def test_systemd_not_symlinked_type_service_discovery(self): with patch.object(os.path, 'isfile', side_effect=[False, False, True]): with patch.object(os.path, 'islink', return_value=False): mysql_service = operating_system.service_discovery(["mysql"]) self.assertIsNotNone(mysql_service['cmd_start']) self.assertIsNotNone(mysql_service['cmd_enable']) def test_file_discovery(self): with patch.object(os.path, 'isfile', side_effect=[False, True]): config_file = operating_system.file_discovery( ["/etc/mongodb.conf", "/etc/mongod.conf"]) self.assertEqual('/etc/mongod.conf', config_file) with patch.object(os.path, 'isfile', side_effect=[False]): config_file = operating_system.file_discovery( ["/etc/mongodb.conf"]) self.assertEqual('', config_file) def test_list_files_in_directory(self): root_path = tempfile.mkdtemp() try: all_paths = set() self._create_temp_fs_structure( root_path, 3, 3, ['txt', 'py', ''], 1, all_paths) # All files in the top directory. self._assert_list_files( root_path, False, None, False, all_paths, 9) # All files & directories in the top directory. self._assert_list_files( root_path, False, None, True, all_paths, 10) # All files recursive. 
self._assert_list_files(
                root_path, True, None, False, all_paths, 27)
            # All files & directories recursive.
            self._assert_list_files(
                root_path, True, None, True, all_paths, 29)
            # Only '*.txt' in the top directory.
            self._assert_list_files(
                root_path, False, r'.*\.txt$', False, all_paths, 3)
            # Only '*.txt' (including directories) in the top directory.
            self._assert_list_files(
                root_path, False, r'.*\.txt$', True, all_paths, 3)
            # Only '*.txt' (including directories) recursive.
            self._assert_list_files(
                root_path, True, r'.*\.txt$', True, all_paths, 9)
            # Only '*.txt' recursive.
            self._assert_list_files(
                root_path, True, r'.*\.txt$', False, all_paths, 9)
            # Only extension-less files in the top directory.
            self._assert_list_files(
                root_path, False, r'[^\.]*$', False, all_paths, 3)
            # Only extension-less files recursive.
            self._assert_list_files(
                root_path, True, r'[^\.]*$', False, all_paths, 9)
            # Non-existing extension in the top directory.
            self._assert_list_files(
                root_path, False, r'.*\.bak$', False, all_paths, 0)
            # Non-existing extension recursive.
            self._assert_list_files(
                root_path, True, r'.*\.bak$', False, all_paths, 0)
        finally:
            try:
                os.remove(root_path)
            except Exception:
                pass  # Do not fail in the cleanup.

    def _assert_list_files(self, root, recursive, pattern, include_dirs,
                           all_paths, count):
        found = operating_system.list_files_in_directory(
            root, recursive=recursive, pattern=pattern,
            include_dirs=include_dirs)
        expected = {
            path for path in filter(
                lambda item: include_dirs or not os.path.isdir(item),
                all_paths)
            if ((recursive or os.path.dirname(path) == root) and
                (not pattern or re.match(pattern, os.path.basename(path))))}
        self.assertEqual(expected, found)
        self.assertEqual(count, len(found),
                         "Incorrect number of listed files.")

    def _create_temp_fs_structure(self, root_path, num_levels,
                                  num_files_per_extension, file_extensions,
                                  level, created_paths):
        """Create a structure of temporary directories 'num_levels' deep,
        with temporary files on each level.
        """
        file_paths = self._create_temp_files(
            root_path, num_files_per_extension, file_extensions)
        created_paths.update(file_paths)

        if level < num_levels:
            path = tempfile.mkdtemp(dir=root_path)
            created_paths.add(path)
            self._create_temp_fs_structure(
                path, num_levels, num_files_per_extension,
                file_extensions, level + 1, created_paths)

    def _create_temp_files(self, root_path, num_files_per_extension,
                           file_extensions):
        """Create 'num_files_per_extension' temporary files for each of
        the given extensions.
        """
        files = set()
        for ext in file_extensions:
            for fileno in range(1, num_files_per_extension + 1):
                prefix = str(fileno)
                suffix = os.extsep + ext if ext else ''
                _, path = tempfile.mkstemp(prefix=prefix, suffix=suffix,
                                           dir=root_path)
                files.add(path)
        return files


# ---- File: trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_pkg.py ----
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import os import re import subprocess from mock import Mock, MagicMock, patch import pexpect from trove.common import exception from trove.common import utils from trove.guestagent import pkg from trove.tests.unittests import trove_testtools """ Unit tests for the classes and functions in pkg.py. """ class PkgDEBInstallTestCase(trove_testtools.TestCase): def setUp(self): super(PkgDEBInstallTestCase, self).setUp() self.pkg = pkg.DebianPackagerMixin() self.pkg_fix = self.pkg._fix self.pkg_fix_package_selections = self.pkg._fix_package_selections p0 = patch('pexpect.spawn') p0.start() self.addCleanup(p0.stop) p1 = patch('trove.common.utils.execute') p1.start() self.addCleanup(p1.stop) self.pkg._fix = Mock(return_value=None) self.pkg._fix_package_selections = Mock(return_value=None) self.pkgName = 'packageName' def tearDown(self): super(PkgDEBInstallTestCase, self).tearDown() self.pkg._fix = self.pkg_fix self.pkg._fix_package_selections = self.pkg_fix_package_selections def test_pkg_is_installed_no_packages(self): packages = [] self.assertTrue(self.pkg.pkg_is_installed(packages)) def test_pkg_is_installed_yes(self): packages = ["package1=1.0", "package2"] self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0"]) self.assertTrue(self.pkg.pkg_is_installed(packages)) def test_pkg_is_installed_no(self): packages = ["package1=1.0", "package2", "package3=3.1"] self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0", "3.0"]) self.assertFalse(self.pkg.pkg_is_installed(packages)) def test_success_install(self): # test pexpect.spawn.return_value.expect.return_value = 7 pexpect.spawn.return_value.match = False self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) def test_success_install_with_config_opts(self): # test config_opts = {'option': 'some_opt'} pexpect.spawn.return_value.expect.return_value = 7 pexpect.spawn.return_value.match = False self.assertTrue( self.pkg.pkg_install(self.pkgName, config_opts, 5000) is None) def test_permission_error(self): # test pexpect.spawn.return_value.expect.return_value = 0 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_not_found_1(self): # test pexpect.spawn.return_value.expect.return_value = 1 pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) # test and verify self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_not_found_2(self): # test pexpect.spawn.return_value.expect.return_value = 2 pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) # test and verify self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_run_DPKG_bad_State(self): # test _fix method is called and PackageStateError is thrown pexpect.spawn.return_value.expect.return_value = 4 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_install, self.pkgName, {}, 5000) self.assertTrue(self.pkg._fix.called) def test_admin_lock_error(self): # test 'Unable to lock the administration directory' error pexpect.spawn.return_value.expect.return_value = 5 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_broken_error(self): pexpect.spawn.return_value.expect.return_value = 6 
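        # A note on the pexpect pattern used throughout these install/remove
        # tests: the mocked spawn().expect() returns the index of the output
        # pattern that "matched", and the packager dispatches on that index.
        # The exact index-to-error mapping lives in trove/guestagent/pkg.py;
        # judging from the assertion below, index 6 is assumed here to select
        # the broken-package branch (PkgBrokenError).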
pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgBrokenError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_timeout_error(self): # test timeout error pexpect.spawn.return_value.expect.side_effect = ( pexpect.TIMEOUT('timeout error')) # test and verify self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install, self.pkgName, {}, 5000) class PkgDEBRemoveTestCase(trove_testtools.TestCase): def setUp(self): super(PkgDEBRemoveTestCase, self).setUp() self.pkg = pkg.DebianPackagerMixin() self.pkg_version = self.pkg.pkg_version self.pkg_install = self.pkg._install self.pkg_fix = self.pkg._fix p0 = patch('pexpect.spawn') p0.start() self.addCleanup(p0.stop) p1 = patch('trove.common.utils.execute') p1.start() self.addCleanup(p1.stop) self.pkg.pkg_version = Mock(return_value="OK") self.pkg._install = Mock(return_value=None) self.pkg._fix = Mock(return_value=None) self.pkgName = 'packageName' def tearDown(self): super(PkgDEBRemoveTestCase, self).tearDown() self.pkg.pkg_version = self.pkg_version self.pkg._install = self.pkg_install self.pkg._fix = self.pkg_fix def test_remove_no_pkg_version(self): # test pexpect.spawn.return_value.expect.return_value = 6 pexpect.spawn.return_value.match = False with patch.object(self.pkg, 'pkg_version', return_value=None): self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None) def test_success_remove(self): # test pexpect.spawn.return_value.expect.return_value = 6 pexpect.spawn.return_value.match = False self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None) def test_permission_error(self): # test pexpect.spawn.return_value.expect.return_value = 0 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove, self.pkgName, 5000) def test_package_not_found(self): # test pexpect.spawn.return_value.expect.return_value = 1 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove, self.pkgName, 5000) def test_package_reinstall_first_1(self): # test pexpect.spawn.return_value.expect.return_value = 2 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove, self.pkgName, 5000) self.assertTrue(self.pkg._install.called) self.assertFalse(self.pkg._fix.called) def test_package_reinstall_first_2(self): # test pexpect.spawn.return_value.expect.return_value = 3 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove, self.pkgName, 5000) self.assertTrue(self.pkg._install.called) self.assertFalse(self.pkg._fix.called) def test_package_DPKG_first(self): # test pexpect.spawn.return_value.expect.return_value = 4 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove, self.pkgName, 5000) self.assertFalse(self.pkg._install.called) self.assertTrue(self.pkg._fix.called) def test_admin_lock_error(self): # test 'Unable to lock the administration directory' error pexpect.spawn.return_value.expect.return_value = 5 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_remove, self.pkgName, 5000) def test_timeout_error(self): # test timeout error pexpect.spawn.return_value.expect.side_effect = ( pexpect.TIMEOUT('timeout error')) # test and verify self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove, self.pkgName, 5000) @patch.object(subprocess, 'call') def 
test_timeout_error_with_exception(self, mock_call): # test timeout error pexpect.spawn.return_value.expect.side_effect = ( pexpect.TIMEOUT('timeout error')) pexpect.spawn.return_value.close.side_effect = ( pexpect.ExceptionPexpect('error')) # test and verify self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove, self.pkgName, 5000) self.assertEqual(1, mock_call.call_count) class PkgDEBVersionTestCase(trove_testtools.TestCase): def setUp(self): super(PkgDEBVersionTestCase, self).setUp() self.pkgName = 'mysql-server-5.7' self.pkgVersion = '5.7.20-0' self.getoutput = pkg.getoutput def tearDown(self): super(PkgDEBVersionTestCase, self).tearDown() pkg.getoutput = self.getoutput def test_version_success(self): cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, self.pkgVersion) pkg.getoutput = Mock(return_value=cmd_out) version = pkg.DebianPackagerMixin().pkg_version(self.pkgName) self.assertTrue(version) self.assertEqual(self.pkgVersion, version) def test_version_unknown_package(self): cmd_out = "N: Unable to locate package %s" % self.pkgName pkg.getoutput = Mock(return_value=cmd_out) self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName)) def test_version_no_version(self): cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, "(none)") pkg.getoutput = Mock(return_value=cmd_out) self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName)) class PkgRPMVersionTestCase(trove_testtools.TestCase): def setUp(self): super(PkgRPMVersionTestCase, self).setUp() self.pkgName = 'python-requests' self.pkgVersion = '0.14.2-1.el6' self.getoutput = pkg.getoutput def tearDown(self): super(PkgRPMVersionTestCase, self).tearDown() pkg.getoutput = self.getoutput @patch('trove.guestagent.pkg.LOG') def test_version_no_output(self, mock_logging): cmd_out = '' pkg.getoutput = Mock(return_value=cmd_out) self.assertIsNone(pkg.RedhatPackagerMixin().pkg_version(self.pkgName)) def test_version_success(self): cmd_out = self.pkgVersion pkg.getoutput = Mock(return_value=cmd_out) version = pkg.RedhatPackagerMixin().pkg_version(self.pkgName) self.assertTrue(version) self.assertEqual(self.pkgVersion, version) class PkgRPMInstallTestCase(trove_testtools.TestCase): def setUp(self): super(PkgRPMInstallTestCase, self).setUp() self.pkg = pkg.RedhatPackagerMixin() self.getoutput = pkg.getoutput self.pkgName = 'packageName' p0 = patch('pexpect.spawn') p0.start() self.addCleanup(p0.stop) p1 = patch('trove.common.utils.execute') p1.start() self.addCleanup(p1.stop) def tearDown(self): super(PkgRPMInstallTestCase, self).tearDown() pkg.getoutput = self.getoutput def test_pkg_is_installed_no_packages(self): packages = [] self.assertTrue(self.pkg.pkg_is_installed(packages)) def test_pkg_is_installed_yes(self): packages = ["package1=1.0", "package2"] with patch.object(pkg, 'getoutput', MagicMock( return_value="package1=1.0\n" "package2=2.0")): self.assertTrue(self.pkg.pkg_is_installed(packages)) def test_pkg_is_installed_no(self): packages = ["package1=1.0", "package2", "package3=3.0"] with patch.object(pkg, 'getoutput', MagicMock( return_value="package1=1.0\n" "package2=2.0")): self.assertFalse(self.pkg.pkg_is_installed(packages)) def test_permission_error(self): # test pexpect.spawn.return_value.expect.return_value = 0 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_not_found(self): # test pexpect.spawn.return_value.expect.return_value = 1 pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) # 
test and verify self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_conflict_remove(self): # test pexpect.spawn.return_value.expect.return_value = 2 pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) self.pkg._rpm_remove_nodeps = Mock() # test and verify self.pkg._install(self.pkgName, 5000) self.assertTrue(self.pkg._rpm_remove_nodeps.called) def test_package_conflict_remove_install(self): with patch.object(self.pkg, '_install', side_effect=[3, 3, 0]): self.assertTrue( self.pkg.pkg_install(self.pkgName, {}, 5000) is None) self.assertEqual(3, self.pkg._install.call_count) @patch.object(utils, 'execute') def test__rpm_remove_nodeps(self, mock_execute): self.pkg._rpm_remove_nodeps(self.pkgName) mock_execute.assert_called_with('rpm', '-e', '--nodeps', self.pkgName, run_as_root=True, root_helper='sudo') def test_package_scriptlet_error(self): # test pexpect.spawn.return_value.expect.return_value = 5 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgScriptletError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_http_error(self): # test pexpect.spawn.return_value.expect.return_value = 6 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_nomirrors_error(self): # test pexpect.spawn.return_value.expect.return_value = 7 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_sign_error(self): # test pexpect.spawn.return_value.expect.return_value = 8 pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgSignError, self.pkg.pkg_install, self.pkgName, {}, 5000) def test_package_already_installed(self): # test pexpect.spawn.return_value.expect.return_value = 9 pexpect.spawn.return_value.match = False # test and verify self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) def test_package_success_updated(self): # test pexpect.spawn.return_value.expect.return_value = 10 pexpect.spawn.return_value.match = False # test and verify self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) def test_package_success_installed(self): # test pexpect.spawn.return_value.expect.return_value = 11 pexpect.spawn.return_value.match = False # test and verify self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) def test_timeout_error(self): # test timeout error pexpect.spawn.return_value.expect.side_effect = ( pexpect.TIMEOUT('timeout error')) pexpect.spawn.return_value.match = False # test and verify self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install, self.pkgName, {}, 5000) class PkgRPMRemoveTestCase(trove_testtools.TestCase): def setUp(self): super(PkgRPMRemoveTestCase, self).setUp() self.pkg = pkg.RedhatPackagerMixin() self.pkg_version = self.pkg.pkg_version self.pkg_install = self.pkg._install p0 = patch('pexpect.spawn') p0.start() self.addCleanup(p0.stop) p1 = patch('trove.common.utils.execute') p1.start() self.addCleanup(p1.stop) self.pkg.pkg_version = Mock(return_value="OK") self.pkg._install = Mock(return_value=None) self.pkgName = 'packageName' def tearDown(self): super(PkgRPMRemoveTestCase, self).tearDown() self.pkg.pkg_version = self.pkg_version self.pkg._install = self.pkg_install def test_permission_error(self): # test pexpect.spawn.return_value.expect.return_value = 0 pexpect.spawn.return_value.match = 
False
        # test and verify
        self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove,
                          self.pkgName, 5000)

    def test_package_not_found(self):
        # test
        pexpect.spawn.return_value.expect.return_value = 1
        pexpect.spawn.return_value.match = False
        # test and verify
        self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove,
                          self.pkgName, 5000)

    def test_remove_no_pkg_version(self):
        # test
        pexpect.spawn.return_value.expect.return_value = 2
        pexpect.spawn.return_value.match = False
        with patch.object(self.pkg, 'pkg_version', return_value=None):
            self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)

    def test_success_remove(self):
        # test
        pexpect.spawn.return_value.expect.return_value = 2
        pexpect.spawn.return_value.match = False
        self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)

    def test_timeout_error(self):
        # test timeout error
        pexpect.spawn.return_value.expect.side_effect = (
            pexpect.TIMEOUT('timeout error'))
        pexpect.spawn.return_value.match = False
        # test and verify
        self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove,
                          self.pkgName, 5000)


class PkgDEBFixPackageSelections(trove_testtools.TestCase):

    def setUp(self):
        super(PkgDEBFixPackageSelections, self).setUp()
        self.pkg = pkg.DebianPackagerMixin()
        self.getoutput = pkg.getoutput

    def tearDown(self):
        super(PkgDEBFixPackageSelections, self).tearDown()
        pkg.getoutput = self.getoutput

    @patch.object(os, 'remove')
    @patch.object(pkg, 'NamedTemporaryFile')
    @patch.object(utils, 'execute')
    def test__fix_package_selections(self, mock_execute, mock_temp_file,
                                     mock_remove):
        packages = ["package1"]
        config_opts = {'option': 'some_opt'}
        pkg.getoutput = Mock(return_value="* package1/option: some_opt")
        self.pkg._fix_package_selections(packages, config_opts)
        self.assertEqual(2, mock_execute.call_count)
        self.assertEqual(1, mock_remove.call_count)

    @patch.object(os, 'remove')
    @patch.object(pkg, 'NamedTemporaryFile')
    @patch.object(utils, 'execute',
                  side_effect=exception.ProcessExecutionError)
    def test_fail__fix_package_selections(self, mock_execute, mock_temp_file,
                                          mock_remove):
        packages = ["package1"]
        config_opts = {'option': 'some_opt'}
        pkg.getoutput = Mock(return_value="* package1/option: some_opt")
        self.assertRaises(pkg.PkgConfigureError,
                          self.pkg._fix_package_selections,
                          packages, config_opts)
        self.assertEqual(1, mock_remove.call_count)

    @patch.object(utils, 'execute')
    def test__fix(self, mock_execute):
        self.pkg._fix(30)
        mock_execute.assert_called_with('dpkg', '--configure', '-a',
                                        run_as_root=True, root_helper='sudo')


# ---- File: trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_query.py ----
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
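# A quick orientation for the tests below: sql_query builds each SQL clause
# as a separate fragment, and the tests assert on those fragments directly.
# A minimal illustrative sketch (not part of the original module), using only
# values verified by the assertions that follow:
#
#     q = sql_query.Query(columns=['col_A', 'col_B'],
#                         tables=['table_A', 'table_B'],
#                         where=['cond_A', 'cond_B'],
#                         limit=20)
#     q._columns  # "SELECT col_A, col_B"
#     q._tables   # "FROM table_A, table_B"
#     q._where    # "WHERE cond_A AND cond_B"
#     q._limit    # "LIMIT 20"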
from trove.guestagent.common import sql_query from trove.tests.unittests import trove_testtools class QueryTestBase(trove_testtools.TestCase): def setUp(self): super(QueryTestBase, self).setUp() def tearDown(self): super(QueryTestBase, self).tearDown() class QueryTest(QueryTestBase): def setUp(self): super(QueryTest, self).setUp() def tearDown(self): super(QueryTest, self).tearDown() def test_columns(self): myQuery = sql_query.Query(columns=None) self.assertEqual("SELECT *", myQuery._columns) def test_columns_2(self): columns = ["col_A", "col_B"] myQuery = sql_query.Query(columns=columns) self.assertEqual("SELECT col_A, col_B", myQuery._columns) def test_tables(self): tables = ['table_A', 'table_B'] myQuery = sql_query.Query(tables=tables) self.assertEqual("FROM table_A, table_B", myQuery._tables) def test_where(self): myQuery = sql_query.Query(where=None) self.assertEqual("", myQuery._where) def test_where_2(self): conditions = ['cond_A', 'cond_B'] myQuery = sql_query.Query(where=conditions) self.assertEqual("WHERE cond_A AND cond_B", myQuery._where) def test_order(self): myQuery = sql_query.Query(order=None) self.assertEqual('', myQuery._order) def test_order_2(self): orders = ['deleted_at', 'updated_at'] myQuery = sql_query.Query(order=orders) self.assertEqual('ORDER BY deleted_at, updated_at', myQuery._order) def test_group_by(self): myQuery = sql_query.Query(group=None) self.assertEqual('', myQuery._group_by) def test_group_by_2(self): groups = ['deleted=1'] myQuery = sql_query.Query(group=groups) self.assertEqual('GROUP BY deleted=1', myQuery._group_by) def test_limit(self): myQuery = sql_query.Query(limit=None) self.assertEqual('', myQuery._limit) def test_limit_2(self): limit_count = 20 myQuery = sql_query.Query(limit=limit_count) self.assertEqual('LIMIT 20', myQuery._limit) class GrantTest(QueryTestBase): def setUp(self): super(GrantTest, self).setUp() def tearDown(self): super(GrantTest, self).tearDown() def test_grant_no_arg_constr(self): grant = sql_query.Grant() self.assertIsNotNone(grant) self.assertEqual("GRANT USAGE ON *.* " "TO ``@`%`;", str(grant)) def test_grant_all_with_grant_option(self): permissions = ['ALL'] user_name = 'root' user_password = 'password123' host = 'localhost' # grant_option defaults to True grant = sql_query.Grant(permissions=permissions, user=user_name, host=host, clear=user_password, grant_option=True) self.assertEqual("GRANT ALL PRIVILEGES ON *.* TO " "`root`@`localhost` " "IDENTIFIED BY 'password123' " "WITH GRANT OPTION;", str(grant)) def test_grant_all_with_explicit_grant_option(self): permissions = ['ALL', 'GRANT OPTION'] user_name = 'root' user_password = 'password123' host = 'localhost' grant = sql_query.Grant(permissions=permissions, user=user_name, host=host, clear=user_password, grant_option=True) self.assertEqual("GRANT ALL PRIVILEGES ON *.* TO " "`root`@`localhost` " "IDENTIFIED BY 'password123' " "WITH GRANT OPTION;", str(grant)) def test_grant_specify_permissions(self): permissions = ['ALTER ROUTINE', 'CREATE', 'ALTER', 'CREATE ROUTINE', 'CREATE TEMPORARY TABLES', 'CREATE VIEW', 'CREATE USER', 'DELETE', 'DROP', 'EVENT', 'EXECUTE', 'INDEX', 'INSERT', 'LOCK TABLES', 'PROCESS', 'REFERENCES', 'SELECT', 'SHOW DATABASES', 'SHOW VIEW', 'TRIGGER', 'UPDATE', 'USAGE'] user_name = 'root' user_password = 'password123' host = 'localhost' grant = sql_query.Grant(permissions=permissions, user=user_name, host=host, clear=user_password) self.assertEqual("GRANT ALTER, " "ALTER ROUTINE, " "CREATE, " "CREATE ROUTINE, " "CREATE TEMPORARY TABLES, " "CREATE 
USER, " "CREATE VIEW, " "DELETE, " "DROP, " "EVENT, " "EXECUTE, " "INDEX, " "INSERT, " "LOCK TABLES, " "PROCESS, " "REFERENCES, " "SELECT, " "SHOW DATABASES, " "SHOW VIEW, " "TRIGGER, " "UPDATE, " "USAGE ON *.* TO " "`root`@`localhost` " "IDENTIFIED BY " "'password123';", str(grant)) def test_grant_specify_duplicate_permissions(self): permissions = ['ALTER ROUTINE', 'CREATE', 'CREATE', 'DROP', 'DELETE', 'DELETE', 'ALTER', 'CREATE ROUTINE', 'CREATE TEMPORARY TABLES', 'CREATE VIEW', 'CREATE USER', 'DELETE', 'DROP', 'EVENT', 'EXECUTE', 'INDEX', 'INSERT', 'LOCK TABLES', 'PROCESS', 'REFERENCES', 'SELECT', 'SHOW DATABASES', 'SHOW VIEW', 'TRIGGER', 'UPDATE', 'USAGE'] user_name = 'root' user_password = 'password123' host = 'localhost' grant = sql_query.Grant(permissions=permissions, user=user_name, host=host, clear=user_password) self.assertEqual("GRANT ALTER, " "ALTER ROUTINE, " "CREATE, " "CREATE ROUTINE, " "CREATE TEMPORARY TABLES, " "CREATE USER, " "CREATE VIEW, " "DELETE, " "DROP, " "EVENT, " "EXECUTE, " "INDEX, " "INSERT, " "LOCK TABLES, " "PROCESS, " "REFERENCES, " "SELECT, " "SHOW DATABASES, " "SHOW VIEW, " "TRIGGER, " "UPDATE, " "USAGE ON *.* TO " "`root`@`localhost` " "IDENTIFIED BY " "'password123';", str(grant)) class RevokeTest(QueryTestBase): def setUp(self): super(RevokeTest, self).setUp() def tearDown(self): super(RevokeTest, self).tearDown() def test_defaults(self): r = sql_query.Revoke() # Technically, this isn't valid for MySQL. self.assertEqual("REVOKE ALL ON *.* FROM ``@`%`;", str(r)) def test_permissions(self): r = sql_query.Revoke() r.user = 'x' r.permissions = ['CREATE', 'DELETE', 'DROP'] self.assertEqual("REVOKE CREATE, DELETE, DROP ON *.* FROM `x`@`%`;", str(r)) def test_database(self): r = sql_query.Revoke() r.user = 'x' r.database = 'foo' self.assertEqual("REVOKE ALL ON `foo`.* FROM `x`@`%`;", str(r)) def test_table(self): r = sql_query.Revoke() r.user = 'x' r.database = 'foo' r.table = 'bar' self.assertEqual("REVOKE ALL ON `foo`.'bar' FROM `x`@`%`;", str(r)) def test_user(self): r = sql_query.Revoke() r.user = 'x' self.assertEqual("REVOKE ALL ON *.* FROM `x`@`%`;", str(r)) def test_user_host(self): r = sql_query.Revoke() r.user = 'x' r.host = 'y' self.assertEqual("REVOKE ALL ON *.* FROM `x`@`y`;", str(r)) class CreateDatabaseTest(QueryTestBase): def setUp(self): super(CreateDatabaseTest, self).setUp() def tearDown(self): super(CreateDatabaseTest, self).tearDown() def test_defaults(self): cd = sql_query.CreateDatabase('foo') self.assertEqual("CREATE DATABASE IF NOT EXISTS `foo`;", str(cd)) def test_charset(self): cd = sql_query.CreateDatabase('foo') cd.charset = "foo" self.assertEqual(("CREATE DATABASE IF NOT EXISTS `foo` " "CHARACTER SET = 'foo';"), str(cd)) def test_collate(self): cd = sql_query.CreateDatabase('foo') cd.collate = "bar" self.assertEqual(("CREATE DATABASE IF NOT EXISTS `foo` " "COLLATE = 'bar';"), str(cd)) class DropDatabaseTest(QueryTestBase): def setUp(self): super(DropDatabaseTest, self).setUp() def tearDown(self): super(DropDatabaseTest, self).tearDown() def test_defaults(self): dd = sql_query.DropDatabase('foo') self.assertEqual("DROP DATABASE `foo`;", str(dd)) class CreateUserTest(QueryTestBase): def setUp(self): super(CreateUserTest, self).setUp() def tearDown(self): super(CreateUserTest, self).tearDown() def test_defaults(self): username = 'root' hostname = 'localhost' password = 'password123' cu = sql_query.CreateUser(user=username, host=hostname, clear=password) self.assertEqual("CREATE USER :user@:host " "IDENTIFIED BY 'password123';", 
str(cu))


class RenameUserTest(QueryTestBase):

    def setUp(self):
        super(RenameUserTest, self).setUp()

    def tearDown(self):
        super(RenameUserTest, self).tearDown()

    def test_rename_user(self):
        username = 'root'
        hostname = 'localhost'
        new_user = 'root123'
        uu = sql_query.RenameUser(user=username, host=hostname,
                                  new_user=new_user)
        self.assertEqual("RENAME USER 'root'@'localhost' "
                         "TO 'root123'@'localhost';", str(uu))

    def test_change_host(self):
        username = 'root'
        hostname = 'localhost'
        new_host = '%'
        uu = sql_query.RenameUser(user=username, host=hostname,
                                  new_host=new_host)
        self.assertEqual("RENAME USER 'root'@'localhost' "
                         "TO 'root'@'%';", str(uu))

    def test_change_username_and_hostname(self):
        username = 'root'
        hostname = 'localhost'
        new_user = 'root123'
        new_host = '%'
        uu = sql_query.RenameUser(user=username, host=hostname,
                                  new_user=new_user, new_host=new_host)
        self.assertEqual("RENAME USER 'root'@'localhost' "
                         "TO 'root123'@'%';", str(uu))


class SetPasswordTest(QueryTestBase):

    def setUp(self):
        super(SetPasswordTest, self).setUp()

    def tearDown(self):
        super(SetPasswordTest, self).tearDown()

    def test_alter_user(self):
        username = 'root'
        hostname = 'localhost'
        new_password = 'new_password'
        uu = sql_query.SetPassword(user=username, host=hostname,
                                   new_password=new_password)
        self.assertEqual("SET PASSWORD FOR 'root'@'localhost' = "
                         "PASSWORD('new_password');", str(uu))


class DropUserTest(QueryTestBase):

    def setUp(self):
        super(DropUserTest, self).setUp()

    def tearDown(self):
        super(DropUserTest, self).tearDown()

    def test_defaults(self):
        username = 'root'
        hostname = 'localhost'
        du = sql_query.DropUser(user=username, host=hostname)
        self.assertEqual("DROP USER `root`@`localhost`;", str(du))


# ---- File: trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_redis_manager.py ----
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
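# The manager tests below share one mocking pattern: setUp() patches
# trove.guestagent.strategies.replication.get_instance, so every replication
# call the manager makes lands on a MagicMock that can be verified by call
# count. A minimal sketch of the pattern (illustrative only, mirroring
# test_detach_replica below):
#
#     mock_replication = MagicMock()
#     self.mock_repl.return_value = mock_replication
#     self.manager.detach_replica(self.context)
#     self.assertEqual(1, mock_replication.detach_slave.call_count)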
from mock import DEFAULT, MagicMock, Mock, patch from trove.common import utils from trove.guestagent import backup from trove.guestagent.common import configuration from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.redis import ( service as redis_service) from trove.guestagent.datastore.experimental.redis.manager import ( Manager as RedisManager) from trove.guestagent.volume import VolumeDevice from trove.tests.unittests.guestagent.test_datastore_manager import \ DatastoreManagerTest class RedisGuestAgentManagerTest(DatastoreManagerTest): @patch.object(redis_service.RedisApp, '_build_admin_client') @patch.object(ImportOverrideStrategy, '_initialize_import_directory') def setUp(self, *args, **kwargs): super(RedisGuestAgentManagerTest, self).setUp('redis') self.patch_ope = patch('os.path.expanduser', return_value='/tmp/redis') self.mock_ope = self.patch_ope.start() self.addCleanup(self.patch_ope.stop) self.replication_strategy = 'RedisSyncReplication' self.patch_rs = patch( 'trove.guestagent.strategies.replication.get_strategy', return_value=self.replication_strategy) self.mock_rs = self.patch_rs.start() self.addCleanup(self.patch_rs.stop) self.manager = RedisManager() self.packages = 'redis-server' self.origin_RedisAppStatus = redis_service.RedisAppStatus self.origin_start_redis = redis_service.RedisApp.start_db self.origin_stop_redis = redis_service.RedisApp.stop_db self.origin_install_redis = redis_service.RedisApp._install_redis self.origin_install_if_needed = \ redis_service.RedisApp.install_if_needed self.origin_format = VolumeDevice.format self.origin_mount = VolumeDevice.mount self.origin_mount_points = VolumeDevice.mount_points self.origin_restore = backup.restore self.patch_repl = patch( 'trove.guestagent.strategies.replication.get_instance') self.mock_repl = self.patch_repl.start() self.addCleanup(self.patch_repl.stop) self.patch_gfvs = patch( 'trove.guestagent.dbaas.get_filesystem_volume_stats') self.mock_gfvs_class = self.patch_gfvs.start() self.addCleanup(self.patch_gfvs.stop) def tearDown(self): super(RedisGuestAgentManagerTest, self).tearDown() redis_service.RedisAppStatus = self.origin_RedisAppStatus redis_service.RedisApp.stop_db = self.origin_stop_redis redis_service.RedisApp.start_db = self.origin_start_redis redis_service.RedisApp._install_redis = self.origin_install_redis redis_service.RedisApp.install_if_needed = \ self.origin_install_if_needed VolumeDevice.format = self.origin_format VolumeDevice.mount = self.origin_mount VolumeDevice.mount_points = self.origin_mount_points backup.restore = self.origin_restore def test_update_status(self): mock_status = MagicMock() mock_status.is_installed = True mock_status._is_restarting = False self.manager._app.status = mock_status self.manager.update_status(self.context) self.assertTrue(mock_status.set_status.called) def test_prepare_redis_not_installed(self): self._prepare_dynamic(is_redis_installed=False) def test_prepare_redis_with_snapshot(self): snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': 1.0}, 'config': None} self._prepare_dynamic(snapshot=snapshot) @patch.object(redis_service.RedisApp, 'get_working_dir', MagicMock(return_value='/var/lib/redis')) def test_prepare_redis_from_backup(self): self._prepare_dynamic(backup_id='backup_id_123abc') @patch.multiple(redis_service.RedisApp, apply_initial_guestagent_configuration=DEFAULT, restart=DEFAULT, 
install_if_needed=DEFAULT) @patch.object(operating_system, 'chown') @patch.object(configuration.ConfigurationManager, 'save_configuration') def _prepare_dynamic(self, save_configuration_mock, chown_mock, apply_initial_guestagent_configuration, restart, install_if_needed, device_path='/dev/vdb', is_redis_installed=True, backup_info=None, is_root_enabled=False, mount_point='var/lib/redis', backup_id=None, snapshot=None): backup_info = None if backup_id is not None: backup_info = {'id': backup_id, 'location': 'fake-location', 'type': 'RedisBackup', 'checksum': 'fake-checksum', } # covering all outcomes is starting to cause trouble here mock_status = MagicMock() self.manager._app.status = mock_status self.manager._build_admin_client = MagicMock(return_value=MagicMock()) redis_service.RedisApp.stop_db = MagicMock(return_value=None) redis_service.RedisApp.start_db = MagicMock(return_value=None) mock_status.begin_install = MagicMock(return_value=None) VolumeDevice.format = MagicMock(return_value=None) VolumeDevice.mount = MagicMock(return_value=None) VolumeDevice.mount_points = MagicMock(return_value=[]) backup.restore = MagicMock(return_value=None) mock_replication = MagicMock() mock_replication.enable_as_slave = MagicMock() self.mock_repl.return_value = mock_replication self.manager.prepare(self.context, self.packages, None, '2048', None, device_path=device_path, mount_point=mount_point, backup_info=backup_info, overrides=None, cluster_config=None, snapshot=snapshot) mock_status.begin_install.assert_any_call() VolumeDevice.format.assert_any_call() install_if_needed.assert_any_call(self.packages) save_configuration_mock.assert_any_call(None) apply_initial_guestagent_configuration.assert_called_once_with() chown_mock.assert_any_call(mount_point, 'redis', 'redis', as_root=True) if backup_info: backup.restore.assert_called_once_with(self.context, backup_info, '/var/lib/redis') else: redis_service.RedisApp.restart.assert_any_call() if snapshot: self.assertEqual(1, mock_replication.enable_as_slave.call_count) else: self.assertEqual(0, mock_replication.enable_as_slave.call_count) @patch.object(redis_service.RedisApp, 'restart') def test_restart(self, redis_mock): self.manager.restart(self.context) redis_mock.assert_any_call() @patch.object(redis_service.RedisApp, 'stop_db') def test_stop_db(self, redis_mock): self.manager.stop_db(self.context) redis_mock.assert_any_call(do_not_start_on_reboot=False) @patch.object(ImportOverrideStrategy, '_initialize_import_directory') @patch.object(backup, 'backup') @patch.object(configuration.ConfigurationManager, 'parse_configuration', MagicMock(return_value={'dir': '/var/lib/redis', 'dbfilename': 'dump.rdb'})) @patch.object(operating_system, 'chown') @patch.object(operating_system, 'create_directory') @patch.object(redis_service.RedisApp, 'get_config_command_name', Mock(return_value='fakeconfig')) def test_create_backup(self, *mocks): backup.backup = MagicMock(return_value=None) RedisManager().create_backup(self.context, 'backup_id_123') backup.backup.assert_any_call(self.context, 'backup_id_123') def test_backup_required_for_replication(self): mock_replication = MagicMock() mock_replication.backup_required_for_replication = MagicMock() self.mock_repl.return_value = mock_replication self.manager.backup_required_for_replication(self.context) self.assertEqual( 1, mock_replication.backup_required_for_replication.call_count) @patch.object(redis_service.RedisApp, 'update_overrides') @patch.object(redis_service.RedisApp, 'remove_overrides') def 
test_update_overrides(self, remove_config_mock, update_config_mock): self.manager.update_overrides(self.context, 'overrides') remove_config_mock.assert_not_called() update_config_mock.assert_called_once_with(self.context, 'overrides', False) @patch.object(redis_service.RedisApp, 'update_overrides') @patch.object(redis_service.RedisApp, 'remove_overrides') def test_update_overrides_with_remove(self, remove_config_mock, update_config_mock): self.manager.update_overrides(self.context, 'overrides', True) remove_config_mock.assert_called_once_with() update_config_mock.assert_not_called() @patch.object(redis_service.RedisApp, 'apply_overrides') def test_apply_overrides(self, apply_config_mock): self.manager.apply_overrides(self.context, 'overrides') apply_config_mock.assert_called_once_with(self.manager._app.admin, 'overrides') def test_attach_replica(self): mock_replication = MagicMock() mock_replication.enable_as_slave = MagicMock() self.mock_repl.return_value = mock_replication snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': 1.0}} self.manager.attach_replica(self.context, snapshot, None) self.assertEqual(1, mock_replication.enable_as_slave.call_count) def test_detach_replica(self): mock_replication = MagicMock() mock_replication.detach_slave = MagicMock() self.mock_repl.return_value = mock_replication self.manager.detach_replica(self.context) self.assertEqual(1, mock_replication.detach_slave.call_count) def test_enable_as_master(self): mock_replication = MagicMock() mock_replication.enable_as_master = MagicMock() self.mock_repl.return_value = mock_replication self.manager.enable_as_master(self.context, None) self.assertEqual(mock_replication.enable_as_master.call_count, 1) def test_demote_replication_master(self): mock_replication = MagicMock() mock_replication.demote_master = MagicMock() self.mock_repl.return_value = mock_replication self.manager.demote_replication_master(self.context) self.assertEqual(1, mock_replication.demote_master.call_count) @patch.object(redis_service.RedisApp, 'make_read_only') def test_make_read_only(self, redis_mock): self.manager.make_read_only(self.context, 'ON') redis_mock.assert_any_call('ON') def test_cleanup_source_on_replica_detach(self): mock_replication = MagicMock() mock_replication.cleanup_source_on_replica_detach = MagicMock() self.mock_repl.return_value = mock_replication snapshot = {'replication_strategy': self.replication_strategy, 'dataset': {'dataset_size': '1.0'}} self.manager.cleanup_source_on_replica_detach(self.context, snapshot) self.assertEqual( 1, mock_replication.cleanup_source_on_replica_detach.call_count) def test_get_replication_snapshot(self): snapshot_id = None log_position = None master_ref = 'my_master' used_size = 1.0 total_size = 2.0 mock_replication = MagicMock() mock_replication.enable_as_master = MagicMock() mock_replication.snapshot_for_replication = MagicMock( return_value=(snapshot_id, log_position)) mock_replication.get_master_ref = MagicMock( return_value=master_ref) self.mock_repl.return_value = mock_replication self.mock_gfvs_class.return_value = ( {'used': used_size, 'total': total_size}) expected_replication_snapshot = { 'dataset': { 'datastore_manager': self.manager.manager, 'dataset_size': used_size, 'volume_size': total_size, 'snapshot_id': snapshot_id }, 'replication_strategy': self.replication_strategy, 'master': master_ref, 'log_position': log_position } snapshot_info = None replica_source_config = None replication_snapshot = ( 
self.manager.get_replication_snapshot(self.context, snapshot_info, replica_source_config)) self.assertEqual(expected_replication_snapshot, replication_snapshot) self.assertEqual(1, mock_replication.enable_as_master.call_count) self.assertEqual( 1, mock_replication.snapshot_for_replication.call_count) self.assertEqual(1, mock_replication.get_master_ref.call_count) def test_get_replica_context(self): master_ref = { 'host': '1.2.3.4', 'port': 3306 } expected_info = { 'master': master_ref, } mock_replication = MagicMock() mock_replication.get_replica_context = MagicMock( return_value=expected_info) self.mock_repl.return_value = mock_replication replica_info = self.manager.get_replica_context(self.context) self.assertEqual(1, mock_replication.get_replica_context.call_count) self.assertEqual(expected_info, replica_info) def test_get_last_txn(self): expected_host = '10.0.0.2' self.manager._get_master_host = MagicMock(return_value=expected_host) expected_txn_id = 199 repl_info = {'role': 'master', 'master_repl_offset': expected_txn_id} self.manager._get_repl_info = MagicMock(return_value=repl_info) (host, txn_id) = self.manager.get_last_txn(self.context) self.manager._get_master_host.assert_any_call() self.manager._get_repl_info.assert_any_call() self.assertEqual(expected_host, host) self.assertEqual(expected_txn_id, txn_id) def test_get_latest_txn_id(self): expected_txn_id = 199 repl_info = {'role': 'master', 'master_repl_offset': expected_txn_id} self.manager._get_repl_info = MagicMock(return_value=repl_info) latest_txn_id = self.manager.get_latest_txn_id(self.context) self.assertEqual(expected_txn_id, latest_txn_id) self.manager._get_repl_info.assert_any_call() def test_wait_for_txn(self): expected_txn_id = 199 repl_info = {'role': 'master', 'master_repl_offset': expected_txn_id} self.manager._get_repl_info = MagicMock(return_value=repl_info) self.manager.wait_for_txn(self.context, expected_txn_id) self.manager._get_repl_info.assert_any_call() @patch.object(configuration.ConfigurationManager, 'apply_system_override') @patch.object(redis_service.RedisApp, 'apply_overrides') @patch.object(utils, 'generate_random_password', return_value='password') def test_enable_root(self, *mock): root_user = {'_name': '-', '_password': 'password'} result = self.manager.enable_root(self.context) self.assertEqual(root_user, result) @patch.object(redis_service.RedisApp, 'disable_root') def test_disable_root(self, disable_root_mock): self.manager.disable_root(self.context) disable_root_mock.assert_any_call() @patch.object(redis_service.RedisApp, 'get_auth_password', return_value="password") def test_get_root_password(self, get_auth_password_mock): result = self.manager.get_root_password(self.context) self.assertTrue(get_auth_password_mock.called) self.assertEqual('password', result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_service.py0000644000175000017500000000214700000000000025735 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from mock import Mock from mock import patch from trove.guestagent import service from trove.tests.unittests import trove_testtools class ServiceTest(trove_testtools.TestCase): def setUp(self): super(ServiceTest, self).setUp() def tearDown(self): super(ServiceTest, self).tearDown() @patch.object(service.API, '_instance_router') def test_app_factory(self, instance_router_mock): service.app_factory(Mock) self.assertEqual(1, instance_router_mock.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/unittests/guestagent/test_volume.py0000644000175000017500000003141700000000000025606 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import ANY, call, DEFAULT, patch, mock_open from trove.common import cfg from trove.common import exception from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent import volume from trove.tests.unittests import trove_testtools CONF = cfg.CONF class VolumeDeviceTest(trove_testtools.TestCase): def setUp(self): super(VolumeDeviceTest, self).setUp() self.patch_conf_property('volume_fstype', 'ext3') self.patch_conf_property('format_options', '-m 5') self.volumeDevice = volume.VolumeDevice('/dev/vdb') self.exec_patcher = patch.object( utils, 'execute', return_value=('has_journal', '')) self.mock_exec = self.exec_patcher.start() self.addCleanup(self.exec_patcher.stop) self.ismount_patcher = patch.object(operating_system, 'is_mount') self.mock_ismount = self.ismount_patcher.start() self.addCleanup(self.ismount_patcher.stop) def tearDown(self): super(VolumeDeviceTest, self).tearDown() def test_migrate_data(self): with patch.multiple(self.volumeDevice, mount=DEFAULT, unmount=DEFAULT) as mocks: self.volumeDevice.migrate_data('/') self.assertEqual(1, mocks['mount'].call_count) self.assertEqual(1, mocks['unmount'].call_count) self.assertEqual(1, self.mock_exec.call_count) calls = [ call('rsync', '--safe-links', '--perms', '--recursive', '--owner', '--group', '--xattrs', '--sparse', '/', '/mnt/volume', root_helper='sudo', run_as_root=True), ] self.mock_exec.assert_has_calls(calls) def test__check_device_exists(self): self.volumeDevice._check_device_exists() self.assertEqual(1, self.mock_exec.call_count) calls = [ call('blockdev', '--getsize64', '/dev/vdb', attempts=3, root_helper='sudo', run_as_root=True) ] self.mock_exec.assert_has_calls(calls) @patch('trove.guestagent.volume.LOG') def test_fail__check_device_exists(self, mock_logging): with patch.object(utils, 'execute', side_effect=exception.ProcessExecutionError): self.assertRaises(exception.GuestError, self.volumeDevice._check_device_exists) def test__check_format(self): self.volumeDevice._check_format() self.assertEqual(1, self.mock_exec.call_count) calls = [ call('dumpe2fs', '/dev/vdb', root_helper='sudo', 
run_as_root=True) ] self.mock_exec.assert_has_calls(calls) @patch('trove.guestagent.volume.LOG') def test__check_format_2(self, mock_logging): self.assertEqual(0, self.mock_exec.call_count) proc_err = exception.ProcessExecutionError() proc_err.stderr = 'Wrong magic number' self.mock_exec.side_effect = proc_err self.assertRaises(exception.GuestError, self.volumeDevice._check_format) def test__format(self): self.volumeDevice._format() self.assertEqual(1, self.mock_exec.call_count) calls = [ call('mkfs', '--type', 'ext3', '-m', '5', '/dev/vdb', root_helper='sudo', run_as_root=True) ] self.mock_exec.assert_has_calls(calls) def test_format(self): self.volumeDevice.format() self.assertEqual(3, self.mock_exec.call_count) calls = [ call('blockdev', '--getsize64', '/dev/vdb', attempts=3, root_helper='sudo', run_as_root=True), call('mkfs', '--type', 'ext3', '-m', '5', '/dev/vdb', root_helper='sudo', run_as_root=True), call('dumpe2fs', '/dev/vdb', root_helper='sudo', run_as_root=True) ] self.mock_exec.assert_has_calls(calls) def test_mount(self): with patch.multiple(volume.VolumeMountPoint, mount=DEFAULT, write_to_fstab=DEFAULT) as mocks: self.volumeDevice.mount('/dev/vba') self.assertEqual(1, mocks['mount'].call_count, "Wrong number of calls to mount()") self.assertEqual(1, mocks['write_to_fstab'].call_count, "Wrong number of calls to write_to_fstab()") self.mock_exec.assert_not_called() def test_resize_fs(self): with patch.object(operating_system, 'is_mount', return_value=True): mount_point = '/mnt/volume' self.volumeDevice.resize_fs(mount_point) self.assertEqual(4, self.mock_exec.call_count) calls = [ call('blockdev', '--getsize64', '/dev/vdb', attempts=3, root_helper='sudo', run_as_root=True), call("umount", mount_point, run_as_root=True, root_helper='sudo'), call('e2fsck', '-f', '-p', '/dev/vdb', root_helper='sudo', run_as_root=True), call('resize2fs', '/dev/vdb', root_helper='sudo', run_as_root=True) ] self.mock_exec.assert_has_calls(calls) @patch.object(utils, 'execute', side_effect=exception.ProcessExecutionError) @patch('trove.guestagent.volume.LOG') def test_fail_resize_fs(self, mock_logging, mock_execute): with patch.object(self.volumeDevice, '_check_device_exists'): self.assertRaises(exception.GuestError, self.volumeDevice.resize_fs, '/mnt/volume') self.assertEqual(1, self.volumeDevice._check_device_exists.call_count) self.assertEqual(2, self.mock_ismount.call_count) def test_unmount_positive(self): self._test_unmount() def test_unmount_negative(self): self._test_unmount(has_mount=False) def _test_unmount(self, has_mount=True): with patch.object(operating_system, 'is_mount', return_value=has_mount): self.volumeDevice.unmount('/mnt/volume') if has_mount: self.assertEqual(1, self.mock_exec.call_count) else: self.mock_exec.assert_not_called() def test_mount_points(self): self.mock_exec.return_value = ( ("/dev/vdb /var/lib/mysql xfs rw 0 0", "")) mount_point = self.volumeDevice.mount_points('/dev/vdb') self.assertEqual(['/var/lib/mysql'], mount_point) self.assertEqual(1, self.mock_exec.call_count) calls = [ call("grep '^/dev/vdb ' /etc/mtab", check_exit_code=[0, 1], shell=True) ] self.mock_exec.assert_has_calls(calls) def test_set_readahead_size(self): readahead_size = 2048 self.volumeDevice.set_readahead_size(readahead_size) self.assertEqual(2, self.mock_exec.call_count) calls = [ call('blockdev', '--getsize64', '/dev/vdb', attempts=3, root_helper='sudo', run_as_root=True), call('blockdev', '--setra', readahead_size, '/dev/vdb', root_helper='sudo', run_as_root=True), ] 
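        # NOTE(editor): unlike assert_any_call(), assert_has_calls()
        # requires these two calls to appear in mock_calls in this order
        # (as a consecutive run), so it also pins down the relative
        # ordering of the size query and the read-ahead update.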
self.mock_exec.assert_has_calls(calls) @patch('trove.guestagent.volume.LOG') def test_fail_set_readahead_size(self, mock_logging): self.mock_exec.side_effect = exception.ProcessExecutionError readahead_size = 2048 self.assertRaises(exception.GuestError, self.volumeDevice.set_readahead_size, readahead_size) self.assertEqual(1, self.mock_exec.call_count) calls = [ call('blockdev', '--getsize64', '/dev/vdb', attempts=3, root_helper='sudo', run_as_root=True), ] self.mock_exec.assert_has_calls(calls) class VolumeDeviceTestXFS(trove_testtools.TestCase): def setUp(self): super(VolumeDeviceTestXFS, self).setUp() self.patch_conf_property('volume_fstype', 'xfs') self.patch_conf_property('format_options', '') self.volumeDevice = volume.VolumeDevice('/dev/vdb') self.exec_patcher = patch.object( utils, 'execute', return_value=('', '')) self.mock_exec = self.exec_patcher.start() self.addCleanup(self.exec_patcher.stop) self.ismount_patcher = patch.object(operating_system, 'is_mount') self.mock_ismount = self.ismount_patcher.start() self.addCleanup(self.ismount_patcher.stop) def tearDown(self): super(VolumeDeviceTestXFS, self).tearDown() self.volumeDevice = None def test__check_format(self): self.volumeDevice._check_format() self.assertEqual(1, self.mock_exec.call_count) calls = [ call('xfs_admin', '-l', '/dev/vdb', root_helper='sudo', run_as_root=True) ] self.mock_exec.assert_has_calls(calls) @patch('trove.guestagent.volume.LOG') @patch.object(utils, 'execute', return_value=('not a valid XFS filesystem', '')) def test__check_format_2(self, mock_logging, mock_exec): self.assertRaises(exception.GuestError, self.volumeDevice._check_format) def test__format(self): self.volumeDevice._format() self.assertEqual(1, self.mock_exec.call_count) calls = [ call('mkfs.xfs', '/dev/vdb', root_helper='sudo', run_as_root=True) ] self.mock_exec.assert_has_calls(calls) def test_resize_fs(self): with patch.object(operating_system, 'is_mount', return_value=True): mount_point = '/mnt/volume' self.volumeDevice.resize_fs(mount_point) self.assertEqual(6, self.mock_exec.call_count) calls = [ call('blockdev', '--getsize64', '/dev/vdb', attempts=3, root_helper='sudo', run_as_root=True), call("umount", mount_point, run_as_root=True, root_helper='sudo'), call('xfs_repair', '/dev/vdb', root_helper='sudo', run_as_root=True), call('mount', '/dev/vdb', root_helper='sudo', run_as_root=True), call('xfs_growfs', '/dev/vdb', root_helper='sudo', run_as_root=True), call('umount', '/dev/vdb', root_helper='sudo', run_as_root=True) ] self.mock_exec.assert_has_calls(calls) class VolumeMountPointTest(trove_testtools.TestCase): def setUp(self): super(VolumeMountPointTest, self).setUp() self.patch_conf_property('volume_fstype', 'ext3') self.patch_conf_property('format_options', '-m 5') self.volumeMountPoint = volume.VolumeMountPoint('/mnt/device', '/dev/vdb') self.exec_patcher = patch.object(utils, 'execute', return_value=('', '')) self.mock_exec = self.exec_patcher.start() self.addCleanup(self.exec_patcher.stop) def tearDown(self): super(VolumeMountPointTest, self).tearDown() def test_mount(self): with patch.object(operating_system, 'exists', return_value=False): self.volumeMountPoint.mount() self.assertEqual(2, self.mock_exec.call_count) calls = [ call('mkdir', '-p', '/dev/vdb', root_helper='sudo', run_as_root=True), call('mount', '-t', 'ext3', '-o', 'defaults,noatime', '/mnt/device', '/dev/vdb', root_helper='sudo', run_as_root=True) ] self.mock_exec.assert_has_calls(calls) def test_write_to_fstab(self): mock_file = mock_open() with patch('%s.open' 
% volume.__name__, mock_file, create=True): self.volumeMountPoint.write_to_fstab() self.assertEqual(1, self.mock_exec.call_count) calls = [ call('install', '-o', 'root', '-g', 'root', '-m', '644', ANY, '/etc/fstab', root_helper='sudo', run_as_root=True) ] self.mock_exec.assert_has_calls(calls) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8001113 trove-12.1.0.dev92/trove/tests/unittests/hacking/0000755000175000017500000000000000000000000022116 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/hacking/__init__.py0000644000175000017500000000000000000000000024215 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/unittests/hacking/test_check.py0000644000175000017500000001203600000000000024606 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import pycodestyle import textwrap from trove.hacking import checks as tc from trove.tests.unittests import trove_testtools class HackingTestCase(trove_testtools.TestCase): def assertLinePasses(self, func, *args): def check_callable(f, *args): return next(f(*args)) self.assertRaises(StopIteration, check_callable, func, *args) def assertLineFails(self, func, *args): self.assertIsInstance(next(func(*args)), tuple) def test_log_translations(self): all_log_levels = ( 'critical', 'debug', 'error', 'exception', 'info', 'reserved', 'warning', ) for level in all_log_levels: bad = 'LOG.%s(_("Bad"))' % level self.assertEqual( 1, len(list(tc.no_translate_logs(bad, bad, 'f')))) bad = "LOG.%s(_('Bad'))" % level self.assertEqual( 1, len(list(tc.no_translate_logs(bad, bad, 'f')))) ok = 'LOG.%s("OK")' % level self.assertEqual( 0, len(list(tc.no_translate_logs(ok, ok, 'f')))) ok = "LOG.%s(_('OK')) # noqa" % level self.assertEqual( 0, len(list(tc.no_translate_logs(ok, ok, 'f')))) ok = "LOG.%s(variable)" % level self.assertEqual( 0, len(list(tc.no_translate_logs(ok, ok, 'f')))) # Do not do validations in tests ok = 'LOG.%s(_("OK - unit tests"))' % level self.assertEqual( 0, len(list(tc.no_translate_logs(ok, ok, 'f/tests/f')))) def test_check_localized_exception_messages(self): f = tc.check_raised_localized_exceptions self.assertLineFails(f, " raise KeyError('Error text')", '') self.assertLineFails(f, ' raise KeyError("Error text")', '') self.assertLinePasses(f, ' raise KeyError(_("Error text"))', '') self.assertLinePasses(f, ' raise KeyError(_ERR("Error text"))', '') self.assertLinePasses(f, " raise KeyError(translated_msg)", '') self.assertLinePasses(f, '# raise KeyError("Not translated")', '') self.assertLinePasses(f, 'print("raise KeyError("Not ' 'translated")")', '') def test_check_localized_exception_message_skip_tests(self): f = tc.check_raised_localized_exceptions self.assertLinePasses(f, "raise KeyError('Error text')", 
'neutron_lib/tests/unit/mytest.py') def test_no_basestring(self): self.assertEqual( 1, len(list(tc.check_no_basestring("isinstance(x, basestring)")))) self.assertEqual( 0, len(list(tc.check_no_basestring("this basestring is good)")))) # We are patching pycodestyle so that only the check under test is actually # installed. @mock.patch('pycodestyle._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def _run_check(self, code, checker, filename=None): pycodestyle.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) checker = pycodestyle.Checker(filename=filename, lines=lines) # NOTE(sdague): the standard reporter has printing to stdout # as a normal part of check_all, which bleeds through to the # test output stream in an unhelpful way. This blocks that printing. with mock.patch('pycodestyle.StandardReport.get_file_results'): checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def _assert_has_errors(self, code, checker, expected_errors=None, filename=None): actual_errors = [e[:3] for e in self._run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) def _assert_has_no_errors(self, code, checker, filename=None): self._assert_has_errors(code, checker, filename=filename) def test_oslo_assert_raises_regexp(self): code = """ self.assertRaisesRegexp(ValueError, "invalid literal for.*XYZ'$", int, 'XYZ') """ self._assert_has_errors(code, tc.assert_raises_regexp, expected_errors=[(1, 0, "N335")]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8001113 trove-12.1.0.dev92/trove/tests/unittests/instance/0000755000175000017500000000000000000000000022316 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/instance/__init__.py0000644000175000017500000000000000000000000024415 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/instance/test_instance_controller.py0000644000175000017500000003667700000000000030021 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
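# NOTE(editor): the controller tests below share one shape: fetch a
# schema, build a jsonschema Draft4Validator, then assert on is_valid()
# or on the sorted iter_errors() output. A minimal, self-contained
# sketch of that shape follows; the schema and helper names here are
# illustrative, not Trove's.
import jsonschema as _jsonschema_sketch

_sketch_schema = {
    'type': 'object',
    'properties': {
        'resize': {
            'type': 'object',
            'properties': {
                'volume': {
                    'type': 'object',
                    'properties': {
                        'size': {'type': 'integer', 'minimum': 1},
                    },
                    'required': ['size'],
                },
            },
        },
    },
}


def _sketch_validate(body):
    # Returns (is_valid, errors sorted the way the tests below sort them).
    validator = _jsonschema_sketch.Draft4Validator(_sketch_schema)
    errors = sorted(validator.iter_errors(body), key=lambda e: list(e.path))
    return validator.is_valid(body), errors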
# import jsonschema from mock import Mock from testtools.matchers import Is, Equals from testtools.testcase import skip from trove.common import apischema from trove.instance.service import InstanceController from trove.tests.unittests import trove_testtools class TestInstanceController(trove_testtools.TestCase): def setUp(self): super(TestInstanceController, self).setUp() self.controller = InstanceController() self.locality = 'affinity' self.instance = { "instance": { "volume": {"size": "1"}, "users": [ {"name": "user1", "password": "litepass", "databases": [{"name": "firstdb"}]} ], "flavorRef": "https://localhost:8779/v1.0/2500/1", "name": "TEST-XYS2d2fe2kl;zx;jkl2l;sjdcma239(E)@(D", "databases": [ { "name": "firstdb", "collate": "latin2_general_ci", "character_set": "latin2" }, { "name": "db2" } ], "locality": self.locality } } self.context = trove_testtools.TroveTestContext(self) self.req = Mock(remote_addr='ip:port', host='myhost') def verify_errors(self, errors, msg=None, properties=None, path=None): msg = msg or [] properties = properties or [] self.assertThat(len(errors), Is(len(msg))) i = 0 while i < len(msg): self.assertIn(errors[i].message, msg) if path: self.assertThat(path, Equals(properties[i])) else: self.assertThat(errors[i].path.pop(), Equals(properties[i])) i += 1 def test_get_schema_create(self): schema = self.controller.get_schema('create', {'instance': {}}) self.assertIsNotNone(schema) self.assertIn('instance', schema['properties']) def test_get_schema_action_restart(self): schema = self.controller.get_schema('action', {'restart': {}}) self.assertIsNotNone(schema) self.assertIn('restart', schema['properties']) def test_get_schema_action_resize_volume(self): schema = self.controller.get_schema( 'action', {'resize': {'volume': {}}}) self.assertIsNotNone(schema) self.assertIn('resize', schema['properties']) self.assertIn( 'volume', schema['properties']['resize']['properties']) def test_get_schema_action_resize_flavorRef(self): schema = self.controller.get_schema( 'action', {'resize': {'flavorRef': {}}}) self.assertIsNotNone(schema) self.assertIn('resize', schema['properties']) self.assertIn( 'flavorRef', schema['properties']['resize']['properties']) def test_get_schema_action_other(self): schema = self.controller.get_schema( 'action', {'supersized': {'flavorRef': {}}}) self.assertIsNotNone(schema) self.assertThat(len(schema.keys()), Is(0)) def test_validate_create_complete(self): body = self.instance schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_complete_with_restore(self): body = self.instance body['instance']['restorePoint'] = { "backupRef": "d761edd8-0771-46ff-9743-688b9e297a3b" } schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_complete_with_restore_error(self): body = self.instance backup_id_ref = "invalid-backup-id-ref" body['instance']['restorePoint'] = { "backupRef": backup_id_ref } schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("'%s' does not match '%s'" % (backup_id_ref, apischema.uuid['pattern']))) def test_validate_create_blankname(self): body = self.instance body['instance']['name'] = " " 
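        # NOTE(editor): a whitespace-only name passes the type check but
        # not the '^.*[0-9a-zA-Z]+.*$' pattern, so validation must fail
        # with exactly one error below.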
schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'")) def test_validate_create_invalid_name(self): body = self.instance body['instance']['name'] = "$#$%^^" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertEqual(1, len(errors)) self.assertIn("'$#$%^^' does not match '^.*[0-9a-zA-Z]+.*$'", errors[0].message) def test_validate_create_invalid_locality(self): body = self.instance body['instance']['locality'] = "$%^" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertEqual(1, len(errors)) self.assertIn("'$%^' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("locality", error_paths) def test_validate_restart(self): body = {"restart": {}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_invalid_action(self): # TODO(juice) perhaps we should validate the schema not recognized body = {"restarted": {}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_resize_volume(self): body = {"resize": {"volume": {"size": 4}}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_resize_volume_string(self): body = {"resize": {"volume": {"size": "4"}}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_resize_volume_string_start_with_zero(self): body = {"resize": {"volume": {"size": "0040"}}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_resize_volume_string_invalid_number(self): body = {"resize": {"volume": {"size": '-44.0'}}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(errors[0].context[1].message, Equals("'-44.0' does not match '^0*[1-9]+[0-9]*$'")) self.assertThat(errors[0].path.pop(), Equals('size')) def test_validate_resize_volume_invalid_characters(self): body = {"resize": {"volume": {"size": 'x'}}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(errors[0].context[0].message, Equals("'x' is not of type 'integer'")) self.assertThat(errors[0].context[1].message, Equals("'x' does not match '^0*[1-9]+[0-9]*$'")) self.assertThat(errors[0].path.pop(), Equals('size')) def 
test_validate_resize_volume_zero_number(self): body = {"resize": {"volume": {"size": 0}}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(errors[0].context[0].message, Equals("0 is less than the minimum of 1")) self.assertThat(errors[0].path.pop(), Equals('size')) def test_validate_resize_volume_string_zero_number(self): body = {"resize": {"volume": {"size": '0'}}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(errors[0].context[1].message, Equals("'0' does not match '^0*[1-9]+[0-9]*$'")) self.assertThat(errors[0].path.pop(), Equals('size')) def test_validate_resize_instance(self): body = {"resize": {"flavorRef": "https://endpoint/v1.0/123/flavors/2"}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_resize_instance_int(self): body = {"resize": {"flavorRef": 2}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_resize_instance_string(self): body = {"resize": {"flavorRef": 'foo'}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_resize_instance_empty_url(self): body = {"resize": {"flavorRef": ""}} schema = self.controller.get_schema('action', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.verify_errors(errors[0].context, ["'' is too short", "'' does not match '^.*[0-9a-zA-Z]+.*$'", "'' is not of type 'integer'"], ["flavorRef", "flavorRef", "flavorRef", "flavorRef"], errors[0].path.pop()) @skip("This URI validator allows just about anything you give it") def test_validate_resize_instance_invalid_url(self): body = {"resize": {"flavorRef": "xyz-re1f2-daze329d-f23901"}} schema = self.controller.get_schema('action', body) self.assertIsNotNone(schema) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.verify_errors(errors, ["'' is too short"], ["flavorRef"]) def _setup_modify_instance_mocks(self): instance = Mock() instance.detach_replica = Mock() instance.attach_configuration = Mock() instance.detach_configuration = Mock() instance.update_db = Mock() return instance def test_modify_instance_with_empty_args(self): instance = self._setup_modify_instance_mocks() args = {} self.controller._modify_instance(self.context, self.req, instance, **args) self.assertEqual(0, instance.detach_replica.call_count) self.assertEqual(0, instance.detach_configuration.call_count) self.assertEqual(0, instance.attach_configuration.call_count) self.assertEqual(0, instance.update_db.call_count) def test_modify_instance_with_nonempty_args_calls_update_db(self): instance = self._setup_modify_instance_mocks() args = {} args['any'] = 'anything' self.controller._modify_instance(self.context, self.req, instance, **args) instance.update_db.assert_called_once_with(**args) def 
test_modify_instance_with_False_detach_replica_arg(self): instance = self._setup_modify_instance_mocks() args = {} args['detach_replica'] = False self.controller._modify_instance(self.context, self.req, instance, **args) self.assertEqual(0, instance.detach_replica.call_count) def test_modify_instance_with_True_detach_replica_arg(self): instance = self._setup_modify_instance_mocks() args = {} args['detach_replica'] = True self.controller._modify_instance(self.context, self.req, instance, **args) self.assertEqual(1, instance.detach_replica.call_count) def test_modify_instance_with_configuration_id_arg(self): instance = self._setup_modify_instance_mocks() args = {} args['configuration_id'] = 'some_id' self.controller._modify_instance(self.context, self.req, instance, **args) self.assertEqual(1, instance.attach_configuration.call_count) def test_modify_instance_with_None_configuration_id_arg(self): instance = self._setup_modify_instance_mocks() args = {} args['configuration_id'] = None self.controller._modify_instance(self.context, self.req, instance, **args) self.assertEqual(1, instance.detach_configuration.call_count) def test_modify_instance_with_all_args(self): instance = self._setup_modify_instance_mocks() args = {} args['detach_replica'] = True args['configuration_id'] = 'some_id' self.controller._modify_instance(self.context, self.req, instance, **args) self.assertEqual(1, instance.detach_replica.call_count) self.assertEqual(1, instance.attach_configuration.call_count) instance.update_db.assert_called_once_with(**args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/instance/test_instance_models.py0000644000175000017500000004770000000000000027106 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace Hosting # Copyright 2014 Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
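# NOTE(editor): the model tests below (like the controller tests above)
# lean heavily on mock.patch.object to swap collaborators out and assert
# on call counts. A minimal, self-contained sketch of that pattern;
# _Widget and do_work() are hypothetical names, not Trove code.
from mock import patch as _patch_sketch


class _Widget(object):
    def do_work(self):
        return 'real'


def _sketch_patch_object():
    with _patch_sketch.object(_Widget, 'do_work',
                              return_value='fake') as mocked:
        # Inside the context manager the class attribute is a MagicMock.
        assert _Widget().do_work() == 'fake'
        mocked.assert_called_once_with()
    # Leaving the context manager restores the original method.
    assert _Widget().do_work() == 'real'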
import uuid from mock import Mock, patch from trove.backup import models as backup_models from trove.common import cfg from trove.common import exception from trove.common.instance import ServiceStatuses from trove.common import neutron from trove.datastore import models as datastore_models from trove.instance import models from trove.instance.models import DBInstance from trove.instance.models import DBInstanceFault from trove.instance.models import filter_ips from trove.instance.models import Instance from trove.instance.models import instance_encryption_key_cache from trove.instance.models import InstanceServiceStatus from trove.instance.models import SimpleInstance from trove.instance.tasks import InstanceTasks from trove.taskmanager import api as task_api from trove.tests.fakes import nova from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util CONF = cfg.CONF class SimpleInstanceTest(trove_testtools.TestCase): def setUp(self): super(SimpleInstanceTest, self).setUp() self.context = trove_testtools.TroveTestContext(self, is_admin=True) db_info = DBInstance( InstanceTasks.BUILDING, name="TestInstance") self.instance = SimpleInstance( None, db_info, InstanceServiceStatus( ServiceStatuses.BUILDING), ds_version=Mock(), ds=Mock(), locality='affinity') self.instance.context = self.context db_info.addresses = {"private": [{"addr": "123.123.123.123"}], "internal": [{"addr": "10.123.123.123"}], "public": [{"addr": "15.123.123.123"}]} self.orig_conf = CONF.network_label_regex self.orig_ip_regex = CONF.ip_regex self.orig_black_list_regex = CONF.black_list_regex def tearDown(self): super(SimpleInstanceTest, self).tearDown() CONF.network_label_regex = self.orig_conf CONF.ip_start = None CONF.management_networks = [] CONF.ip_regex = self.orig_ip_regex CONF.black_list_regex = self.orig_black_list_regex neutron.reset_management_networks() def test_get_root_on_create(self): root_on_create_val = Instance.get_root_on_create( 'redis') self.assertFalse(root_on_create_val) def test_filter_ips_white_list(self): CONF.network_label_regex = '.*' CONF.ip_regex = '^(15.|123.)' CONF.black_list_regex = '^10.123.123.*' ip = self.instance.get_visible_ip_addresses() ip = filter_ips( ip, CONF.ip_regex, CONF.black_list_regex) self.assertEqual(2, len(ip)) self.assertIn('123.123.123.123', ip) self.assertIn('15.123.123.123', ip) def test_filter_ips_black_list(self): CONF.network_label_regex = '.*' CONF.ip_regex = '.*' CONF.black_list_regex = '^10.123.123.*' ip = self.instance.get_visible_ip_addresses() ip = filter_ips( ip, CONF.ip_regex, CONF.black_list_regex) self.assertEqual(2, len(ip)) self.assertNotIn('10.123.123.123', ip) def test_one_network_label(self): CONF.network_label_regex = 'public' ip = self.instance.get_visible_ip_addresses() self.assertEqual(['15.123.123.123'], ip) def test_two_network_labels(self): CONF.network_label_regex = '^(private|public)$' ip = self.instance.get_visible_ip_addresses() self.assertEqual(2, len(ip)) self.assertIn('123.123.123.123', ip) self.assertIn('15.123.123.123', ip) def test_all_network_labels(self): CONF.network_label_regex = '.*' ip = self.instance.get_visible_ip_addresses() self.assertEqual(3, len(ip)) self.assertIn('10.123.123.123', ip) self.assertIn('123.123.123.123', ip) self.assertIn('15.123.123.123', ip) @patch('trove.common.clients.create_neutron_client') def test_filter_management_ip_addresses(self, mock_neutron_client): CONF.network_label_regex = '' CONF.management_networks = ['fake-net-id'] neutron_client = Mock() 
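        # NOTE(editor): the stubbed show_network() resolves 'fake-net-id'
        # to the label 'public', so the public address is treated as a
        # management address and filtered out of the visible IPs below.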
neutron_client.show_network.return_value = { 'network': {'name': 'public'} } mock_neutron_client.return_value = neutron_client ip = self.instance.get_visible_ip_addresses() neutron_client.show_network.assert_called_once_with('fake-net-id') self.assertEqual(2, len(ip)) self.assertIn('123.123.123.123', ip) self.assertIn('10.123.123.123', ip) def test_locality(self): self.assertEqual('affinity', self.instance.locality) def test_fault(self): fault_message = 'Error' fault_details = 'details' fault_date = 'now' temp_fault = Mock() temp_fault.message = fault_message temp_fault.details = fault_details temp_fault.updated = fault_date fault_mock = Mock(return_value=temp_fault) with patch.object(DBInstanceFault, 'find_by', fault_mock): fault = self.instance.fault self.assertEqual(fault_message, fault.message) self.assertEqual(fault_details, fault.details) self.assertEqual(fault_date, fault.updated) class CreateInstanceTest(trove_testtools.TestCase): @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) def setUp(self): util.init_db() self.context = trove_testtools.TroveTestContext(self, is_admin=True) self.name = "name" self.flavor_id = 5 self.image_id = "UUID" self.databases = [] self.users = [] self.datastore = datastore_models.DBDatastore.create( id=str(uuid.uuid4()), name='mysql' + str(uuid.uuid4()), ) self.datastore_version = ( datastore_models.DBDatastoreVersion.create( id=str(uuid.uuid4()), datastore_id=self.datastore.id, name="5.5" + str(uuid.uuid4()), manager="mysql", image_id="image_id", packages="", active=True)) self.volume_size = 1 self.az = "az" self.nics = None self.configuration = None self.tenant_id = "UUID" self.datastore_version_id = str(uuid.uuid4()) self.db_info = DBInstance.create( name=self.name, flavor_id=self.flavor_id, tenant_id=self.tenant_id, volume_size=self.volume_size, datastore_version_id=self.datastore_version.id, task_status=InstanceTasks.BUILDING, configuration_id=self.configuration ) self.backup_name = "name" self.descr = None self.backup_state = backup_models.BackupState.COMPLETED self.instance_id = self.db_info.id self.parent_id = None self.deleted = False self.backup = backup_models.DBBackup.create( name=self.backup_name, description=self.descr, tenant_id=self.tenant_id, state=self.backup_state, instance_id=self.instance_id, parent_id=self.parent_id, datastore_version_id=self.datastore_version.id, deleted=False ) self.backup_id = self.backup.id self.orig_client = models.create_nova_client models.create_nova_client = nova.fake_create_nova_client self.orig_api = task_api.API(self.context).create_instance task_api.API(self.context).create_instance = Mock() self.run_with_quotas = models.run_with_quotas models.run_with_quotas = Mock() self.check = backup_models.DBBackup.check_swift_object_exist backup_models.DBBackup.check_swift_object_exist = Mock( return_value=True) self.locality = 'affinity' self.swift_verify_patch = patch.object(models.Backup, 'verify_swift_auth_token') self.addCleanup(self.swift_verify_patch.stop) self.swift_verify_patch.start() super(CreateInstanceTest, self).setUp() @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) def tearDown(self): self.db_info.delete() self.backup.delete() self.datastore.delete() self.datastore_version.delete() models.create_nova_client = self.orig_client task_api.API(self.context).create_instance = self.orig_api models.run_with_quotas = self.run_with_quotas backup_models.DBBackup.check_swift_object_exist = self.check self.backup.delete() self.db_info.delete() super(CreateInstanceTest, 
self).tearDown() def test_exception_on_invalid_backup_size(self): self.backup.size = 1.1 self.backup.save() self.assertEqual(self.backup.id, self.backup_id) exc = self.assertRaises( exception.BackupTooLarge, models.Instance.create, self.context, self.name, self.flavor_id, self.image_id, self.databases, self.users, self.datastore, self.datastore_version, self.volume_size, self.backup_id, self.az, self.nics, self.configuration ) self.assertIn("Backup is too large for " "given flavor or volume.", str(exc)) def test_can_restore_from_backup_with_almost_equal_size(self): # target size equals to "1Gb" self.backup.size = 0.99 self.backup.save() instance = models.Instance.create( self.context, self.name, self.flavor_id, self.image_id, self.databases, self.users, self.datastore, self.datastore_version, self.volume_size, self.backup_id, self.az, self.nics, self.configuration) self.assertIsNotNone(instance) def test_can_instantiate_with_locality(self): # make sure the backup will fit self.backup.size = 0.2 self.backup.save() instance = models.Instance.create( self.context, self.name, self.flavor_id, self.image_id, self.databases, self.users, self.datastore, self.datastore_version, self.volume_size, self.backup_id, self.az, self.nics, self.configuration, locality=self.locality) self.assertIsNotNone(instance) class TestInstanceUpgrade(trove_testtools.TestCase): def setUp(self): self.context = trove_testtools.TroveTestContext(self, is_admin=True) util.init_db() self.datastore = datastore_models.DBDatastore.create( id=str(uuid.uuid4()), name='test' + str(uuid.uuid4()), default_version_id=str(uuid.uuid4())) self.datastore_version1 = datastore_models.DBDatastoreVersion.create( id=self.datastore.default_version_id, name='name' + str(uuid.uuid4()), image_id='old_image', packages=str(uuid.uuid4()), datastore_id=self.datastore.id, manager='test', active=1) self.datastore_version2 = datastore_models.DBDatastoreVersion.create( id=str(uuid.uuid4()), name='name' + str(uuid.uuid4()), image_id='new_image', packages=str(uuid.uuid4()), datastore_id=self.datastore.id, manager='test', active=1) self.safe_nova_client = models.create_nova_client models.create_nova_client = nova.fake_create_nova_client super(TestInstanceUpgrade, self).setUp() def tearDown(self): self.datastore.delete() self.datastore_version1.delete() self.datastore_version2.delete() models.create_nova_client = self.safe_nova_client super(TestInstanceUpgrade, self).tearDown() @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) @patch.object(task_api.API, 'upgrade') @patch('trove.tests.fakes.nova.LOG') def test_upgrade(self, mock_logging, task_upgrade): instance_model = DBInstance( InstanceTasks.NONE, id=str(uuid.uuid4()), name="TestUpgradeInstance", datastore_version_id=self.datastore_version1.id) instance_model.set_task_status(InstanceTasks.NONE) instance_model.save() instance_status = InstanceServiceStatus( ServiceStatuses.RUNNING, id=str(uuid.uuid4()), instance_id=instance_model.id) instance_status.save() self.assertIsNotNone(instance_model) instance = models.load_instance(models.Instance, self.context, instance_model.id) try: instance.upgrade(self.datastore_version2) self.assertEqual(self.datastore_version2.id, instance.db_info.datastore_version_id) self.assertEqual(InstanceTasks.UPGRADING, instance.db_info.task_status) self.assertTrue(task_upgrade.called) finally: instance_status.delete() instance_model.delete() class TestReplication(trove_testtools.TestCase): def setUp(self): util.init_db() self.datastore = 
datastore_models.DBDatastore.create( id=str(uuid.uuid4()), name='name' + str(uuid.uuid4()), default_version_id=str(uuid.uuid4())) self.datastore_version = datastore_models.DBDatastoreVersion.create( id=self.datastore.default_version_id, name='name' + str(uuid.uuid4()), image_id=str(uuid.uuid4()), packages=str(uuid.uuid4()), datastore_id=self.datastore.id, manager='mysql', active=1) self.databases = [] self.users = [] self.master = DBInstance( InstanceTasks.NONE, id=str(uuid.uuid4()), name="TestMasterInstance", datastore_version_id=self.datastore_version.id, volume_size=2) self.master.set_task_status(InstanceTasks.NONE) self.master.save() self.master_status = InstanceServiceStatus( ServiceStatuses.RUNNING, id=str(uuid.uuid4()), instance_id=self.master.id) self.master_status.save() self.safe_nova_client = models.create_nova_client models.create_nova_client = nova.fake_create_nova_client self.swift_verify_patch = patch.object(models.Backup, 'verify_swift_auth_token') self.addCleanup(self.swift_verify_patch.stop) self.swift_verify_patch.start() super(TestReplication, self).setUp() def tearDown(self): self.master.delete() self.master_status.delete() self.datastore.delete() self.datastore_version.delete() models.create_nova_client = self.safe_nova_client super(TestReplication, self).tearDown() @patch('trove.instance.models.LOG') def test_replica_of_not_active_master(self, mock_logging): self.master.set_task_status(InstanceTasks.BUILDING) self.master.save() self.master_status.set_status(ServiceStatuses.BUILDING) self.master_status.save() self.assertRaises(exception.UnprocessableEntity, Instance.create, None, 'name', 1, "UUID", [], [], self.datastore, self.datastore_version, 2, None, slave_of_id=self.master.id) @patch('trove.instance.models.LOG') def test_replica_with_invalid_slave_of_id(self, mock_logging): self.assertRaises(exception.NotFound, Instance.create, None, 'name', 1, "UUID", [], [], self.datastore, self.datastore_version, 2, None, slave_of_id=str(uuid.uuid4())) def test_create_replica_from_replica(self): self.replica_datastore_version = Mock( spec=datastore_models.DBDatastoreVersion) self.replica_datastore_version.id = "UUID" self.replica_datastore_version.manager = 'mysql' self.replica_info = DBInstance( InstanceTasks.NONE, id="UUID", name="TestInstance", datastore_version_id=self.replica_datastore_version.id, slave_of_id=self.master.id) self.replica_info.save() self.assertRaises(exception.Forbidden, Instance.create, None, 'name', 2, "UUID", [], [], self.datastore, self.datastore_version, 2, None, slave_of_id=self.replica_info.id) def test_create_replica_with_users(self): self.users.append({"name": "testuser", "password": "123456"}) self.assertRaises(exception.ReplicaCreateWithUsersDatabasesError, Instance.create, None, 'name', 2, "UUID", [], self.users, self.datastore, self.datastore_version, 1, None, slave_of_id=self.master.id) def test_create_replica_with_databases(self): self.databases.append({"name": "testdb"}) self.assertRaises(exception.ReplicaCreateWithUsersDatabasesError, Instance.create, None, 'name', 1, "UUID", self.databases, [], self.datastore, self.datastore_version, 2, None, slave_of_id=self.master.id) def test_replica_volume_size_smaller_than_master(self): self.assertRaises(exception.Forbidden, Instance.create, None, 'name', 1, "UUID", [], [], self.datastore, self.datastore_version, 1, None, slave_of_id=self.master.id) def trivial_key_function(id): return id * id class TestInstanceKeyCaching(trove_testtools.TestCase): def setUp(self): super(TestInstanceKeyCaching, 
self).setUp() def tearDown(self): super(TestInstanceKeyCaching, self).tearDown() def test_basic_caching(self): keycache = instance_encryption_key_cache(trivial_key_function, 5) self.assertEqual(keycache[5], 25) self.assertEqual(keycache[5], 25) self.assertEqual(keycache[25], 625) def test_caching(self): keyfn = Mock(return_value=123) keycache = instance_encryption_key_cache(keyfn, 5) self.assertEqual(keycache[5], 123) self.assertEqual(keyfn.call_count, 1) self.assertEqual(keycache[5], 123) self.assertEqual(keyfn.call_count, 1) self.assertEqual(keycache[6], 123) self.assertEqual(keyfn.call_count, 2) self.assertEqual(keycache[7], 123) self.assertEqual(keyfn.call_count, 3) self.assertEqual(keycache[8], 123) self.assertEqual(keyfn.call_count, 4) self.assertEqual(keycache[9], 123) self.assertEqual(keyfn.call_count, 5) self.assertEqual(keycache[10], 123) self.assertEqual(keyfn.call_count, 6) self.assertEqual(keycache[10], 123) self.assertEqual(keyfn.call_count, 6) self.assertEqual(keycache[5], 123) self.assertEqual(keyfn.call_count, 7) # BUG(1650518): Cleanup in the Pike release def test_not_caching_none(self): keyfn = Mock(return_value=None) keycache = instance_encryption_key_cache(keyfn, 5) self.assertIsNone(keycache[30]) self.assertEqual(keyfn.call_count, 1) self.assertIsNone(keycache[30]) self.assertEqual(keyfn.call_count, 2) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/instance/test_instance_status.py0000644000175000017500000001417600000000000027147 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
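# NOTE(editor): the instance_encryption_key_cache tests above pin down
# three behaviours: results are memoized up to a fixed capacity, the
# oldest entry is evicted first, and a None result is never cached. A
# minimal illustrative sketch of a cache with those properties (not
# Trove's implementation):
import collections as _collections_sketch


class _BoundedKeyCache(object):

    def __init__(self, key_fn, max_size):
        self._key_fn = key_fn
        self._max_size = max_size
        self._data = _collections_sketch.OrderedDict()

    def __getitem__(self, key):
        if key in self._data:
            return self._data[key]
        value = self._key_fn(key)
        if value is not None:  # a None result is re-fetched every time
            self._data[key] = value
            if len(self._data) > self._max_size:
                self._data.popitem(last=False)  # evict the oldest entry
        return value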
# from trove.common.instance import ServiceStatuses from trove.datastore import models from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceStatus from trove.instance.models import SimpleInstance from trove.instance.tasks import InstanceTasks from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util import uuid class FakeInstanceTask(object): def __init__(self): self.is_error = False self.action = None class FakeDBInstance(object): def __init__(self): self.id = str(uuid.uuid4()) self.deleted = False self.datastore_version_id = str(uuid.uuid4()) self.server_status = "HEALTHY" self.task_status = FakeInstanceTask() class BaseInstanceStatusTestCase(trove_testtools.TestCase): def setUp(self): util.init_db() self.db_info = FakeDBInstance() self.status = InstanceServiceStatus( ServiceStatuses.RUNNING) self.datastore = models.DBDatastore.create( id=str(uuid.uuid4()), name='mysql' + str(uuid.uuid4()), default_version_id=self.db_info.datastore_version_id ) self.version = models.DBDatastoreVersion.create( id=self.db_info.datastore_version_id, datastore_id=self.datastore.id, name='5.7' + str(uuid.uuid4()), manager='mysql', image_id=str(uuid.uuid4()), active=1, packages="mysql-server-5.7" ) super(BaseInstanceStatusTestCase, self).setUp() def tearDown(self): self.datastore.delete() self.version.delete() super(BaseInstanceStatusTestCase, self).tearDown() class InstanceStatusTest(BaseInstanceStatusTestCase): def test_task_status_error_reports_error(self): self.db_info.task_status.is_error = True instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.ERROR, instance.status) def test_task_status_action_building_reports_build(self): self.db_info.task_status.action = "BUILDING" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.BUILD, instance.status) def test_task_status_action_rebooting_reports_reboot(self): self.db_info.task_status.action = "REBOOTING" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.REBOOT, instance.status) def test_task_status_action_resizing_reports_resize(self): self.db_info.task_status.action = "RESIZING" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.RESIZE, instance.status) def test_task_status_action_deleting_reports_shutdown(self): self.db_info.task_status.action = "DELETING" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.SHUTDOWN, instance.status) def test_nova_server_build_reports_build(self): self.db_info.server_status = "BUILD" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.BUILD, instance.status) def test_nova_server_error_reports_error(self): self.db_info.server_status = "ERROR" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.ERROR, instance.status) def test_nova_server_reboot_reports_reboot(self): self.db_info.server_status = "REBOOT" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.REBOOT, instance.status) def test_nova_server_resize_reports_resize(self): self.db_info.server_status = "RESIZE" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.RESIZE, instance.status) def test_nova_server_verify_resize_reports_resize(self): 
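        # NOTE(editor): VERIFY_RESIZE is the transient nova state between
        # a completed resize and its confirmation; to the user it should
        # still read as RESIZE.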
self.db_info.server_status = "VERIFY_RESIZE" instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.RESIZE, instance.status) def test_service_status_paused_reports_reboot(self): self.status.set_status(ServiceStatuses.PAUSED) instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.REBOOT, instance.status) def test_service_status_new_reports_build(self): self.status.set_status(ServiceStatuses.NEW) instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.BUILD, instance.status) def test_service_status_running_reports_active(self): self.status.set_status(ServiceStatuses.RUNNING) instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.ACTIVE, instance.status) def test_service_status_reset_status(self): self.status.set_status(ServiceStatuses.UNKNOWN) instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.ERROR, instance.status) def test_service_status_force_deleting(self): self.status.set_status(ServiceStatuses.UNKNOWN) self.db_info.task_status = InstanceTasks.DELETING instance = SimpleInstance('dummy context', self.db_info, self.status) self.assertEqual(InstanceStatus.SHUTDOWN, instance.status) trove-12.1.0.dev92/trove/tests/unittests/instance/test_instance_views.py # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
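# NOTE(editor): taken together, the status tests above encode a strict
# precedence. A condensed sketch of that ordering (plain strings and a
# free function here; the real logic lives in SimpleInstance.status):
_ACTION_MAP = {'BUILDING': 'BUILD', 'REBOOTING': 'REBOOT',
               'RESIZING': 'RESIZE', 'DELETING': 'SHUTDOWN'}
_SERVER_MAP = {'BUILD': 'BUILD', 'ERROR': 'ERROR', 'REBOOT': 'REBOOT',
               'RESIZE': 'RESIZE', 'VERIFY_RESIZE': 'RESIZE'}
_SERVICE_MAP = {'PAUSED': 'REBOOT', 'NEW': 'BUILD', 'RUNNING': 'ACTIVE'}


def _sketch_resolve_status(task_is_error, task_action, server_status,
                           service_status):
    if task_is_error:
        return 'ERROR'  # a failed task masks everything else
    if task_action in _ACTION_MAP:
        return _ACTION_MAP[task_action]
    if server_status in _SERVER_MAP:
        return _SERVER_MAP[server_status]
    # Fall back to the guest heartbeat; UNKNOWN reads as ERROR.
    return _SERVICE_MAP.get(service_status, 'ERROR')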
# from mock import Mock from trove.common import cfg from trove.instance.views import InstanceDetailView from trove.instance.views import InstanceView from trove.tests.unittests import trove_testtools CONF = cfg.CONF class InstanceViewsTest(trove_testtools.TestCase): def setUp(self): super(InstanceViewsTest, self).setUp() self.addresses = {"private": [{"addr": "123.123.123.123"}], "internal": [{"addr": "10.123.123.123"}], "public": [{"addr": "15.123.123.123"}]} self.orig_label_regex = CONF.network_label_regex self.orig_ip_regex = CONF.ip_regex def tearDown(self): super(InstanceViewsTest, self).tearDown() CONF.network_label_regex = self.orig_label_regex CONF.ip_regex = self.orig_ip_regex class InstanceDetailViewTest(trove_testtools.TestCase): def setUp(self): super(InstanceDetailViewTest, self).setUp() self.build_links_method = InstanceView._build_links self.build_flavor_links_method = InstanceView._build_flavor_links self.build_config_method = InstanceDetailView._build_configuration_info InstanceView._build_links = Mock() InstanceView._build_flavor_links = Mock() InstanceDetailView._build_configuration_info = Mock() self.instance = Mock() self.instance.created = 'Yesterday' self.instance.updated = 'Now' self.instance.datastore_version = Mock() self.instance.datastore_version.name = 'mysql_test_version' self.instance.datastore_version.manager = 'mysql' self.instance.hostname = 'test.trove.com' self.ip = "1.2.3.4" self.instance.addresses = {"private": [{"addr": self.ip}]} self.instance.volume_used = '3' self.instance.root_password = 'iloveyou' self.instance.get_visible_ip_addresses = lambda: ["1.2.3.4"] self.instance.slave_of_id = None self.instance.slaves = [] self.instance.locality = 'affinity' self.instance.server_id = 'server_abc' self.instance.volume_id = 'volume_abc' self.fault_message = 'Error' self.fault_details = 'details' self.fault_date = 'now' self.instance.fault = Mock() self.instance.fault.message = self.fault_message self.instance.fault.details = self.fault_details self.instance.fault.updated = self.fault_date self.context = trove_testtools.TroveTestContext(self) self.req = Mock() self.req.environ = Mock() self.req.environ.__getitem__ = Mock(return_value=self.context) def tearDown(self): super(InstanceDetailViewTest, self).tearDown() InstanceView._build_links = self.build_links_method InstanceView._build_flavor_links = self.build_flavor_links_method InstanceDetailView._build_configuration_info = self.build_config_method def test_data_hostname(self): view = InstanceDetailView(self.instance, self.req) result = view.data() self.assertEqual(self.instance.created, result['instance']['created']) self.assertEqual(self.instance.updated, result['instance']['updated']) self.assertEqual(self.instance.datastore_version.name, result['instance']['datastore']['version']) self.assertEqual(self.instance.hostname, result['instance']['hostname']) self.assertNotIn('ip', result['instance']) def test_data_ip(self): self.instance.hostname = None view = InstanceDetailView(self.instance, self.req) result = view.data() self.assertEqual(self.instance.created, result['instance']['created']) self.assertEqual(self.instance.updated, result['instance']['updated']) self.assertEqual(self.instance.datastore_version.name, result['instance']['datastore']['version']) self.assertNotIn('hostname', result['instance']) self.assertEqual([self.ip], result['instance']['ip']) def test_locality(self): self.instance.hostname = None view = InstanceDetailView(self.instance, self.req) result = view.data() 
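        # NOTE(editor): 'affinity' was set on the mocked instance in
        # setUp(); the serialized view must echo it back unchanged.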
self.assertEqual(self.instance.locality, result['instance']['locality']) def test_fault(self): view = InstanceDetailView(self.instance, self.req) result = view.data() self.assertEqual(self.fault_message, result['instance']['fault']['message']) self.assertEqual(self.fault_details, result['instance']['fault']['details']) self.assertEqual(self.fault_date, result['instance']['fault']['created']) def test_admin_view(self): self.context.is_admin = True view = InstanceDetailView(self.instance, self.req) result = view.data() self.assertIn('server_id', result['instance']) self.assertIn('volume_id', result['instance']) def test_non_admin_view(self): self.context.is_admin = False view = InstanceDetailView(self.instance, self.req) result = view.data() self.assertNotIn('server_id', result['instance']) self.assertNotIn('volume_id', result['instance']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8001113 trove-12.1.0.dev92/trove/tests/unittests/mgmt/0000755000175000017500000000000000000000000021456 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/mgmt/__init__.py0000644000175000017500000000000000000000000023555 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/mgmt/test_clusters.py0000644000175000017500000000720600000000000024740 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
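# Unit tests for the management cluster API controller (MgmtClusterController).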
from mock import Mock, patch from trove.common import exception from trove.extensions.mgmt.clusters.models import MgmtCluster from trove.extensions.mgmt.clusters.service import MgmtClusterController from trove.tests.unittests import trove_testtools class TestClusterController(trove_testtools.TestCase): def setUp(self): super(TestClusterController, self).setUp() self.context = trove_testtools.TroveTestContext(self) self.req = Mock() self.req.environ = Mock() self.req.environ.__getitem__ = Mock(return_value=self.context) mock_cluster1 = Mock() mock_cluster1.datastore_version.manager = 'vertica' mock_cluster1.instances = [] mock_cluster1.instances_without_server = [] mock_cluster2 = Mock() mock_cluster2.datastore_version.manager = 'vertica' mock_cluster2.instances = [] mock_cluster2.instances_without_server = [] self.mock_clusters = [mock_cluster1, mock_cluster2] self.controller = MgmtClusterController() def tearDown(self): super(TestClusterController, self).tearDown() def test_get_action_schema(self): body = {'do_stuff': {}} action_schema = Mock() action_schema.get = Mock() self.controller.get_action_schema(body, action_schema) action_schema.get.assert_called_with('do_stuff', {}) @patch.object(MgmtCluster, 'load') def test_show_cluster(self, mock_cluster_load): tenant_id = Mock() id = Mock() mock_cluster_load.return_value = self.mock_clusters[0] self.controller.show(self.req, tenant_id, id) mock_cluster_load.assert_called_with(self.context, id) @patch.object(MgmtCluster, 'load_all') def test_index_cluster(self, mock_cluster_load_all): tenant_id = Mock() mock_cluster_load_all.return_value = self.mock_clusters self.controller.index(self.req, tenant_id) mock_cluster_load_all.assert_called_with(self.context, deleted=None) @patch.object(MgmtCluster, 'load') def test_controller_action_found(self, mock_cluster_load): body = {'reset-task': {}} tenant_id = Mock() id = Mock() mock_cluster_load.return_value = self.mock_clusters[0] result = self.controller.action(self.req, body, tenant_id, id) self.assertEqual(202, result.status) self.assertIsNotNone(result.data) def test_controller_no_body_action_found(self): tenant_id = Mock() id = Mock() self.assertRaisesRegex( exception.BadRequest, 'Invalid request body.', self.controller.action, self.req, None, tenant_id, id) @patch.object(MgmtCluster, 'load') def test_controller_invalid_action_found(self, mock_cluster_load): body = {'do_stuff': {}} tenant_id = Mock() id = Mock() mock_cluster_load.return_value = self.mock_clusters[0] self.assertRaisesRegex( exception.BadRequest, 'Invalid cluster action requested.', self.controller.action, self.req, body, tenant_id, id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/mgmt/test_datastore_controller.py0000644000175000017500000001547100000000000027330 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
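# Unit tests for DatastoreVersionController: request-schema validation plus the
# create/show/delete/index/edit handlers, with all external dependencies mocked.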
import jsonschema from mock import Mock, patch, MagicMock, PropertyMock from testtools.matchers import Is, Equals from trove.common import clients from trove.common import exception from trove.datastore import models as datastore_models from trove.extensions.mgmt.datastores.service import DatastoreVersionController from trove.tests.unittests import trove_testtools class TestDatastoreVersionController(trove_testtools.TestCase): def setUp(self): super(TestDatastoreVersionController, self).setUp() self.controller = DatastoreVersionController() self.version = { "version": { "datastore_name": "test_dsx", "name": "test_vr1", "datastore_manager": "mysql", "image": "154b350d-4d86-4214-9067-9c54b230c0da", "packages": ["mysql-server-5.7"], "active": True, "default": False } } self.tenant_id = Mock() context = trove_testtools.TroveTestContext(self) self.req = Mock() self.req.environ = Mock() self.req.environ.__getitem__ = Mock(return_value=context) def test_get_schema_create(self): schema = self.controller.get_schema('create', self.version) self.assertIsNotNone(schema) self.assertIn('version', schema['properties']) def test_validate_create(self): body = self.version schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_blankname(self): body = self.version body['version']['name'] = " " schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'")) def test_validate_create_blank_datastore(self): body = self.version body['version']['datastore_name'] = "" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) @patch.object(clients, 'create_glance_client') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load', side_effect=exception.DatastoreVersionNotFound) @patch.object(datastore_models, 'update_datastore_version') def test_create_datastore_versions(self, mock_ds_version_create, mock_ds_version_load, mock_ds_load, mock_glance_client): body = self.version mock_ds_load.return_value.name = 'test_dsx' self.controller.create(self.req, body, self.tenant_id) mock_ds_version_create.assert_called_with( 'test_dsx', 'test_vr1', 'mysql', '154b350d-4d86-4214-9067-9c54b230c0da', 'mysql-server-5.7', True) @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_show_ds_version(self, mock_ds_version_load): id = Mock() self.controller.show(self.req, self.tenant_id, id) mock_ds_version_load.assert_called_with(id) @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_delete_ds_version(self, mock_ds_version_load, mock_ds_load): ds_version_id = Mock() ds_version = Mock() mock_ds_version_load.return_value = ds_version self.controller.delete(self.req, self.tenant_id, ds_version_id) ds_version.delete.assert_called_with() 
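# index() should list all versions, including inactive ones, and hydrate each via load_by_uuid.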
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch.object(datastore_models.DatastoreVersions, 'load_all') def test_index_ds_version(self, mock_ds_version_load_all, mock_ds_version_load_by_uuid): mock_id = Mock() mock_ds_version = Mock() mock_ds_version.id = mock_id mock_ds_version_load_all.return_value = [mock_ds_version] self.controller.index(self.req, self.tenant_id) mock_ds_version_load_all.assert_called_with(only_active=False) mock_ds_version_load_by_uuid.assert_called_with(mock_id) @patch.object(clients, 'create_glance_client') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch.object(datastore_models, 'update_datastore_version') def test_edit_datastore_versions(self, mock_ds_version_update, mock_ds_version_load, mock_glance_client): body = {'image': '21c8805a-a800-4bca-a192-3a5a2519044d'} mock_ds_version = MagicMock() type(mock_ds_version).datastore_name = PropertyMock( return_value=self.version['version']['datastore_name']) type(mock_ds_version).name = PropertyMock( return_value=self.version['version']['name']) type(mock_ds_version).image_id = PropertyMock( return_value=self.version['version']['image']) type(mock_ds_version).packages = PropertyMock( return_value=self.version['version']['packages']) type(mock_ds_version).active = PropertyMock( return_value=self.version['version']['active']) type(mock_ds_version).manager = PropertyMock( return_value=self.version['version']['datastore_manager']) mock_ds_version_load.return_value = mock_ds_version self.controller.edit(self.req, body, self.tenant_id, Mock()) mock_ds_version_update.assert_called_with( 'test_dsx', 'test_vr1', 'mysql', '21c8805a-a800-4bca-a192-3a5a2519044d', 'mysql-server-5.7', True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/mgmt/test_datastores.py0000644000175000017500000001574000000000000025247 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
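# Unit tests exercising datastore version operations against a real test
# database (util.init_db()); only the Glance client is mocked.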
from mock import Mock, patch from glanceclient import exc as glance_exceptions from trove.common import clients from trove.common import exception from trove.datastore import models from trove.extensions.mgmt.datastores.service import DatastoreVersionController from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util class TestDatastoreVersion(trove_testtools.TestCase): def setUp(self): super(TestDatastoreVersion, self).setUp() util.init_db() models.update_datastore(name='test_ds', default_version=None) models.update_datastore_version( 'test_ds', 'test_vr1', 'mysql', '154b350d-4d86-4214-9067-9c54b230c0da', 'pkg-1', 1) models.update_datastore_version( 'test_ds', 'test_vr2', 'mysql', '154b350d-4d86-4214-9067-9c54b230c0da', 'pkg-1', 1) self.ds = models.Datastore.load('test_ds') self.ds_version2 = models.DatastoreVersion.load(self.ds, 'test_vr2') self.context = trove_testtools.TroveTestContext(self) self.req = Mock() self.req.environ = Mock() self.req.environ.__getitem__ = Mock(return_value=self.context) self.tenant_id = Mock() self.version_controller = DatastoreVersionController() def tearDown(self): super(TestDatastoreVersion, self).tearDown() @patch.object(clients, 'create_glance_client') def test_version_create(self, mock_glance_client): body = {"version": { "datastore_name": "test_ds", "name": "test_version", "datastore_manager": "mysql", "image": "image-id", "packages": "test-pkg", "active": True, "default": True}} output = self.version_controller.create( self.req, body, self.tenant_id) self.assertEqual(202, output.status) @patch.object(clients, 'create_glance_client') @patch.object(models.DatastoreVersion, 'load') def test_fail_already_exists_version_create(self, mock_load, mock_glance_client): body = {"version": { "datastore_name": "test_ds", "name": "test_new_vr", "datastore_manager": "mysql", "image": "image-id", "packages": "test-pkg", "active": True, "default": True}} self.assertRaisesRegex( exception.DatastoreVersionAlreadyExists, "A datastore version with the name 'test_new_vr' already exists", self.version_controller.create, self.req, body, self.tenant_id) @patch.object(clients, 'create_glance_client') def test_fail_image_not_found_version_create(self, mock_glance_client): mock_glance_client.return_value.images.get = Mock( side_effect=glance_exceptions.HTTPNotFound()) body = {"version": { "datastore_name": "test_ds", "name": "test_vr", "datastore_manager": "mysql", "image": "image-id", "packages": "test-pkg", "active": True, "default": True}} self.assertRaisesRegex( exception.ImageNotFound, "Image image-id cannot be found.", self.version_controller.create, self.req, body, self.tenant_id) def test_version_delete(self): ds_version1 = models.DatastoreVersion.load(self.ds, 'test_vr1') output = self.version_controller.delete(self.req, self.tenant_id, ds_version1.id) err_msg = ("Datastore version '%s' cannot be found." % ds_version1.id) self.assertEqual(202, output.status) # Try to find deleted version, this should raise exception. self.assertRaisesRegex( exception.DatastoreVersionNotFound, err_msg, models.DatastoreVersion.load_by_uuid, ds_version1.id) @patch.object(clients, 'create_glance_client') def test_version_update(self, mock_client): body = {"image": "c022f4dc-76ed-4e3f-a25e-33e031f43f8b"} output = self.version_controller.edit(self.req, body, self.tenant_id, self.ds_version2.id) self.assertEqual(202, output.status) # Find the details of version updated and match the updated attribute. 
test_ds_version = models.DatastoreVersion.load_by_uuid( self.ds_version2.id) self.assertEqual(body['image'], test_ds_version.image_id) @patch.object(clients, 'create_glance_client') def test_version_update_fail_image_not_found(self, mock_glance_client): mock_glance_client.return_value.images.get = Mock( side_effect=glance_exceptions.HTTPNotFound()) body = {"image": "non-existent-image-id"} self.assertRaisesRegex( exception.ImageNotFound, "Image non-existent-image-id cannot be found.", self.version_controller.edit, self.req, body, self.tenant_id, self.ds_version2.id) @patch.object(models.DatastoreVersion, 'load_by_uuid') def test_version_index(self, mock_load): output = self.version_controller.index( self.req, self.tenant_id) self.assertEqual(200, output.status) def test_version_show(self): output = self.version_controller.show( self.req, self.tenant_id, self.ds_version2.id) self.assertEqual(200, output.status) self.assertEqual(self.ds_version2.id, output._data['version']['id']) self.assertEqual(self.ds_version2.name, output._data['version']['name']) self.assertEqual(self.ds_version2.datastore_id, output._data['version']['datastore_id']) self.assertEqual(self.ds_version2.datastore_name, output._data['version']['datastore_name']) self.assertEqual(self.ds_version2.manager, output._data['version']['datastore_manager']) self.assertEqual(self.ds_version2.image_id, output._data['version']['image']) self.assertEqual(self.ds_version2.packages.split(','), output._data['version']['packages']) self.assertEqual(self.ds_version2.active, output._data['version']['active']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/mgmt/test_models.py0000644000175000017500000005004500000000000024356 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
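# Unit tests for the mgmt instance models: notification transformers,
# exists-event publishing, deleted-instance lookups and the guestagent RPC ping.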
# import uuid from mock import MagicMock, patch, ANY from novaclient.client import Client from novaclient.v2.flavors import FlavorManager, Flavor from novaclient.v2.servers import Server, ServerManager from oslo_config import cfg from testtools.matchers import Equals, Is, Not from trove.backup.models import Backup from trove.common import clients from trove.common import exception from trove.common import instance as rd_instance from trove.datastore import models as datastore_models import trove.extensions.mgmt.instances.models as mgmtmodels from trove.guestagent.api import API from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus from trove.instance.tasks import InstanceTasks from trove import rpc from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util CONF = cfg.CONF class MockMgmtInstanceTest(trove_testtools.TestCase): @classmethod def setUpClass(cls): util.init_db() cls.version_id = str(uuid.uuid4()) cls.datastore = datastore_models.DBDatastore.create( id=str(uuid.uuid4()), name='mysql' + str(uuid.uuid4()), default_version_id=cls.version_id ) cls.version = datastore_models.DBDatastoreVersion.create( id=cls.version_id, datastore_id=cls.datastore.id, name='5.5' + str(uuid.uuid4()), manager='mysql', image_id=str(uuid.uuid4()), active=1, packages="mysql-server-5.5" ) super(MockMgmtInstanceTest, cls).setUpClass() @classmethod def tearDownClass(cls): cls.version.delete() cls.datastore.delete() super(MockMgmtInstanceTest, cls).tearDownClass() def setUp(self): self.context = trove_testtools.TroveTestContext(self) self.context.auth_token = 'some_secret_password' self.client = MagicMock(spec=Client) self.server_mgr = MagicMock(spec=ServerManager) self.client.servers = self.server_mgr self.flavor_mgr = MagicMock(spec=FlavorManager) self.client.flavors = self.flavor_mgr self.admin_client_patch = patch.object( clients, 'create_admin_nova_client', return_value=self.client) self.addCleanup(self.admin_client_patch.stop) self.admin_client_patch.start() CONF.set_override('host', '127.0.0.1') CONF.set_override('exists_notification_interval', 1) CONF.set_override('notification_service_id', {'mysql': '123'}) super(MockMgmtInstanceTest, self).setUp() def do_cleanup(self, instance, status): instance.delete() status.delete() def build_db_instance(self, status, task_status=InstanceTasks.NONE): instance = DBInstance(InstanceTasks.NONE, name='test_name', id=str(uuid.uuid4()), flavor_id='flavor_1', datastore_version_id=self.version.id, compute_instance_id='compute_id_1', server_id='server_id_1', tenant_id='tenant_id_1', server_status=rd_instance.ServiceStatuses. 
BUILDING.api_status, deleted=False) instance.save() service_status = InstanceServiceStatus( rd_instance.ServiceStatuses.RUNNING, id=str(uuid.uuid4()), instance_id=instance.id, ) service_status.save() instance.set_task_status(task_status) instance.server_status = status instance.save() return instance, service_status class TestNotificationTransformer(MockMgmtInstanceTest): @classmethod def setUpClass(cls): super(TestNotificationTransformer, cls).setUpClass() @patch('trove.instance.models.LOG') def test_transformer(self, mock_logging): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, InstanceTasks.BUILDING) payloads = mgmtmodels.NotificationTransformer( context=self.context)() self.assertIsNotNone(payloads) payload = payloads[0] self.assertThat(payload['audit_period_beginning'], Not(Is(None))) self.assertThat(payload['audit_period_ending'], Not(Is(None))) self.assertIn(status.lower(), [db['state'] for db in payloads]) self.addCleanup(self.do_cleanup, instance, service_status) def test_get_service_id(self): id_map = { 'mysql': '123', 'percona': 'abc' } transformer = mgmtmodels.NotificationTransformer(context=self.context) self.assertThat(transformer._get_service_id('mysql', id_map), Equals('123')) @patch('trove.extensions.mgmt.instances.models.LOG') def test_get_service_id_unknown(self, mock_logging): id_map = { 'mysql': '123', 'percona': 'abc' } transformer = mgmtmodels.NotificationTransformer(context=self.context) self.assertThat(transformer._get_service_id('m0ng0', id_map), Equals('unknown-service-id-error')) class TestNovaNotificationTransformer(MockMgmtInstanceTest): @classmethod def setUpClass(cls): super(TestNovaNotificationTransformer, cls).setUpClass() def test_transformer_cache(self): flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' with patch.object(self.flavor_mgr, 'get', return_value=flavor): transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) transformer2 = mgmtmodels.NovaNotificationTransformer( context=self.context) self.assertThat(transformer._flavor_cache, Not(Is(transformer2._flavor_cache))) def test_lookup_flavor(self): flavor = MagicMock(spec=Flavor) flavor.name = 'flav_1' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(self.flavor_mgr, 'get', side_effect=[flavor, None]): self.assertThat(transformer._lookup_flavor('1'), Equals(flavor.name)) self.assertThat(transformer._lookup_flavor('2'), Equals('unknown')) def test_transformer(self): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, InstanceTasks.BUILDING) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' server = MagicMock(spec=Server) server.user_id = 'test_user_id' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): payloads = transformer() self.assertIsNotNone(payloads) payload = payloads[0] self.assertThat(payload['audit_period_beginning'], Not(Is(None))) self.assertThat(payload['audit_period_ending'], Not(Is(None))) self.assertThat(payload['state'], Not(Is(None))) self.assertThat(payload['instance_type'], Equals('db.small')) self.assertThat(payload['instance_type_id'], Equals('flavor_1')) self.assertThat(payload['user_id'], 
Equals('test_user_id')) self.assertThat(payload['service_id'], Equals('123')) self.addCleanup(self.do_cleanup, instance, service_status) @patch('trove.extensions.mgmt.instances.models.LOG') def test_transformer_invalid_datastore_manager(self, mock_logging): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, InstanceTasks.BUILDING) version = datastore_models.DBDatastoreVersion.get_by( id=instance.datastore_version_id) version.update(manager='something invalid') server = MagicMock(spec=Server) server.user_id = 'test_user_id' flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): payloads = transformer() # assertions self.assertIsNotNone(payloads) payload = payloads[0] self.assertThat(payload['audit_period_beginning'], Not(Is(None))) self.assertThat(payload['audit_period_ending'], Not(Is(None))) self.assertIn(status.lower(), [db['state'] for db in payloads]) self.assertThat(payload['instance_type'], Equals('db.small')) self.assertThat(payload['instance_type_id'], Equals('flavor_1')) self.assertThat(payload['user_id'], Equals('test_user_id')) self.assertThat(payload['service_id'], Equals('unknown-service-id-error')) version.update(manager='mysql') self.addCleanup(self.do_cleanup, instance, service_status) def test_transformer_shutdown_instance(self): status = rd_instance.ServiceStatuses.SHUTDOWN.api_status instance, service_status = self.build_db_instance(status) service_status.set_status(rd_instance.ServiceStatuses.SHUTDOWN) server = MagicMock(spec=Server) server.user_id = 'test_user_id' mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(Backup, 'running', return_value=None): self.assertThat(mgmt_instance.status, Equals('SHUTDOWN')) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): payloads = transformer() # assertion that SHUTDOWN instances are not reported self.assertIsNotNone(payloads) self.assertNotIn(status.lower(), [db['status'] for db in payloads]) self.addCleanup(self.do_cleanup, instance, service_status) def test_transformer_no_nova_instance(self): status = rd_instance.ServiceStatuses.SHUTDOWN.api_status instance, service_status = self.build_db_instance(status) service_status.set_status(rd_instance.ServiceStatuses.SHUTDOWN) mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, None, service_status) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(Backup, 'running', return_value=None): self.assertThat(mgmt_instance.status, Equals('SHUTDOWN')) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): payloads = transformer() # assertion that SHUTDOWN instances are not reported self.assertIsNotNone(payloads) self.assertNotIn(status.lower(), [db['status'] for db in payloads]) self.addCleanup(self.do_cleanup, instance, 
service_status) def test_transformer_flavor_cache(self): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, InstanceTasks.BUILDING) server = MagicMock(spec=Server) server.user_id = 'test_user_id' mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' transformer = mgmtmodels.NovaNotificationTransformer( context=self.context) with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): transformer() payloads = transformer() self.assertIsNotNone(payloads) self.assertThat(len(payloads), Equals(1)) payload = payloads[0] self.assertThat(payload['audit_period_beginning'], Not(Is(None))) self.assertThat(payload['audit_period_ending'], Not(Is(None))) self.assertIn(status.lower(), [db['state'] for db in payloads]) self.assertThat(payload['instance_type'], Equals('db.small')) self.assertThat(payload['instance_type_id'], Equals('flavor_1')) self.assertThat(payload['user_id'], Equals('test_user_id')) # ensure cache was used to get flavor second time self.flavor_mgr.get.assert_any_call('flavor_1') self.addCleanup(self.do_cleanup, instance, service_status) class TestMgmtInstanceTasks(MockMgmtInstanceTest): @classmethod def setUpClass(cls): super(TestMgmtInstanceTasks, cls).setUpClass() def test_public_exists_events(self): status = rd_instance.ServiceStatuses.BUILDING.api_status instance, service_status = self.build_db_instance( status, task_status=InstanceTasks.BUILDING) server = MagicMock(spec=Server) server.user_id = 'test_user_id' mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context, instance, server, service_status) flavor = MagicMock(spec=Flavor) flavor.name = 'db.small' notifier = MagicMock() with patch.object(rpc, 'get_notifier', return_value=notifier): with patch.object(mgmtmodels, 'load_mgmt_instances', return_value=[mgmt_instance]): with patch.object(self.flavor_mgr, 'get', return_value=flavor): self.assertThat(self.context.auth_token, Is('some_secret_password')) with patch.object(notifier, 'info', return_value=None): # invocation mgmtmodels.publish_exist_events( mgmtmodels.NovaNotificationTransformer( context=self.context), self.context) # assertion notifier.info.assert_any_call( self.context, 'trove.instance.exists', ANY) self.assertThat(self.context.auth_token, Is(None)) self.addCleanup(self.do_cleanup, instance, service_status) class TestMgmtInstanceDeleted(MockMgmtInstanceTest): def test_show_deleted_mgmt_instances(self): args = {'deleted': 0, 'cluster_id': None} db_infos_active = DBInstance.find_all(**args) args = {'deleted': 1, 'cluster_id': None} db_infos_deleted = DBInstance.find_all(**args) args = {'cluster_id': None} # db_infos_all = DBInstance.find_all(**args) # TODO(SlickNik) Fix this assert to work reliably in the gate. # This fails intermittently when the unit tests run in parallel.
# self.assertTrue(db_infos_all.count() == # db_infos_active.count() + # db_infos_deleted.count()) with patch.object(self.context, 'is_admin', return_value=True): deleted_instance = db_infos_deleted.all()[0] if len( db_infos_deleted.all()) > 0 else None active_instance = db_infos_active.all()[0] if len( db_infos_active.all()) > 0 else None if active_instance: instance = DBInstance.find_by(context=self.context, id=active_instance.id) self.assertEqual(active_instance.id, instance.id) if deleted_instance: self.assertRaises( exception.ModelNotFoundError, DBInstance.find_by, context=self.context, id=deleted_instance.id, deleted=False) instance = DBInstance.find_by(context=self.context, id=deleted_instance.id, deleted=True) self.assertEqual(deleted_instance.id, instance.id) class TestMgmtInstancePing(MockMgmtInstanceTest): def test_rpc_ping(self): status = rd_instance.ServiceStatuses.RUNNING.api_status instance, service_status = self.build_db_instance( status, task_status=InstanceTasks.NONE) mgmt_instance = mgmtmodels.MgmtInstance(instance, instance, None, service_status) with patch.object(API, 'rpc_ping', return_value=True): with patch.object(API, 'get_client'): self.assertTrue(mgmt_instance.rpc_ping()) self.addCleanup(self.do_cleanup, instance, service_status) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8001113 trove-12.1.0.dev92/trove/tests/unittests/module/0000755000175000017500000000000000000000000021777 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/module/__init__.py0000644000175000017500000000000000000000000024076 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/module/test_module_controller.py0000644000175000017500000000621400000000000027143 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
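# Unit tests for ModuleController request-schema validation.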
# import jsonschema from testtools.matchers import Is, Equals from trove.module.service import ModuleController from trove.tests.unittests import trove_testtools class TestModuleController(trove_testtools.TestCase): def setUp(self): super(TestModuleController, self).setUp() self.controller = ModuleController() self.module = { "module": { "name": 'test_module', "module_type": 'test', "contents": 'my_contents\n', "priority_apply": 0, "apply_order": 5 } } def test_get_schema_create(self): schema = self.controller.get_schema('create', {'module': {}}) self.assertIsNotNone(schema) self.assertIn('module', schema['properties']) def test_validate_create_complete(self): body = self.module schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_blank_name(self): body = self.module body['module']['name'] = " " schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(1)) self.assertThat(errors[0].message, Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'")) def test_validate_create_invalid_name(self): body = self.module body['module']['name'] = "$#$%^^" schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertEqual(1, len(errors)) self.assertIn("'$#$%^^' does not match '^.*[0-9a-zA-Z]+.*$'", errors[0].message) def test_validate_create_invalid_apply_order(self): body = self.module body['module']['apply_order'] = 12 schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertEqual(1, len(errors)) self.assertIn("12 is greater than the maximum of 9", errors[0].message) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/module/test_module_models.py0000644000175000017500000001437500000000000026252 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
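# Unit tests for module model create/update, action access validation and
# datastore/version matching.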
# import copy from mock import Mock, patch from trove.common import crypto_utils from trove.common import exception from trove.datastore import models as datastore_models from trove.module import models from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util class CreateModuleTest(trove_testtools.TestCase): @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) def setUp(self): util.init_db() self.context = Mock() self.name = "name" self.module_type = 'ping' self.contents = 'my_contents\n' super(CreateModuleTest, self).setUp() @patch.object(task_api.API, 'get_client', Mock(return_value=Mock())) def tearDown(self): super(CreateModuleTest, self).tearDown() def test_can_create_update_module(self): module = models.Module.create( self.context, self.name, self.module_type, self.contents, 'my desc', 'my_tenant', None, None, False, True, False, False, 5, True) self.assertIsNotNone(module) new_module = copy.copy(module) models.Module.update(self.context, new_module, module, False) module.delete() def test_validate_action(self): # tenant_id, auto_apply, visible, priority_apply, full_access, # valid, exception, works_for_admin data = [ ['tenant', False, True, False, None, True], ['tenant', True, True, False, None, False, exception.ModuleAccessForbidden], ['tenant', False, False, False, None, False, exception.ModuleAccessForbidden], ['tenant', False, True, True, None, False, exception.ModuleAccessForbidden], ['tenant', False, True, False, True, False, exception.ModuleAccessForbidden, False], ['tenant', False, True, False, False, False, exception.ModuleAccessForbidden], ['tenant', True, False, True, False, False, exception.ModuleAccessForbidden], ['tenant', True, False, True, True, False, exception.InvalidModelError, False], ] for datum in data: tenant = datum[0] auto_apply = datum[1] visible = datum[2] priority_apply = datum[3] full_access = datum[4] valid = datum[5] expected_exception = None if not valid: expected_exception = datum[6] context = Mock() context.is_admin = False works_for_admin = True if len(datum) > 7: works_for_admin = datum[7] if valid: models.Module.validate_action( context, 'action', tenant, auto_apply, visible, priority_apply, full_access) else: self.assertRaises( expected_exception, models.Module.validate_action, context, 'action', tenant, auto_apply, visible, priority_apply, full_access) # also make sure that it works for admin if works_for_admin: context.is_admin = True models.Module.validate_action( context, 'action', tenant, auto_apply, visible, priority_apply, full_access) def _build_module(self, ds_id, ds_ver_id): module = Mock() module.datastore_id = ds_id module.datastore_version_id = ds_ver_id module.contents = crypto_utils.encode_data( crypto_utils.encrypt_data( 'VGhpc2lzbXlkYXRhc3RyaW5n', 'thisismylongkeytouse')) return module def test_validate(self): data = [ [[self._build_module('ds', 'ds_ver')], 'ds', 'ds_ver', True], [[self._build_module('ds', None)], 'ds', 'ds_ver', True], [[self._build_module(None, None)], 'ds', 'ds_ver', True], [[self._build_module('ds', 'ds_ver')], 'ds', 'ds2_ver', False, exception.TroveError], [[self._build_module('ds', 'ds_ver')], 'ds2', 'ds_ver', False, exception.TroveError], [[self._build_module('ds', 'ds_ver')], 'ds2', 'ds2_ver', False, exception.TroveError], [[self._build_module('ds', None)], 'ds2', 'ds2_ver', False, exception.TroveError], [[self._build_module(None, None)], 'ds2', 'ds2_ver', True], [[self._build_module(None, 'ds_ver')], 
'ds2', 'ds_ver', True], ] for datum in data: modules = datum[0] ds_id = datum[1] ds_ver_id = datum[2] match = datum[3] expected_exception = None if not match: expected_exception = datum[4] ds = Mock() ds.id = ds_id ds.name = ds_id ds_ver = Mock() ds_ver.id = ds_ver_id ds_ver.name = ds_ver_id ds_ver.datastore_id = ds_id with patch.object(datastore_models.Datastore, 'load', return_value=ds): with patch.object(datastore_models.DatastoreVersion, 'load', return_value=ds_ver): if match: models.Modules.validate(modules, ds_id, ds_ver_id) else: self.assertRaises( expected_exception, models.Modules.validate, modules, ds_id, ds_ver_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/module/test_module_views.py0000644000175000017500000000633000000000000026114 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from mock import Mock, patch from trove.datastore import models from trove.module.views import DetailedModuleView from trove.tests.unittests import trove_testtools class ModuleViewsTest(trove_testtools.TestCase): def setUp(self): super(ModuleViewsTest, self).setUp() def tearDown(self): super(ModuleViewsTest, self).tearDown() class DetailedModuleViewTest(trove_testtools.TestCase): def setUp(self): super(DetailedModuleViewTest, self).setUp() self.module = Mock() self.module.name = 'test_module' self.module.type = 'test' self.module.md5 = 'md5-hash' self.module.created = 'Yesterday' self.module.updated = 'Now' self.module.datastore = 'mysql' self.module.datastore_version = '5.7' self.module.auto_apply = False self.module.tenant_id = 'my_tenant' self.module.is_admin = False self.module.priority_apply = False self.module.apply_order = 5 def tearDown(self): super(DetailedModuleViewTest, self).tearDown() def test_data(self): datastore = Mock() datastore.name = self.module.datastore ds_version = Mock() ds_version.name = self.module.datastore_version with patch.object(models, 'get_datastore_version', Mock(return_value=(datastore, ds_version))): view = DetailedModuleView(self.module) result = view.data() self.assertEqual(self.module.name, result['module']['name']) self.assertEqual(self.module.type, result['module']['type']) self.assertEqual(self.module.md5, result['module']['md5']) self.assertEqual(self.module.created, result['module']['created']) self.assertEqual(self.module.updated, result['module']['updated']) self.assertEqual(self.module.datastore_version, result['module']['datastore_version']) self.assertEqual(self.module.datastore, result['module']['datastore']) self.assertEqual(self.module.auto_apply, result['module']['auto_apply']) self.assertEqual(self.module.tenant_id, result['module']['tenant_id']) self.assertEqual(self.module.is_admin, result['module']['is_admin']) self.assertEqual(self.module.priority_apply, result['module']['priority_apply']) self.assertEqual(self.module.apply_order, result['module']['apply_order']) 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8001113 trove-12.1.0.dev92/trove/tests/unittests/mysql/0000755000175000017500000000000000000000000021657 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/mysql/__init__.py0000644000175000017500000000000000000000000023756 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/mysql/test_common.py0000644000175000017500000001534300000000000024566 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from testtools.matchers import Equals from testtools.matchers import Is from trove.common.exception import DatabaseForUserNotInDatabaseListError from trove.common.exception import DatabaseInitialDatabaseDuplicateError from trove.common.exception import DatabaseInitialUserDuplicateError from trove.extensions.mysql.common import populate_users from trove.extensions.mysql.common import populate_validated_databases from trove.tests.unittests import trove_testtools class MySqlCommonTest(trove_testtools.TestCase): def setUp(self): super(MySqlCommonTest, self).setUp() def tearDown(self): super(MySqlCommonTest, self).tearDown() def test_initial_databases_none(self): databases = [] result = populate_validated_databases(databases) self.assertThat(len(result), Is(0)) def test_initial_databases_single(self): databases = [{'name': 'one_db'}] result = populate_validated_databases(databases) self.assertThat(len(result), Is(1)) self.assertThat(result[0]['_name'], Equals('one_db')) def test_initial_databases_unique(self): databases = [{'name': 'one_db'}, {'name': 'diff_db'}] result = populate_validated_databases(databases) self.assertThat(len(result), Is(2)) def test_initial_databases_duplicate(self): databases = [{'name': 'same_db'}, {'name': 'same_db'}] self.assertRaises(DatabaseInitialDatabaseDuplicateError, populate_validated_databases, databases) def test_initial_databases_intermingled(self): databases = [{'name': 'a_db'}, {'name': 'b_db'}, {'name': 'a_db'}] self.assertRaises(DatabaseInitialDatabaseDuplicateError, populate_validated_databases, databases) def test_populate_users_single(self): users = [{'name': 'bob', 'password': 'x'}] result = populate_users(users) self.assertThat(len(result), Is(1)) self.assertThat(result[0]['_name'], Equals('bob')) self.assertThat(result[0]['_password'], Equals('x')) def test_populate_users_unique_host(self): users = [{'name': 'bob', 'password': 'x', 'host': '127.0.0.1'}, {'name': 'bob', 'password': 'x', 'host': '128.0.0.1'}] result = populate_users(users) self.assertThat(len(result), Is(2)) def test_populate_users_unique_name(self): users = [{'name': 'bob', 'password': 'x', 'host': '127.0.0.1'}, {'name': 'tom', 'password': 'x', 'host': '127.0.0.1'}] result = 
populate_users(users) self.assertThat(len(result), Is(2)) def test_populate_users_duplicate(self): users = [{'name': 'bob', 'password': 'x', 'host': '127.0.0.1'}, {'name': 'bob', 'password': 'y', 'host': '127.0.0.1'}] self.assertRaises(DatabaseInitialUserDuplicateError, populate_users, users) def test_populate_unique_users_unique_host(self): users = [{'name': 'bob', 'password': 'x', 'host': '127.0.0.1'}, {'name': 'tom', 'password': 'x', 'host': '128.0.0.1'}] result = populate_users(users) self.assertThat(len(result), Is(2)) def test_populate_users_intermingled(self): users = [{'name': 'bob', 'password': 'x', 'host': '127.0.0.1'}, {'name': 'tom', 'password': 'y', 'host': '127.0.0.1'}, {'name': 'bob', 'password': 'z', 'host': '127.0.0.1'}, {'name': 'bob', 'password': 'x', 'host': '128.0.0.1'}, {'name': 'tom', 'password': 'x', 'host': '128.0.0.1'}] self.assertRaises(DatabaseInitialUserDuplicateError, populate_users, users) def test_populate_users_both_db_list_empty(self): initial_databases = [] users = [{"name": "bob", "password": "x"}] result = populate_users(users, initial_databases) self.assertThat(len(result), Is(1)) def test_populate_users_initial_db_list_empty(self): initial_databases = [] users = [{"name": "bob", "password": "x", "databases": [{"name": "my_db"}]}] self.assertRaises(DatabaseForUserNotInDatabaseListError, populate_users, users, initial_databases) def test_populate_users_user_db_list_empty(self): initial_databases = ['my_db'] users = [{"name": "bob", "password": "x"}] result = populate_users(users, initial_databases) self.assertThat(len(result), Is(1)) def test_populate_users_db_in_list(self): initial_databases = ['my_db'] users = [{"name": "bob", "password": "x", "databases": [{"name": "my_db"}]}] result = populate_users(users, initial_databases) self.assertThat(len(result), Is(1)) def test_populate_users_db_multi_in_list(self): initial_databases = ['a_db', 'b_db', 'c_db', 'd_db'] users = [{"name": "bob", "password": "x", "databases": [{"name": "a_db"}]}, {"name": "tom", "password": "y", "databases": [{"name": "c_db"}]}, {"name": "sue", "password": "z", "databases": [{"name": "c_db"}]}] result = populate_users(users, initial_databases) self.assertThat(len(result), Is(3)) def test_populate_users_db_not_in_list(self): initial_databases = ['a_db', 'b_db', 'c_db', 'd_db'] users = [{"name": "bob", "password": "x", "databases": [{"name": "fake_db"}]}] self.assertRaises(DatabaseForUserNotInDatabaseListError, populate_users, users, initial_databases) def test_populate_users_db_multi_not_in_list(self): initial_databases = ['a_db', 'b_db', 'c_db', 'd_db'] users = [{"name": "bob", "password": "x", "databases": [{"name": "a_db"}]}, {"name": "tom", "password": "y", "databases": [{"name": "fake_db"}]}, {"name": "sue", "password": "z", "databases": [{"name": "d_db"}]}] self.assertRaises(DatabaseForUserNotInDatabaseListError, populate_users, users, initial_databases) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/mysql/test_user_controller.py0000644000175000017500000004116700000000000026522 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import jsonschema from testtools.matchers import Is from trove.extensions.mysql.service import SchemaController from trove.extensions.mysql.service import UserAccessController from trove.extensions.mysql.service import UserController from trove.tests.unittests import trove_testtools class TestUserController(trove_testtools.TestCase): def setUp(self): super(TestUserController, self).setUp() self.controller = UserController() def test_get_create_schema(self): body = {'users': [{'name': 'test', 'password': 'test'}]} schema = self.controller.get_schema('create', body) self.assertIn('users', schema['properties']) def test_get_update_user_pw(self): body = {'users': [{'name': 'test', 'password': 'test'}]} schema = self.controller.get_schema('update_all', body) self.assertIn('users', schema['properties']) def test_get_update_user_db(self): body = {'databases': [{'name': 'test'}, {'name': 'test'}]} schema = self.controller.get_schema('update_all', body) self.assertIn('databases', schema['properties']) def test_validate_create_empty(self): body = {"users": []} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) # TODO(zed): Restore after API version increment # errors = sorted(validator.iter_errors(body), key=lambda e: e.path) # self.assertThat(len(errors), Is(1)) # self.assertThat(errors[0].message, Equals("[] is too short")) # self.assertThat(errors[0].path.pop(), Equals("users")) def test_validate_create_short_password(self): body = {"users": [{"name": "joe", "password": ""}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("password", error_paths) def test_validate_create_no_password(self): body = {"users": [{"name": "joe"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] self.assertThat(len(errors), Is(1)) self.assertIn("'password' is a required property", error_messages) def test_validate_create_short_name(self): body = {"users": [{"name": ""}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(3)) self.assertIn("'password' is a required property", error_messages) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", 
error_messages) self.assertIn("name", error_paths) def test_validate_create_complete_db_empty(self): body = {"users": [{"databases": [], "name": "joe", "password": "123"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) self.assertThat(len(errors), Is(0)) def test_validate_create_complete_db_no_name(self): body = {"users": [{"databases": [{}], "name": "joe", "password": "123"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] self.assertThat(len(errors), Is(1)) self.assertIn("'name' is a required property", error_messages) def test_validate_create_bogus_attr(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "bogosity": 100, "password": "123"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) # TODO(zed): After API increment, this will NOT be valid. self.assertTrue(validator.is_valid(body)) def test_validate_create_complete_db(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_host_no_wildcard(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "192.168.1.1"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_host_wildcard(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "%"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_host_wildcard_prefix(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "%.168.1.1"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_create_host_wildcard_middle(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "192.%.1.1"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] self.assertThat(len(errors), Is(1)) self.assertIn("'192.%.1.1' does not match '^[%]?[\\\\w(-).]*[%]?$'", error_messages) def test_validate_create_host_wildcard_suffix(self): body = {"users": [{"databases": [{"name": "x"}], "name": "joe", "password": "123", "host": "192.168.1.%"}]} schema = self.controller.get_schema('create', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_update_empty(self): body = {"users": []} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) # TODO(zed): Restore after API version increment # errors = sorted(validator.iter_errors(body), 
key=lambda e: e.path) # self.assertThat(len(errors), Is(1)) # self.assertThat(errors[0].message, Equals("[] is too short")) # self.assertThat(errors[0].path.pop(), Equals("users")) def test_validate_update_short_password(self): body = {"users": [{"name": "joe", "password": ""}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("password", error_paths) def test_validate_update_user_complete(self): body = {"users": [{"name": "joe", "password": "", "databases": [{"name": "testdb"}]}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("password", error_paths) def test_validate_update_user_with_db_short_password(self): body = {"users": [{"name": "joe", "password": "", "databases": [{"name": "testdb"}]}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for error in errors] self.assertThat(len(errors), Is(2)) self.assertIn("'' is too short", error_messages) self.assertIn("password", error_paths) def test_validate_update_no_password(self): body = {"users": [{"name": "joe"}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] self.assertThat(len(errors), Is(1)) self.assertIn("'password' is a required property", error_messages) def test_validate_update_database_complete(self): body = {"databases": [{"name": "test1"}, {"name": "test2"}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_update_database_empty(self): body = {"databases": []} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) # TODO(zed): Restore after API version increment # errors = sorted(validator.iter_errors(body), key=lambda e: e.path) # self.assertThat(len(errors), Is(1)) # self.assertThat(errors[0].message, Equals('[] is too short')) def test_validate_update_short_name(self): body = {"users": [{"name": ""}]} schema = self.controller.get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) errors = sorted(validator.iter_errors(body), key=lambda e: e.path) error_messages = [error.message for error in errors] error_paths = [error.path.pop() for 
error in errors] self.assertThat(len(errors), Is(3)) self.assertIn("'password' is a required property", error_messages) self.assertIn("'' is too short", error_messages) self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages) self.assertIn("name", error_paths) def test_get_update_user_attributes(self): body = {'user': {'name': 'test'}} schema = self.controller.get_schema('update', body) self.assertIn('user', schema['properties']) def test_validate_update_user_attributes(self): body = {'user': {'name': 'test', 'password': 'test', 'host': '%'}} schema = self.controller.get_schema('update', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) def test_validate_update_user_attributes_empty(self): body = {"user": {}} schema = self.controller.get_schema('update', body) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) def test_validate_host_in_user_attributes(self): body_empty_host = {'user': { 'name': 'test', 'password': 'test', 'host': '%' }} body_with_host = {'user': { 'name': 'test', 'password': 'test', 'host': '1.1.1.1' }} body_none_host = {'user': { 'name': 'test', 'password': 'test', 'host': "" }} schema_empty_host = self.controller.get_schema('update', body_empty_host) schema_with_host = self.controller.get_schema('update', body_with_host) schema_none_host = self.controller.get_schema('update', body_none_host) validator_empty_host = jsonschema.Draft4Validator(schema_empty_host) validator_with_host = jsonschema.Draft4Validator(schema_with_host) validator_none_host = jsonschema.Draft4Validator(schema_none_host) self.assertTrue(validator_empty_host.is_valid(body_empty_host)) self.assertTrue(validator_with_host.is_valid(body_with_host)) self.assertFalse(validator_none_host.is_valid(body_none_host)) class TestUserAccessController(trove_testtools.TestCase): def test_validate_update_db(self): body = {"databases": []} schema = (UserAccessController()).get_schema('update_all', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) # TODO(zed): Restore after API version increment # errors = sorted(validator.iter_errors(body), key=lambda e: e.path) # self.assertThat(len(errors), Is(1)) # self.assertThat(errors[0].message, Equals("[] is too short")) # self.assertThat(errors[0].path.pop(), Equals("databases")) class TestSchemaController(trove_testtools.TestCase): def setUp(self): super(TestSchemaController, self).setUp() self.controller = SchemaController() self.body = { "databases": [ { "name": "first_db", "collate": "latin2_general_ci", "character_set": "latin2" }, { "name": "second_db" } ] } def test_validate_mixed(self): schema = self.controller.get_schema('create', self.body) self.assertIsNotNone(schema) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(self.body)) def test_validate_mixed_with_no_name(self): body = self.body.copy() body['databases'].append({"collate": "some_collation"}) schema = self.controller.get_schema('create', body) self.assertIsNotNone(schema) validator = jsonschema.Draft4Validator(schema) self.assertFalse(validator.is_valid(body)) def test_validate_empty(self): body = {"databases": []} schema = self.controller.get_schema('create', body) self.assertIsNotNone(schema) self.assertIn('databases', body) validator = jsonschema.Draft4Validator(schema) self.assertTrue(validator.is_valid(body)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8001113 
trove-12.1.0.dev92/trove/tests/unittests/quota/0000755000175000017500000000000000000000000021643 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/quota/__init__.py0000644000175000017500000000000000000000000023742 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/quota/test_quota.py0000644000175000017500000006643200000000000024410 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import Mock, MagicMock, patch from testtools import skipIf from trove.common import cfg from trove.common import exception from trove.db.models import DatabaseModelBase from trove.extensions.mgmt.quota.service import QuotaController from trove.quota.models import Quota from trove.quota.models import QuotaUsage from trove.quota.models import Reservation from trove.quota.models import Resource from trove.quota.quota import DbQuotaDriver from trove.quota.quota import QUOTAS from trove.quota.quota import run_with_quotas from trove.tests.unittests import trove_testtools """ Unit tests for the quota classes and functions in trove/quota/quota.py (DbQuotaDriver, QUOTAS and run_with_quotas).
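These tests exercise the reserve/commit/rollback contract: run_with_quotas() reserves the requested per-resource deltas for a tenant, invokes the supplied callable, commits the reservation when the callable succeeds, and rolls it back when it raises. A minimal sketch of the intended call shape (the callable name and delta values are illustrative only, not part of the API):

    def do_provisioning():
        pass  # work that consumes the reserved quota

    run_with_quotas(tenant_id, {'instances': 1, 'volumes': 5},
                    do_provisioning)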
""" CONF = cfg.CONF resources = { Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_tenant'), Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_tenant') } FAKE_TENANT1 = "123456" FAKE_TENANT2 = "654321" class Run_with_quotasTest(trove_testtools.TestCase): def setUp(self): super(Run_with_quotasTest, self).setUp() self.quota_reserve_orig = QUOTAS.reserve self.quota_rollback_orig = QUOTAS.rollback self.quota_commit_orig = QUOTAS.commit QUOTAS.reserve = Mock() QUOTAS.rollback = Mock() QUOTAS.commit = Mock() def tearDown(self): super(Run_with_quotasTest, self).tearDown() QUOTAS.reserve = self.quota_reserve_orig QUOTAS.rollback = self.quota_rollback_orig QUOTAS.commit = self.quota_commit_orig def test_run_with_quotas(self): f = Mock() run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called) def test_run_with_quotas_error(self): f = Mock(side_effect=exception.TroveError()) self.assertRaises(exception.TroveError, run_with_quotas, FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called) class QuotaControllerTest(trove_testtools.TestCase): def setUp(self): super(QuotaControllerTest, self).setUp() context = MagicMock() context.is_admin = True req = MagicMock() req.environ = MagicMock() req.environ.get = MagicMock(return_value=context) self.req = req self.controller = QuotaController() def tearDown(self): super(QuotaControllerTest, self).tearDown() def test_update_unknown_resource(self): body = {'quotas': {'unknown_resource': 5}} self.assertRaises(exception.QuotaResourceUnknown, self.controller.update, self.req, body, FAKE_TENANT1, FAKE_TENANT2) def test_update_resource_no_value(self): quota = MagicMock(spec=Quota) with patch.object(DatabaseModelBase, 'find_by', return_value=quota): body = {'quotas': {'instances': None}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) self.assertEqual(0, quota.save.call_count) self.assertEqual(200, result.status) def test_update_resource_instance(self): instance_quota = MagicMock(spec=Quota) with patch.object(DatabaseModelBase, 'find_by', return_value=instance_quota): body = {'quotas': {'instances': 2}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) self.assertEqual(1, instance_quota.save.call_count) self.assertIn('instances', result._data['quotas']) self.assertEqual(200, result.status) self.assertEqual(2, result._data['quotas']['instances']) @skipIf(not CONF.trove_volume_support, 'Volume support is not enabled') def test_update_resource_volume(self): instance_quota = MagicMock(spec=Quota) volume_quota = MagicMock(spec=Quota) def side_effect_func(*args, **kwargs): return (instance_quota if kwargs['resource'] == 'instances' else volume_quota) with patch.object(DatabaseModelBase, 'find_by', side_effect=side_effect_func): body = {'quotas': {'instances': None, 'volumes': 10}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) self.assertEqual(0, instance_quota.save.call_count) self.assertNotIn('instances', result._data['quotas']) self.assertEqual(1, volume_quota.save.call_count) self.assertEqual(200, result.status) self.assertEqual(10, result._data['quotas']['volumes']) def test_update_resource_with_invalid_negative_number(self): quota = MagicMock(spec=Quota) with 
patch.object(DatabaseModelBase, 'find_by', return_value=quota): body = {'quotas': {'instances': -2}} self.assertRaises(exception.QuotaLimitTooSmall, self.controller.update, self.req, body, FAKE_TENANT1, FAKE_TENANT2) class DbQuotaDriverTest(trove_testtools.TestCase): def setUp(self): super(DbQuotaDriverTest, self).setUp() self.driver = DbQuotaDriver(resources) self.orig_Quota_find_all = Quota.find_all self.orig_QuotaUsage_find_all = QuotaUsage.find_all self.orig_QuotaUsage_find_by = QuotaUsage.find_by self.orig_Reservation_create = Reservation.create self.orig_QuotaUsage_create = QuotaUsage.create self.orig_QuotaUsage_save = QuotaUsage.save self.orig_Reservation_save = Reservation.save self.mock_quota_result = Mock() self.mock_usage_result = Mock() Quota.find_all = Mock(return_value=self.mock_quota_result) QuotaUsage.find_all = Mock(return_value=self.mock_usage_result) def tearDown(self): super(DbQuotaDriverTest, self).tearDown() Quota.find_all = self.orig_Quota_find_all QuotaUsage.find_all = self.orig_QuotaUsage_find_all QuotaUsage.find_by = self.orig_QuotaUsage_find_by Reservation.create = self.orig_Reservation_create QuotaUsage.create = self.orig_QuotaUsage_create QuotaUsage.save = self.orig_QuotaUsage_save Reservation.save = self.orig_Reservation_save def test_get_defaults(self): defaults = self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_tenant, defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_tenant, defaults[Resource.VOLUMES]) def test_get_quota_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEqual(FAKE_TENANT1, quota.tenant_id) self.assertEqual(Resource.INSTANCES, quota.resource) self.assertEqual(12, quota.hard_limit) def test_get_quota_by_tenant_default(self): self.mock_quota_result.all = Mock(return_value=[]) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEqual(FAKE_TENANT1, quota.tenant_id) self.assertEqual(Resource.VOLUMES, quota.resource) self.assertEqual(CONF.max_volumes_per_tenant, quota.hard_limit) def test_get_all_quotas_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEqual(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEqual(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEqual(22, quotas[Resource.INSTANCES].hard_limit) self.assertEqual(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEqual(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEqual(15, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all = Mock(return_value=[]) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEqual(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEqual(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEqual(CONF.max_instances_per_tenant, quotas[Resource.INSTANCES].hard_limit) self.assertEqual(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEqual(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) 
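# With no per-tenant Quota rows returned, each resource's hard_limit is
# expected to fall back to its CONF default (max_instances_per_tenant and
# max_volumes_per_tenant, asserted below).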
self.assertEqual(CONF.max_volumes_per_tenant, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEqual(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEqual(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEqual(22, quotas[Resource.INSTANCES].hard_limit) self.assertEqual(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEqual(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEqual(CONF.max_volumes_per_tenant, quotas[Resource.VOLUMES].hard_limit) def test_get_quota_usage_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEqual(FAKE_TENANT1, usage.tenant_id) self.assertEqual(Resource.VOLUMES, usage.resource) self.assertEqual(3, usage.in_use) self.assertEqual(1, usage.reserved) def test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(return_value=FAKE_QUOTA) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEqual(FAKE_TENANT1, usage.tenant_id) self.assertEqual(Resource.VOLUMES, usage.resource) self.assertEqual(0, usage.in_use) self.assertEqual(0, usage.reserved) def test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2, reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEqual(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEqual(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEqual(2, usages[Resource.INSTANCES].in_use) self.assertEqual(1, usages[Resource.INSTANCES].reserved) self.assertEqual(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEqual(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEqual(1, usages[Resource.VOLUMES].in_use) self.assertEqual(1, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] def side_effect_func(*args, **kwargs): return (FAKE_QUOTAS[0] if kwargs['resource'] == 'instances' else FAKE_QUOTAS[1]) self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(side_effect=side_effect_func) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEqual(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEqual(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEqual(0, usages[Resource.INSTANCES].in_use) self.assertEqual(0, usages[Resource.INSTANCES].reserved) self.assertEqual(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEqual(Resource.VOLUMES, 
usages[Resource.VOLUMES].resource) self.assertEqual(0, usages[Resource.VOLUMES].in_use) self.assertEqual(0, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_one_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0)] NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEqual(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEqual(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEqual(0, usages[Resource.INSTANCES].in_use) self.assertEqual(0, usages[Resource.INSTANCES].reserved) self.assertEqual(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEqual(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEqual(0, usages[Resource.VOLUMES].in_use) self.assertEqual(0, usages[Resource.VOLUMES].reserved) def test_check_quota_with_unlimited_quota(self): FAKE_QUOTA_USAGE = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=-1), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=-1)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTA_USAGE) QuotaUsage.save = Mock() Reservation.create = Mock() delta = {'instances': 2, 'volumes': 3} self.assertIsNone(self.driver.check_quotas(FAKE_TENANT1, resources, delta)) def test_reserve(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock() Reservation.create = Mock() # Set up the deltas with the intention that after the reserve call # the deltas should match usage_id + 1 for both instances and volumes delta = {'instances': 2, 'volumes': 3} self.driver.reserve(FAKE_TENANT1, resources, delta) for _, kw in Reservation.create.call_args_list: self.assertEqual(kw['usage_id'] + 1, kw['delta']) self.assertEqual(Reservation.Statuses.RESERVED, kw['status']) def test_reserve_resource_unknown(self): delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123} self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 1, 'volumes': CONF.max_volumes_per_tenant + 1} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_usage(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, 
reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) max_inst = CONF.max_instances_per_tenant delta = {'instances': max_inst, 'volumes': 3} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) max_inst = CONF.max_instances_per_tenant delta = {'instances': max_inst - 1, 'volumes': 2} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=10, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=50, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock() Reservation.create = Mock() # Set up the deltas with the intention that after the reserve call # the deltas should match -usage_id for both instances and volumes delta = {'instances': -1, 'volumes': -2} self.driver.reserve(FAKE_TENANT1, resources, delta) for _, kw in Reservation.create.call_args_list: self.assertEqual(-kw['usage_id'], kw['delta']) self.assertEqual(Reservation.Statuses.RESERVED, kw['status']) def test_commit(self): Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=2)] FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) self.assertEqual(3, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[1].status) def test_commit_cannot_be_less_than_zero(self): Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=-1)] FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=-1, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(0, FAKE_QUOTAS[0].in_use) self.assertEqual(0, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) def test_rollback(self): Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=2)] FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = 
Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[0].status) self.assertEqual(1, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[1].status) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8001113 trove-12.1.0.dev92/trove/tests/unittests/router/0000755000175000017500000000000000000000000022032 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/router/__init__.py0000644000175000017500000000000000000000000024131 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/router/test_router.py0000644000175000017500000000302300000000000024761 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from routes import Mapper from trove.common.wsgi import Router, Fault from trove.tests.unittests import trove_testtools class FakeRequest(object): """A fake webob request object designed to cause 404. The dispatcher actually checks if the given request is a dict and throws an error if it is. This object wrapper tricks the dispatcher into handling the request like a regular request. """ environ = { "wsgiorg.routing_args": [ False, False ] } class TestRouter(trove_testtools.TestCase): """Test case for trove `Router` extensions.""" def setUp(self): super(TestRouter, self).setUp() self.mapper = Mapper() def test_404_is_fault(self): """Test that the dispatcher wraps 404's in a `Fault`.""" fake_request = FakeRequest() response = Router._dispatch(fake_request) assert isinstance(response, Fault) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8041112 trove-12.1.0.dev92/trove/tests/unittests/taskmanager/0000755000175000017500000000000000000000000023007 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/taskmanager/__init__.py0000644000175000017500000000000000000000000025106 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/taskmanager/test_api.py0000644000175000017500000001351200000000000025173 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from mock import Mock from mock import patch from trove.common import context from trove.common import exception from trove.common.rpc.version import RPC_API_VERSION from trove.common.strategies.cluster.experimental.mongodb.taskmanager import ( MongoDbTaskManagerAPI) from trove.guestagent import models as agent_models from trove.taskmanager import api as task_api from trove.tests.unittests import trove_testtools class ApiTest(trove_testtools.TestCase): @patch.object(task_api.API, 'get_client') def setUp(self, *args): super(ApiTest, self).setUp() self.context = context.TroveContext() self.api = task_api.API(self.context) self._mock_rpc_client() def _verify_rpc_prepare_before_cast(self): self.api.client.prepare.assert_called_once_with( version=RPC_API_VERSION) def _verify_cast(self, *args, **kwargs): self.call_context.cast.assert_called_once_with(self.context, *args, **kwargs) def _mock_rpc_client(self): self.call_context = trove_testtools.TroveTestContext(self) self.api.client.prepare = Mock(return_value=self.call_context) self.call_context.cast = Mock() @patch.object(task_api.API, '_transform_obj', Mock(return_value='flv-id')) def test_create_instance(self): self.api.create_instance( 'inst-id', 'inst-name', mock.ANY, 'img-id', {'name': 'db1'}, {'name': 'usr1'}, 'mysql', None, 1, backup_id='bk-id', availability_zone='az', root_password='pwd', nics=['nic-id'], overrides={}, slave_of_id='slv-id', cluster_config={}, volume_type='type', modules=['mod-id'], locality='affinity') self._verify_rpc_prepare_before_cast() self._verify_cast( 'create_instance', availability_zone='az', backup_id='bk-id', cluster_config={}, databases={'name': 'db1'}, datastore_manager='mysql', flavor='flv-id', image_id='img-id', instance_id='inst-id', locality='affinity', modules=['mod-id'], name='inst-name', nics=['nic-id'], overrides={}, packages=None, root_password='pwd', slave_of_id='slv-id', users={'name': 'usr1'}, volume_size=1, volume_type='type', access=None) def test_detach_replica(self): self.api.detach_replica('some-instance-id') self._verify_rpc_prepare_before_cast() self._verify_cast('detach_replica', instance_id='some-instance-id') def test_promote_to_replica_source(self): self.api.promote_to_replica_source('some-instance-id') self._verify_rpc_prepare_before_cast() self._verify_cast('promote_to_replica_source', instance_id='some-instance-id') def test_eject_replica_source(self): self.api.eject_replica_source('some-instance-id') self._verify_rpc_prepare_before_cast() self._verify_cast('eject_replica_source', instance_id='some-instance-id') def test_create_cluster(self): self.api.create_cluster('some-cluster-id') self._verify_rpc_prepare_before_cast() self._verify_cast('create_cluster', cluster_id='some-cluster-id') def test_delete_cluster(self): self.api.delete_cluster('some-cluster-id') self._verify_rpc_prepare_before_cast() self._verify_cast('delete_cluster', cluster_id='some-cluster-id') @patch.object(agent_models, 'AgentHeartBeat') def test_delete_heartbeat(self, mock_agent_heart_beat): mock_heartbeat = Mock() mock_agent_heart_beat.return_value.find_by_instance_id = Mock( 
return_value=mock_heartbeat) self.api._delete_heartbeat('some-cluster-id') mock_heartbeat.delete.assert_called_with() @patch.object(agent_models, 'AgentHeartBeat') @patch('trove.taskmanager.api.LOG') def test_exception_delete_heartbeat(self, mock_logging, mock_agent_heart_beat): mock_agent_heart_beat.return_value.find_by_instance_id.side_effect = ( exception.ModelNotFoundError) self.api._delete_heartbeat('some-cluster-id') mock_agent_heart_beat.return_value.delete.assert_not_called() def test_transform_obj(self): flavor = Mock() self.assertRaisesRegex(ValueError, ('Could not transform %s' % flavor), self.api._transform_obj, flavor) def test_upgrade(self): self.api.upgrade('some-instance-id', 'some-datastore-version') self._verify_rpc_prepare_before_cast() self._verify_cast('upgrade', instance_id='some-instance-id', datastore_version_id='some-datastore-version') class TestAPI(trove_testtools.TestCase): @patch.object(task_api.API, 'get_client') def test_load_api(self, get_client_mock): context = trove_testtools.TroveTestContext(self) manager = 'mongodb' self.assertIsInstance(task_api.load(context), task_api.API) self.assertIsInstance(task_api.load(context, manager), MongoDbTaskManagerAPI) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/taskmanager/test_clusters.py0000644000175000017500000006364200000000000026277 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
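# The cluster task tests below model a minimal sharded MongoDB topology:
# two "member" instances forming replica set "shard-1", one query router
# (mongos, type "query_router") and one config server. The create_cluster
# flow they verify is roughly the following (ordering inferred from the
# assertions in this file, not from a specification):
#
#   _all_instances_ready(instance_ids, cluster_id)  # guests report ready
#   add_config_servers([config_server_ip])          # wire the query router
#   create_admin_user(generated_password)           # bootstrap credentials
#   _create_shard(query_router, members)            # _init_replica_set(),
#                                                   # then add_shard(rs, ip)
#   cluster_complete() on every guest, then reset_task()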
import datetime from mock import MagicMock from mock import Mock from mock import patch from trove.cluster.models import ClusterTasks as ClusterTaskStatus from trove.cluster.models import DBCluster from trove.common.strategies.cluster.experimental.mongodb.taskmanager import ( MongoDbClusterTasks as ClusterTasks) from trove.common import utils from trove.datastore import models as datastore_models from trove.instance.models import BaseInstance from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceTasks # from trove.taskmanager.models import BuiltInstanceTasks from trove.taskmanager.models import ServiceStatuses from trove.tests.unittests import trove_testtools class MongoDbClusterTasksTest(trove_testtools.TestCase): def setUp(self): super(MongoDbClusterTasksTest, self).setUp() self.cluster_id = "1232" self.cluster_name = "Cluster-1234" self.tenant_id = "6789" self.db_cluster = DBCluster(ClusterTaskStatus.NONE, id=self.cluster_id, created=str(datetime.date), updated=str(datetime.date), name=self.cluster_name, task_id=ClusterTaskStatus.NONE._code, tenant_id=self.tenant_id, datastore_version_id="1", deleted=False) self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1", compute_instance_id="compute-1", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-1", datastore_version_id="1", cluster_id=self.cluster_id, shard_id="shard-1", type="member") self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2", compute_instance_id="compute-2", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-2", datastore_version_id="1", cluster_id=self.cluster_id, shard_id="shard-1", type="member") self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="mongos", compute_instance_id="compute-3", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-3", datastore_version_id="1", cluster_id=self.cluster_id, shard_id="shard-1", type="query_router") self.dbinst4 = DBInstance(InstanceTasks.NONE, id="4", name="configserver", compute_instance_id="compute-4", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-4", datastore_version_id="1", cluster_id=self.cluster_id, shard_id="shard-1", type="config_server") mock_ds1 = Mock() mock_ds1.name = 'mongodb' mock_dv1 = Mock() mock_dv1.name = '2.0.4' self.clustertasks = ClusterTasks(Mock(), self.db_cluster, datastore=mock_ds1, datastore_version=mock_dv1) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(DBInstance, 'find_by') @patch.object(InstanceServiceStatus, 'find_by') @patch('trove.taskmanager.models.LOG') def test_all_instances_ready_with_server_error(self, mock_logging, mock_find, mock_db_find, mock_update): (mock_find.return_value. get_status.return_value) = ServiceStatuses.NEW (mock_db_find.return_value. 
get_task_status.return_value) = InstanceTasks.BUILDING_ERROR_SERVER ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) mock_update.assert_called_with(self.cluster_id, None) self.assertFalse(ret_val) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(DBInstance, 'find_by') @patch.object(InstanceServiceStatus, 'find_by') @patch('trove.taskmanager.models.LOG') def test_all_instances_ready_bad_status(self, mock_logging, mock_find, mock_db_find, mock_update): (mock_find.return_value. get_status.return_value) = ServiceStatuses.FAILED (mock_db_find.return_value. get_task_status.return_value) = InstanceTasks.NONE ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) mock_update.assert_called_with(self.cluster_id, None) self.assertFalse(ret_val) @patch.object(DBInstance, 'find_by') @patch.object(InstanceServiceStatus, 'find_by') def test_all_instances_ready(self, mock_find, mock_db_find): (mock_find.return_value. get_status.return_value) = ServiceStatuses.INSTANCE_READY (mock_db_find.return_value. get_task_status.return_value) = InstanceTasks.NONE ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch( 'trove.common.strategies.cluster.experimental.mongodb.taskmanager.LOG') def test_init_replica_set_failure(self, mock_logging, mock_dv, mock_ds, mock_ip, mock_guest, mock_update): member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) primary_member = member1 other_members = [member2] mock_ip.side_effect = ["10.0.0.3"] mock_guest().prep_primary.return_value = Mock() mock_guest().add_members.return_value = Mock() mock_guest.return_value.add_members = Mock( side_effect=Exception("Boom!")) ret_val = self.clustertasks._init_replica_set(primary_member, other_members) mock_update.assert_called_with(self.cluster_id, shard_id='shard-1') self.assertFalse(ret_val) @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_init_replica_set(self, mock_dv, mock_ds, mock_ip, mock_guest): member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) primary_member = member1 other_members = [member2] mock_ip.side_effect = ["10.0.0.3"] mock_guest().prep_primary.return_value = Mock() mock_guest().add_members.return_value = Mock() ret_val = self.clustertasks._init_replica_set(primary_member, other_members) mock_guest.return_value.add_members.assert_called_with( ["10.0.0.3"] ) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(ClusterTasks, '_init_replica_set') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch( 'trove.common.strategies.cluster.experimental.mongodb.taskmanager.LOG') def test_create_shard_failure(self, 
mock_logging, mock_dv, mock_ds, mock_ip, mock_guest, mock_init_rs, mock_update): member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) members = [member1, member2] mock_ip.side_effect = ["10.0.0.2"] query_router = [ BaseInstance(Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) ] mock_guest().get_replica_set_name.return_value = 'testrs' mock_add_shard = Mock(side_effect=Exception("Boom!")) mock_guest().add_shard = mock_add_shard ret_val = self.clustertasks._create_shard(query_router, members) mock_init_rs.assert_called_with(member1, [member2]) mock_update.assert_called_with(self.cluster_id, shard_id="shard-1") self.assertFalse(ret_val) @patch.object(ClusterTasks, '_init_replica_set') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_create_shard(self, mock_dv, mock_ds, mock_ip, mock_guest, mock_init_rs): member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) members = [member1, member2] mock_ip.side_effect = ["10.0.0.2"] query_router = [ BaseInstance(Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) ] mock_guest().get_replica_set_name.return_value = 'testrs' mock_add_shard = Mock() mock_guest().add_shard = mock_add_shard ret_val = self.clustertasks._create_shard(query_router, members) mock_init_rs.assert_called_with(member1, [member2]) mock_add_shard.assert_called_with("testrs", "10.0.0.2") self.assertTrue(ret_val) @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, '_create_shard') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch.object(Instance, 'load') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(DBInstance, 'find_all') def test_add_shard_cluster(self, mock_find_all, mock_all_instances_ready, mock_load, mock_dv, mock_ds, mock_add_shard, mock_guest, mock_reset_task): mock_find_all.return_value.all.return_value = [self.dbinst1, self.dbinst2, self.dbinst3, self.dbinst4] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) mock_all_instances_ready.return_value = True mock_add_shard.return_value = True mock_guest.return_value.cluster_complete.return_value = Mock() self.clustertasks.add_shard_cluster(Mock(), self.cluster_id, "shard-1", "rs1") mock_guest.return_value.cluster_complete.assert_called_with() mock_reset_task.assert_called_with() @patch.object(DBCluster, 'save') @patch.object(DBCluster, 'find_by') @patch.object(DBInstance, 'find_all') def test_delete_cluster(self, mock_find_all, mock_find_by, mock_save): mock_find_all.return_value.all.return_value = [] mock_find_by.return_value = self.db_cluster self.clustertasks.delete_cluster(Mock(), self.cluster_id) self.assertEqual(ClusterTaskStatus.NONE, self.db_cluster.task_status) mock_save.assert_called_with() def test_rolling_upgrade_cluster_without_order_specified(self): self._assert_rolling_upgrade_cluster(None, None) def test_rolling_upgrade_cluster_with_order_specified(self): ordering = { 1: 1, 2: 2, 3: 3, 4: 4, 5: 5 } def ordering_function(instance): 
return ordering[instance.id] self._assert_rolling_upgrade_cluster(ordering_function, ordering) @patch('trove.taskmanager.models.DBaaSInstanceUpgrade') @patch('trove.taskmanager.models.BuiltInstanceTasks') @patch('trove.taskmanager.models.EndNotification') @patch('trove.taskmanager.models.StartNotification') @patch('trove.taskmanager.models.Timeout') @patch.object(ClusterTasks, 'reset_task') @patch.object(DBInstance, 'find_all') def _assert_rolling_upgrade_cluster(self, ordering_function, ordering, mock_find_all, mock_reset_task, mock_timeout, mock_start, mock_end, mock_instance_task, mock_upgrade): class MockInstance(Mock): upgrade_counter = 0 def upgrade(self, _): MockInstance.upgrade_counter += 1 self.upgrade_number = MockInstance.upgrade_counter db_instances = [Mock() for _ in range(5)] for i in range(5): db_instances[i].id = i + 1 mock_find_all.return_value.all.return_value = db_instances instances = [] def load_side_effect(_, instance_id): return_value = MockInstance() return_value.id = instance_id instances.append(return_value) return return_value mock_instance_task.load.side_effect = load_side_effect if ordering is None: ordering = { 1: 1, 2: 2, 3: 3, 4: 4, 5: 5 } self.clustertasks.rolling_upgrade_cluster(MagicMock(), Mock(), Mock(), ordering_function) order_result = {inst.id: inst.upgrade_number for inst in instances} self.assertEqual(ClusterTaskStatus.NONE, self.db_cluster.task_status) self.assertDictEqual(ordering, order_result) @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, '_create_shard') @patch.object(ClusterTasks, 'get_guest') @patch.object(utils, 'generate_random_password', return_value='pwd') @patch.object(ClusterTasks, 'get_ip') @patch.object(Instance, 'load') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_create_cluster(self, mock_dv, mock_ds, mock_find_all, mock_all_instances_ready, mock_load, mock_ip, mock_password, mock_guest, mock_create_shard, mock_reset_task): mock_find_all.return_value.all.return_value = [self.dbinst1, self.dbinst2, self.dbinst3, self.dbinst4] mock_all_instances_ready.return_value = True member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) config_server = BaseInstance( Mock(), self.dbinst4, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_load.side_effect = [member1, member2, query_router, config_server] mock_ip.side_effect = ["10.0.0.5"] mock_create_shard.return_value = True self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_guest().add_config_servers.assert_called_with(["10.0.0.5"]) mock_guest().create_admin_user.assert_called_with("pwd") mock_create_shard.assert_called_with( query_router, [member1, member2] ) self.assertEqual(4, mock_guest().cluster_complete.call_count) mock_reset_task.assert_called_with() @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_cluster_admin_password') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch( 'trove.common.strategies.cluster.experimental.mongodb.taskmanager.LOG') def test_add_query_routers_failure(self, 
mock_logging, mock_dv, mock_ds, mock_password, mock_guest, mock_update): query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_guest.side_effect = Exception("Boom!") ret_val = self.clustertasks._add_query_routers([query_router], ['10.0.0.5']) mock_update.assert_called_with(self.cluster_id) self.assertFalse(ret_val) @patch.object(ClusterTasks, 'get_guest') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_add_query_routers(self, mock_dv, mock_ds, mock_guest): password = 'pwd' query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) ret_val = self.clustertasks._add_query_routers([query_router], ['10.0.0.5'], admin_password=password) mock_guest.assert_called_with(query_router) mock_guest().add_config_servers.assert_called_with(['10.0.0.5']) mock_guest().store_admin_password.assert_called_with(password) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'get_guest') @patch.object(utils, 'generate_random_password') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_add_query_routers_new_cluster(self, mock_dv, mock_ds, mock_gen_password, mock_guest): password = 'pwd' query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_gen_password.return_value = password ret_val = self.clustertasks._add_query_routers([query_router], ['10.0.0.5']) mock_guest.assert_called_with(query_router) mock_guest().add_config_servers.assert_called_with(['10.0.0.5']) mock_guest().create_admin_user.assert_called_with(password) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(DBInstance, 'find_all') def _run_grow_cluster(self, mock_find_all, mock_all_instances_ready, mock_guest, mock_reset_task, new_instances_ids=None): mock_find_all().all.return_value = [self.dbinst1, self.dbinst2, self.dbinst3, self.dbinst4] mock_all_instances_ready.return_value = True self.clustertasks.grow_cluster(Mock(), self.cluster_id, new_instances_ids) self.assertEqual(len(new_instances_ids), mock_guest().cluster_complete.call_count) mock_reset_task.assert_called_with() @patch.object(ClusterTasks, '_add_query_routers') @patch.object(ClusterTasks, 'get_cluster_admin_password') @patch.object(ClusterTasks, 'get_ip') @patch.object(Instance, 'load') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_grow_cluster_query_router(self, mock_dv, mock_ds, mock_load, mock_ip, mock_get_password, mock_add_query_router): query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) config_server = BaseInstance( Mock(), self.dbinst4, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_load.side_effect = [query_router, config_server] mock_ip.return_value = '10.0.0.5' mock_add_query_router.return_value = True self._run_grow_cluster(new_instances_ids=[query_router.id]) mock_add_query_router.assert_called_with( [query_router], ['10.0.0.5'], admin_password=mock_get_password() ) @patch.object(ClusterTasks, '_create_shard') @patch.object(Instance, 'load') @patch.object(ClusterTasks, '_get_running_query_router_id') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def 
test_grow_cluster_shard(self, mock_dv, mock_ds, mock_running_qr_id, mock_load, mock_create_shard): mock_running_qr_id.return_value = '3' member1 = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) member2 = BaseInstance(Mock(), self.dbinst2, Mock(), InstanceServiceStatus(ServiceStatuses.NEW)) query_router = BaseInstance( Mock(), self.dbinst3, Mock(), InstanceServiceStatus(ServiceStatuses.NEW) ) mock_load.side_effect = [member1, member2, query_router] mock_create_shard.return_value = True self._run_grow_cluster(new_instances_ids=[member1.id, member2.id]) mock_create_shard.assert_called_with( query_router, [member1, member2] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/taskmanager/test_galera_clusters.py0000644000175000017500000003264700000000000027613 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from mock import Mock from mock import patch from trove.cluster.models import ClusterTasks as ClusterTaskStatus from trove.cluster.models import DBCluster from trove.common.exception import GuestError from trove.common.strategies.cluster.experimental.galera_common.taskmanager \ import GaleraCommonClusterTasks from trove.common.strategies.cluster.experimental.galera_common.taskmanager \ import GaleraCommonTaskManagerStrategy from trove.datastore import models as datastore_models from trove.instance.models import BaseInstance from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceTasks from trove.taskmanager.models import ServiceStatuses from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util class GaleraClusterTasksTest(trove_testtools.TestCase): def setUp(self): super(GaleraClusterTasksTest, self).setUp() util.init_db() self.cluster_id = "1232" self.cluster_name = "Cluster-1234" self.tenant_id = "6789" self.db_cluster = DBCluster(ClusterTaskStatus.NONE, id=self.cluster_id, created=str(datetime.date), updated=str(datetime.date), name=self.cluster_name, task_id=ClusterTaskStatus.NONE._code, tenant_id=self.tenant_id, datastore_version_id="1", deleted=False) self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1", compute_instance_id="compute-1", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-1", datastore_version_id="1", cluster_id=self.cluster_id, type="member") self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2", compute_instance_id="compute-2", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-2", datastore_version_id="1", cluster_id=self.cluster_id, type="member") self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="member3", compute_instance_id="compute-3", 
task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-3", datastore_version_id="1", cluster_id=self.cluster_id, type="member") mock_ds1 = Mock() mock_ds1.name = 'pxc' mock_dv1 = Mock() mock_dv1.name = '7.1' self.clustertasks = GaleraCommonClusterTasks( Mock(), self.db_cluster, datastore=mock_ds1, datastore_version=mock_dv1) self.cluster_context = { 'replication_user': { 'name': "name", 'password': "password", }, 'cluster_name': self.cluster_name, 'admin_password': "admin_password" } @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch.object(DBInstance, 'find_by') @patch.object(InstanceServiceStatus, 'find_by') @patch('trove.taskmanager.models.LOG') def test_all_instances_ready_with_server_error(self, mock_logging, mock_find, mock_db_find, mock_update): (mock_find.return_value. get_status.return_value) = ServiceStatuses.NEW (mock_db_find.return_value. get_task_status.return_value) = InstanceTasks.BUILDING_ERROR_SERVER ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) mock_update.assert_called_with(self.cluster_id, None) self.assertFalse(ret_val) @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch.object(DBInstance, 'find_by') @patch.object(InstanceServiceStatus, 'find_by') @patch('trove.taskmanager.models.LOG') def test_all_instances_ready_bad_status(self, mock_logging, mock_find, mock_db_find, mock_update): (mock_find.return_value. get_status.return_value) = ServiceStatuses.FAILED (mock_db_find.return_value. get_task_status.return_value) = InstanceTasks.NONE ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) mock_update.assert_called_with(self.cluster_id, None) self.assertFalse(ret_val) @patch.object(DBInstance, 'find_by') @patch.object(InstanceServiceStatus, 'find_by') def test_all_instances_ready(self, mock_find, mock_db_find): (mock_find.return_value. get_status.return_value) = ServiceStatuses.INSTANCE_READY (mock_db_find.return_value. get_task_status.return_value) = InstanceTasks.NONE ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) self.assertTrue(ret_val) @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch.object(GaleraCommonClusterTasks, '_all_instances_ready', return_value=False) @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch('trove.common.strategies.cluster.experimental.galera_common.' 'taskmanager.LOG') def test_create_cluster_instance_not_ready(self, mock_logging, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_update): mock_find_all.return_value.all.return_value = [self.dbinst1] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_update.assert_called_with(self.cluster_id) @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch.object(GaleraCommonClusterTasks, 'reset_task') @patch.object(GaleraCommonClusterTasks, 'get_ip') @patch.object(GaleraCommonClusterTasks, '_all_instances_ready') @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch('trove.common.strategies.cluster.experimental.galera_common.' 
'taskmanager.LOG') def test_create_cluster_fail(self, mock_logging, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_ip, mock_reset_task, mock_update_status): mock_find_all.return_value.all.return_value = [self.dbinst1] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) mock_ip.return_value = "10.0.0.2" guest_client = Mock() guest_client.install_cluster = Mock(side_effect=GuestError("Error")) with patch.object(GaleraCommonClusterTasks, 'get_guest', return_value=guest_client): self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_update_status.assert_called_with('1232') mock_reset_task.assert_called_with() @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch('trove.common.strategies.cluster.experimental.galera_common.' 'taskmanager.LOG') def test_grow_cluster_does_not_exist(self, mock_logging, mock_update_status): context = Mock() bad_cluster_id = '1234' new_instances = [Mock(), Mock()] self.clustertasks.grow_cluster(context, bad_cluster_id, new_instances) mock_update_status.assert_called_with( '1234', status=InstanceTasks.GROWING_ERROR) @patch.object(GaleraCommonClusterTasks, '_check_cluster_for_root') @patch.object(GaleraCommonClusterTasks, 'reset_task') @patch.object(GaleraCommonClusterTasks, '_render_cluster_config') @patch.object(GaleraCommonClusterTasks, 'get_ip') @patch.object(GaleraCommonClusterTasks, 'get_guest') @patch.object(GaleraCommonClusterTasks, '_all_instances_ready', return_value=True) @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_grow_cluster_success(self, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_guest, mock_ip, mock_render, mock_reset_task, mock_check_root): mock_find_all.return_value.all.return_value = [self.dbinst1] mock_ip.return_value = "10.0.0.2" context = Mock() new_instances = [Mock(), Mock()] mock_guest.get_cluster_context = Mock( return_value=self.cluster_context) mock_guest.reset_admin_password = Mock() self.clustertasks.grow_cluster(context, self.cluster_id, new_instances) mock_reset_task.assert_called_with() @patch.object(GaleraCommonClusterTasks, 'reset_task') @patch.object(Instance, 'load') @patch.object(Instance, 'delete') @patch.object(DBInstance, 'find_all') @patch.object(GaleraCommonClusterTasks, 'get_guest') @patch.object(GaleraCommonClusterTasks, 'get_ip') @patch.object(GaleraCommonClusterTasks, '_render_cluster_config') def test_shrink_cluster_success(self, mock_render, mock_ip, mock_guest, mock_find_all, mock_delete, mock_load, mock_reset_task): mock_find_all.return_value.all.return_value = [self.dbinst1] context = Mock() remove_instances = [Mock()] mock_ip.return_value = "10.0.0.2" mock_guest.get_cluster_context = Mock( return_value=self.cluster_context) self.clustertasks.shrink_cluster(context, self.cluster_id, remove_instances) mock_reset_task.assert_called_with() @patch.object(Instance, 'load') @patch.object(GaleraCommonClusterTasks, 'update_statuses_on_failure') @patch('trove.common.strategies.cluster.experimental.galera_common.'
'taskmanager.LOG') def test_shrink_cluster_does_not_exist(self, mock_logging, mock_update_status, mock_load): context = Mock() bad_cluster_id = '1234' remove_instances = [Mock()] self.clustertasks.shrink_cluster(context, bad_cluster_id, remove_instances) mock_update_status.assert_called_with( '1234', status=InstanceTasks.SHRINKING_ERROR) class GaleraTaskManagerStrategyTest(trove_testtools.TestCase): def test_task_manager_cluster_tasks_class(self): strategy = GaleraCommonTaskManagerStrategy() self.assertFalse( hasattr(strategy.task_manager_cluster_tasks_class, 'rebuild_cluster')) self.assertTrue(callable( strategy.task_manager_cluster_tasks_class.create_cluster)) def test_task_manager_api_class(self): strategy = GaleraCommonTaskManagerStrategy() self.assertFalse(hasattr(strategy.task_manager_api_class, 'add_new_node')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/taskmanager/test_manager.py0000644000175000017500000003607700000000000026047 0ustar00coreycorey00000000000000# Copyright 2014 eBay Software Foundation # Copyright [2015] Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mock import MagicMock, Mock, patch, PropertyMock from proboscis.asserts import assert_equal from trove.backup.models import Backup from trove.common.exception import TroveError, ReplicationSlaveAttachError from trove.common import server_group as srv_grp from trove.instance.tasks import InstanceTasks from trove.taskmanager.manager import Manager from trove.taskmanager import models from trove.taskmanager import service from trove.tests.unittests import trove_testtools class TestManager(trove_testtools.TestCase): def setUp(self): super(TestManager, self).setUp() self.manager = Manager() self.context = trove_testtools.TroveTestContext(self) self.mock_slave1 = Mock() self.mock_slave2 = Mock() type(self.mock_slave1).id = PropertyMock(return_value='some-inst-id') type(self.mock_slave2).id = PropertyMock(return_value='inst1') self.mock_old_master = Mock() type(self.mock_old_master).slaves = PropertyMock( return_value=[self.mock_slave1, self.mock_slave2]) self.mock_master = Mock() type(self.mock_master).slaves = PropertyMock( return_value=[self.mock_slave1, self.mock_slave2]) def tearDown(self): super(TestManager, self).tearDown() self.manager = None def test_getattr_lookup(self): self.assertTrue(callable(self.manager.delete_cluster)) self.assertTrue(callable(self.manager.mongodb_add_shard_cluster)) def test_most_current_replica(self): master = Mock() master.id = 32 def test_case(txn_list, selected_master): with patch.object(self.manager, '_get_replica_txns', return_value=txn_list): result = self.manager._most_current_replica(master, None) assert_equal(result, selected_master) with self.assertRaisesRegex(TroveError, 'not all replicating from same'): test_case([['a', '2a99e-32bf', 2], ['b', '2a', 1]], None) test_case([['a', '2a99e-32bf', 2]], 'a') test_case([['a', '2a', 
1], ['b', '2a', 2]], 'b') test_case([['a', '2a', 2], ['b', '2a', 1]], 'a') test_case([['a', '2a', 1], ['b', '2a', 1]], 'a') test_case([['a', None, 0]], 'a') test_case([['a', None, 0], ['b', '2a', 1]], 'b') def test_detach_replica(self): slave = Mock() master = Mock() with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[slave, master]): self.manager.detach_replica(self.context, 'some-inst-id') slave.detach_replica.assert_called_with(master) @patch.object(Manager, '_set_task_status') def test_promote_to_replica_source(self, mock_set_task_status): with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[self.mock_slave1, self.mock_old_master, self.mock_slave2]): self.manager.promote_to_replica_source( self.context, 'some-inst-id') self.mock_slave1.detach_replica.assert_called_with( self.mock_old_master, for_failover=True) self.mock_old_master.attach_replica.assert_called_with( self.mock_slave1) self.mock_slave1.make_read_only.assert_called_with(False) self.mock_slave2.detach_replica.assert_called_with( self.mock_old_master, for_failover=True) self.mock_slave2.attach_replica.assert_called_with(self.mock_slave1) self.mock_old_master.demote_replication_master.assert_any_call() mock_set_task_status.assert_called_with(([self.mock_old_master] + [self.mock_slave1, self.mock_slave2]), InstanceTasks.NONE) @patch.object(Manager, '_set_task_status') @patch.object(Manager, '_most_current_replica') def test_eject_replica_source(self, mock_most_current_replica, mock_set_task_status): with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[self.mock_master, self.mock_slave1, self.mock_slave2]): self.manager.eject_replica_source(self.context, 'some-inst-id') mock_most_current_replica.assert_called_with(self.mock_master, [self.mock_slave1, self.mock_slave2]) mock_set_task_status.assert_called_with(([self.mock_master] + [self.mock_slave1, self.mock_slave2]), InstanceTasks.NONE) @patch.object(Manager, '_set_task_status') @patch('trove.taskmanager.manager.LOG') def test_exception_TroveError_promote_to_replica_source(self, *args): self.mock_slave2.detach_replica = Mock(side_effect=TroveError) with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[self.mock_slave1, self.mock_old_master, self.mock_slave2]): self.assertRaises(ReplicationSlaveAttachError, self.manager.promote_to_replica_source, self.context, 'some-inst-id') @patch.object(Manager, '_set_task_status') @patch.object(Manager, '_most_current_replica') @patch('trove.taskmanager.manager.LOG') def test_exception_TroveError_eject_replica_source( self, mock_logging, mock_most_current_replica, mock_set_task_status): self.mock_slave2.detach_replica = Mock(side_effect=TroveError) mock_most_current_replica.return_value = self.mock_slave1 with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[self.mock_master, self.mock_slave1, self.mock_slave2]): self.assertRaises(ReplicationSlaveAttachError, self.manager.eject_replica_source, self.context, 'some-inst-id') @patch.object(Manager, '_set_task_status') def test_error_promote_to_replica_source(self, *args): self.mock_slave2.detach_replica = Mock( side_effect=RuntimeError('Error')) with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[self.mock_slave1, self.mock_old_master, self.mock_slave2]): self.assertRaisesRegex(RuntimeError, 'Error', self.manager.promote_to_replica_source, self.context, 'some-inst-id') @patch('trove.taskmanager.manager.LOG') def test_error_demote_replication_master_promote_to_replica_source( self, mock_logging): 
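# If demoting the old master fails mid-promotion, the manager is expected
# to surface ReplicationSlaveAttachError rather than the raw RuntimeError.
# The failure is injected with mock's side_effect-raises idiom; a minimal
# standalone sketch of that idiom (the name `m` is illustrative only):
#
#     from mock import Mock
#     m = Mock(side_effect=RuntimeError('Error'))
#     m()  # raises RuntimeError at call time, not when the Mock is built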
self.mock_old_master.demote_replication_master = Mock( side_effect=RuntimeError('Error')) with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[self.mock_slave1, self.mock_old_master, self.mock_slave2]): self.assertRaises(ReplicationSlaveAttachError, self.manager.promote_to_replica_source, self.context, 'some-inst-id') @patch.object(Manager, '_set_task_status') @patch.object(Manager, '_most_current_replica') def test_error_eject_replica_source(self, mock_most_current_replica, mock_set_task_status): self.mock_slave2.detach_replica = Mock( side_effect=RuntimeError('Error')) mock_most_current_replica.return_value = self.mock_slave1 with patch.object(models.BuiltInstanceTasks, 'load', side_effect=[self.mock_master, self.mock_slave1, self.mock_slave2]): self.assertRaisesRegex(RuntimeError, 'Error', self.manager.eject_replica_source, self.context, 'some-inst-id') @patch.object(Backup, 'delete') @patch.object(models.BuiltInstanceTasks, 'load') def test_create_replication_slave(self, mock_load, mock_backup_delete): mock_tasks = Mock() mock_snapshot = {'dataset': {'snapshot_id': 'test-id'}} mock_tasks.get_replication_master_snapshot = Mock( return_value=mock_snapshot) mock_flavor = Mock() with patch.object(models.FreshInstanceTasks, 'load', return_value=mock_tasks): self.manager.create_instance(self.context, ['id1'], Mock(), mock_flavor, Mock(), None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'some_password', None, Mock(), 'some-master-id', None, None, None, None) mock_tasks.get_replication_master_snapshot.assert_called_with( self.context, 'some-master-id', mock_flavor, 'temp-backup-id', replica_number=1) mock_backup_delete.assert_called_with(self.context, 'test-id') @patch.object(models.FreshInstanceTasks, 'load') @patch.object(Backup, 'delete') @patch.object(models.BuiltInstanceTasks, 'load') @patch('trove.taskmanager.manager.LOG') def test_exception_create_replication_slave(self, mock_logging, mock_tasks, mock_delete, mock_load): mock_load.return_value.create_instance = Mock(side_effect=TroveError) self.assertRaises(TroveError, self.manager.create_instance, self.context, ['id1', 'id2'], Mock(), Mock(), Mock(), None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'some_password', None, Mock(), 'some-master-id', None, None, None, None) def test_AttributeError_create_instance(self): self.assertRaisesRegex( AttributeError, 'Cannot create multiple non-replica instances.', self.manager.create_instance, self.context, ['id1', 'id2'], Mock(), Mock(), Mock(), None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'some_password', None, Mock(), None, None, None, None, None) def test_create_instance(self): mock_tasks = Mock() mock_flavor = Mock() mock_override = Mock() mock_csg = Mock() type(mock_csg.return_value).id = PropertyMock( return_value='sg-id') with patch.object(models.FreshInstanceTasks, 'load', return_value=mock_tasks): with patch.object(srv_grp.ServerGroup, 'create', mock_csg): self.manager.create_instance( self.context, 'id1', 'inst1', mock_flavor, 'mysql-image-id', None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'password', None, mock_override, None, None, None, None, 'affinity') mock_tasks.create_instance.assert_called_with(mock_flavor, 'mysql-image-id', None, None, 'mysql', 'mysql-server', 2, 'temp-backup-id', None, 'password', None, mock_override, None, None, None, None, {'group': 'sg-id'}, access=None) mock_tasks.wait_for_instance.assert_called_with(36000, mock_flavor) def test_create_cluster(self): mock_tasks = Mock() with 
patch.object(models, 'load_cluster_tasks', return_value=mock_tasks): self.manager.create_cluster(self.context, 'some-cluster-id') mock_tasks.create_cluster.assert_called_with(self.context, 'some-cluster-id') def test_delete_cluster(self): mock_tasks = Mock() with patch.object(models, 'load_cluster_tasks', return_value=mock_tasks): self.manager.delete_cluster(self.context, 'some-cluster-id') mock_tasks.delete_cluster.assert_called_with(self.context, 'some-cluster-id') def test_shrink_cluster_with_success(self): self._assert_shrink_cluster(True) def test_shrink_cluster_with_error(self): self._assert_shrink_cluster(False) @patch('trove.taskmanager.manager.EndNotification') @patch('trove.taskmanager.manager.models.load_cluster_tasks') def _assert_shrink_cluster(self, success, mock_load, mock_notification): if success: mock_load.side_effect = Mock() else: mock_load.side_effect = Exception end_notification = MagicMock() mock_notification.return_value = end_notification context = Mock() cluster_id = Mock() instance_ids = Mock() try: self.manager.shrink_cluster(context, cluster_id, instance_ids) self.assertTrue(success) except Exception: self.assertFalse(success) mock_load.assert_called_once_with(context, cluster_id) mock_notification.assert_called_once_with(context, cluster_id=cluster_id, instance_ids=instance_ids) exit_error_type = end_notification.__exit__.call_args_list[0][0][0] if success: self.assertFalse(exit_error_type) else: self.assertTrue(exit_error_type) class TestTaskManagerService(trove_testtools.TestCase): def test_app_factory(self): test_service = service.app_factory(Mock()) self.assertIsInstance(test_service, service.TaskService) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/unittests/taskmanager/test_models.py0000644000175000017500000014337200000000000025715 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
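# The tests in this module exercise the taskmanager model classes
# (FreshInstanceTasks, BuiltInstanceTasks, ResizeVolumeAction, BackupTasks)
# against hand-rolled fakes and MagicMock stand-ins for the nova, cinder,
# neutron and swift clients, so no real OpenStack services are required.
# The recurring idiom is attribute substitution: a fake client object is
# assigned over the real client attribute before the code under test runs.
# A minimal sketch of that idiom, with purely illustrative names:
#
#     from mock import Mock
#     fake_nova = Mock()
#     fake_nova.servers.create.return_value = 'server_id'
#     some_task.nova_client = fake_nova  # hypothetical attribute override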
import os from tempfile import NamedTemporaryFile from cinderclient import exceptions as cinder_exceptions import cinderclient.v2.client as cinderclient from cinderclient.v2 import volumes as cinderclient_volumes import mock from mock import Mock, MagicMock, patch, PropertyMock, call import neutronclient.v2_0.client as neutronclient from novaclient import exceptions as nova_exceptions import novaclient.v2.flavors import novaclient.v2.servers from oslo_config import cfg from swiftclient.client import ClientException from testtools.matchers import Equals, Is import trove.backup.models from trove.backup import models as backup_models from trove.backup import state import trove.common.context from trove.common.exception import GuestError from trove.common.exception import PollTimeOut from trove.common.exception import TroveError from trove.common.instance import ServiceStatuses from trove.common.notification import TroveInstanceModifyVolume import trove.common.template as template from trove.common import timeutils from trove.common import utils from trove.datastore import models as datastore_models import trove.db.models from trove.extensions.common import models as common_models from trove.extensions.mysql import models as mysql_models import trove.guestagent.api from trove.instance.models import BaseInstance from trove.instance.models import DBInstance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceStatus from trove.instance.tasks import InstanceTasks from trove import rpc from trove.taskmanager import models as taskmanager_models from trove.tests.unittests import trove_testtools from trove.tests.unittests.util import util INST_ID = 'dbinst-id-1' VOLUME_ID = 'volume-id-1' class FakeOptGroup(object): def __init__(self, tcp_ports=['3306', '3301-3307'], udp_ports=[], icmp=False): self.tcp_ports = tcp_ports self.udp_ports = udp_ports self.icmp = icmp class fake_Server(object): def __init__(self): self.id = None self.name = None self.image_id = None self.flavor_id = None self.files = None self.userdata = None self.block_device_mapping_v2 = None self.status = 'HEALTHY' self.key_name = None class fake_ServerManager(object): def create(self, name, image_id, flavor_id, files, userdata, block_device_mapping_v2=None, availability_zone=None, nics=None, config_drive=False, scheduler_hints=None, key_name=None): server = fake_Server() server.id = "server_id" server.name = name server.image_id = image_id server.flavor_id = flavor_id server.files = files server.userdata = userdata server.block_device_mapping_v2 = block_device_mapping_v2 server.availability_zone = availability_zone server.nics = nics server.key_name = key_name return server class fake_nova_client(object): def __init__(self): self.servers = fake_ServerManager() class fake_InstanceServiceStatus(object): _instance = None def __init__(self): self.deleted = False self.status = None pass def set_status(self, status): self.status = status pass def get_status(self): return self.status @classmethod def find_by(cls, **kwargs): if not cls._instance: cls._instance = fake_InstanceServiceStatus() return cls._instance def save(self): pass def delete(self): self.deleted = True pass def is_deleted(self): return self.deleted class fake_DBInstance(object): _instance = None def __init__(self): self.deleted = False pass @classmethod def find_by(cls, **kwargs): if not cls._instance: cls._instance = fake_DBInstance() return cls._instance def set_task_status(self, status): self.status = status pass def 
get_task_status(self): return self.status def save(self): pass def delete(self): self.deleted = True pass def is_deleted(self): return self.deleted class BaseFreshInstanceTasksTest(trove_testtools.TestCase): def setUp(self): super(BaseFreshInstanceTasksTest, self).setUp() mock_instance = patch('trove.instance.models.FreshInstance') mock_instance.start() self.addCleanup(mock_instance.stop) mock_instance.id = Mock(return_value='instance_id') mock_instance.tenant_id = Mock(return_value="tenant_id") mock_instance.hostname = Mock(return_value="hostname") mock_instance.name = Mock(return_value='name') mock_instance.nova_client = Mock( return_value=fake_nova_client()) mock_datastore_v = patch( 'trove.datastore.models.DatastoreVersion') mock_datastore_v.start() self.addCleanup(mock_datastore_v.stop) mock_datastore = patch( 'trove.datastore.models.Datastore') mock_datastore.start() self.addCleanup(mock_datastore.stop) taskmanager_models.FreshInstanceTasks.nova_client = fake_nova_client() self.orig_ISS_find_by = InstanceServiceStatus.find_by self.orig_DBI_find_by = DBInstance.find_by self.userdata = "hello moto" self.guestconfig_content = "guest config" with NamedTemporaryFile(mode="w", suffix=".cloudinit", delete=False) as f: self.cloudinit = f.name f.write(self.userdata) with NamedTemporaryFile(mode="w", delete=False) as f: self.guestconfig = f.name f.write(self.guestconfig_content) self.freshinstancetasks = taskmanager_models.FreshInstanceTasks( None, Mock(), None, None) def tearDown(self): super(BaseFreshInstanceTasksTest, self).tearDown() os.remove(self.cloudinit) os.remove(self.guestconfig) InstanceServiceStatus.find_by = self.orig_ISS_find_by DBInstance.find_by = self.orig_DBI_find_by class FreshInstanceTasksTest(BaseFreshInstanceTasksTest): def test_create_instance_userdata(self): cloudinit_location = os.path.dirname(self.cloudinit) datastore_manager = os.path.splitext(os.path.basename(self. 
cloudinit))[0] cfg.CONF.set_override('cloudinit_location', cloudinit_location) server = self.freshinstancetasks._create_server( None, None, datastore_manager, None, None, None) self.assertEqual(server.userdata, self.userdata) def test_create_instance_with_keypair(self): cfg.CONF.set_override('nova_keypair', 'fake_keypair') server = self.freshinstancetasks._create_server( None, None, None, None, None, None) self.assertEqual('fake_keypair', server.key_name) @patch.object(DBInstance, 'get_by') def test_create_instance_guestconfig(self, patch_get_by): cfg.CONF.set_override('guest_config', self.guestconfig) cfg.CONF.set_override('guest_info', 'guest_info.conf') cfg.CONF.set_override('injected_config_location', '/etc/trove/conf.d') # execute files = self.freshinstancetasks.get_injected_files("test") # verify self.assertTrue( '/etc/trove/conf.d/guest_info.conf' in files) self.assertTrue( '/etc/trove/conf.d/trove-guestagent.conf' in files) self.assertEqual( self.guestconfig_content, files['/etc/trove/conf.d/trove-guestagent.conf']) @patch.object(DBInstance, 'get_by') def test_create_instance_guestconfig_compat(self, patch_get_by): cfg.CONF.set_override('guest_config', self.guestconfig) cfg.CONF.set_override('guest_info', '/etc/guest_info') cfg.CONF.set_override('injected_config_location', '/etc') # execute files = self.freshinstancetasks.get_injected_files("test") # verify self.assertTrue( '/etc/guest_info' in files) self.assertTrue( '/etc/trove-guestagent.conf' in files) self.assertEqual( self.guestconfig_content, files['/etc/trove-guestagent.conf']) def test_create_instance_with_az_kwarg(self): # execute server = self.freshinstancetasks._create_server( None, None, None, None, availability_zone='nova', nics=None) # verify self.assertIsNotNone(server) def test_create_instance_with_az(self): # execute server = self.freshinstancetasks._create_server( None, None, None, None, 'nova', None) # verify self.assertIsNotNone(server) def test_create_instance_with_az_none(self): # execute server = self.freshinstancetasks._create_server( None, None, None, None, None, None) # verify self.assertIsNotNone(server) @patch.object(taskmanager_models.FreshInstanceTasks, 'hostname', new_callable=PropertyMock, return_value='fake-hostname') def test_servers_create_block_device_mapping_v2(self, mock_hostname): self.freshinstancetasks._prepare_userdata = Mock(return_value=None) mock_nova_client = self.freshinstancetasks.nova_client = Mock() mock_servers_create = mock_nova_client.servers.create self.freshinstancetasks._create_server('fake-flavor', 'fake-image', 'mysql', None, None, None) mock_servers_create.assert_called_with( 'fake-hostname', 'fake-image', 'fake-flavor', files={}, userdata=None, block_device_mapping_v2=None, availability_zone=None, nics=None, config_drive=True, scheduler_hints=None, key_name=None ) @patch.object(InstanceServiceStatus, 'find_by', return_value=fake_InstanceServiceStatus.find_by()) @patch.object(DBInstance, 'find_by', return_value=fake_DBInstance.find_by()) @patch('trove.taskmanager.models.LOG') def test_update_status_of_instance_failure( self, mock_logging, dbi_find_by_mock, iss_find_by_mock): self.freshinstancetasks.update_statuses_on_time_out() self.assertEqual(ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT, fake_InstanceServiceStatus.find_by().get_status()) self.assertEqual(InstanceTasks.BUILDING_ERROR_TIMEOUT_GA, fake_DBInstance.find_by().get_task_status()) @patch.object(BaseInstance, 'update_db') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_dns_entry') 
@patch.object(taskmanager_models.FreshInstanceTasks, 'get_injected_files') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_server') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_secgroup') @patch.object(taskmanager_models.FreshInstanceTasks, '_build_volume_info') @patch.object(taskmanager_models.FreshInstanceTasks, '_guest_prepare') @patch.object(template, 'SingleInstanceConfigTemplate') @patch('trove.taskmanager.models.FreshInstanceTasks._create_port') def test_create_instance(self, mock_create_port, mock_single_instance_template, mock_guest_prepare, mock_build_volume_info, mock_create_secgroup, mock_create_server, mock_get_injected_files, *args): cfg.CONF.set_override('tcp_ports', ['3306', '3301-3307'], group='mysql') mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'} config_content = {'config_contents': 'some junk'} mock_single_instance_template.return_value.config_contents = ( config_content) overrides = Mock() mock_create_secgroup.return_value = 'fake_security_group_id' mock_create_port.return_value = 'fake-port-id' self.freshinstancetasks.create_instance( mock_flavor, 'mysql-image-id', None, None, 'mysql', 'mysql-server', 2, None, None, None, [{'net-id': 'fake-net-id'}], overrides, None, None, 'volume_type', None, {'group': 'sg-id'} ) mock_create_secgroup.assert_called_with('mysql', []) mock_create_port.assert_called_once_with( 'fake-net-id', ['fake_security_group_id'], is_mgmt=False, is_public=False ) mock_build_volume_info.assert_called_with( 'mysql', volume_size=2, volume_type='volume_type' ) mock_guest_prepare.assert_called_with( 768, mock_build_volume_info(), 'mysql-server', None, None, None, config_content, None, overrides, None, None, None ) mock_create_server.assert_called_with( 8, 'mysql-image-id', 'mysql', mock_build_volume_info()['block_device'], None, [{'port-id': 'fake-port-id'}], mock_get_injected_files(), {'group': 'sg-id'} ) @patch.object(BaseInstance, 'update_db') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_dns_entry') @patch.object(taskmanager_models.FreshInstanceTasks, 'get_injected_files') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_server') @patch.object(taskmanager_models.FreshInstanceTasks, '_build_volume_info') @patch.object(taskmanager_models.FreshInstanceTasks, '_guest_prepare') @patch.object(template, 'SingleInstanceConfigTemplate') @patch('trove.common.clients_admin.neutron_client_trove_admin') def test_create_instance_with_mgmt_port(self, mock_neutron_client, mock_single_instance_template, mock_guest_prepare, mock_build_volume_info, mock_create_server, mock_get_injected_files, *args): self.patch_conf_property('management_networks', ['fake-mgmt-uuid']) mock_client = Mock() mock_client.create_security_group.return_value = { 'security_group': {'id': 'fake-sg-id'} } mock_client.create_port.side_effect = [ {'port': {'id': 'fake-mgmt-port-id'}}, {'port': {'id': 'fake-user-port-id'}} ] mock_client.list_networks.return_value = { 'networks': [{'id': 'fake-public-net-id'}] } mock_neutron_client.return_value = mock_client mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'} config_content = {'config_contents': 'some junk'} mock_single_instance_template.return_value.config_contents = ( config_content) self.freshinstancetasks.create_instance( mock_flavor, 'mysql-image-id', None, None, 'mysql', 'mysql-server', 2, None, None, None, [{'net-id': 'fake-net-uuid'}, {'net-id': 'fake-mgmt-uuid'}], mock.ANY, None, None, 'volume_type', None, {'group': 'sg-id'}, access={'is_public': True, 
'allowed_cidrs': ['192.168.0.1/24']} ) mock_build_volume_info.assert_called_with( 'mysql', volume_size=2, volume_type='volume_type' ) mock_guest_prepare.assert_called_with( 768, mock_build_volume_info(), 'mysql-server', None, None, None, config_content, None, mock.ANY, None, None, None) mock_create_server.assert_called_with( 8, 'mysql-image-id', 'mysql', mock_build_volume_info()['block_device'], None, [ {'port-id': 'fake-user-port-id'}, {'port-id': 'fake-mgmt-port-id'} ], mock_get_injected_files(), {'group': 'sg-id'} ) create_floatingip_param = { "floatingip": { 'floating_network_id': 'fake-public-net-id', 'port_id': 'fake-user-port-id', } } mock_client.create_floatingip.assert_called_once_with( create_floatingip_param ) @patch.object(BaseInstance, 'update_db') @patch.object(taskmanager_models, 'create_cinder_client') @patch.object(taskmanager_models.FreshInstanceTasks, 'device_path', new_callable=PropertyMock, return_value='fake-device-path') @patch.object(taskmanager_models.FreshInstanceTasks, 'volume_support', new_callable=PropertyMock, return_value=True) def test_build_volume_info(self, mock_volume_support, mock_device_path, mock_create_cinderclient, mock_update_db): cfg.CONF.set_override('block_device_mapping', 'fake-bdm') cfg.CONF.set_override('mount_point', 'fake-mount-point', group='mysql') mock_cinderclient = mock_create_cinderclient.return_value mock_volume = Mock(name='fake-vol', id='fake-vol-id', size=2, status='available') mock_cinderclient.volumes.create.return_value = mock_volume mock_cinderclient.volumes.get.return_value = mock_volume volume_info = self.freshinstancetasks._build_volume_info( 'mysql', volume_size=2, volume_type='volume_type') expected = { 'block_device': [{ 'uuid': 'fake-vol-id', 'source_type': 'volume', 'destination_type': 'volume', 'device_name': 'fake-bdm', 'volume_size': 2, 'delete_on_termination': True}], 'device_path': 'fake-device-path', 'mount_point': 'fake-mount-point' } self.assertEqual(expected, volume_info) @patch.object(BaseInstance, 'update_db') @patch.object(taskmanager_models.FreshInstanceTasks, '_create_volume') @patch.object(taskmanager_models.FreshInstanceTasks, 'device_path', new_callable=PropertyMock, return_value='fake-device-path') @patch.object(taskmanager_models.FreshInstanceTasks, 'volume_support', new_callable=PropertyMock, return_value=False) def test_build_volume_info_without_volume(self, mock_volume_support, mock_device_path, mock_create_volume, mock_update_db): cfg.CONF.set_override('mount_point', 'fake-mount-point', group='mysql') volume_info = self.freshinstancetasks._build_volume_info('mysql') self.assertFalse(mock_create_volume.called) expected = { 'block_device': None, 'device_path': 'fake-device-path', 'mount_point': 'fake-mount-point' } self.assertEqual(expected, volume_info) @patch.object(trove.guestagent.api.API, 'attach_replication_slave') @patch.object(rpc, 'get_client') @patch.object(DBInstance, 'get_by') def test_attach_replication_slave(self, mock_get_by, mock_get_client, mock_attach_replication_slave): mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'} snapshot = {'replication_strategy': 'MysqlGTIDReplication', 'master': {'id': 'master-id'}} config_content = {'config_contents': 'some junk'} replica_config = MagicMock() replica_config.config_contents = config_content with patch.object(taskmanager_models.FreshInstanceTasks, '_render_replica_config', return_value=replica_config): self.freshinstancetasks.attach_replication_slave(snapshot, mock_flavor) 
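# patch.object used as a context manager restores the real
# _render_replica_config as soon as the with-block exits, so the stub's
# config_contents only flows into the attach_replication_slave call above,
# which the assertion below verifies. A minimal sketch of that scoping
# rule (class and method names here are illustrative only):
#
#     from mock import patch
#     class C(object):
#         def f(self):
#             return 'real'
#     with patch.object(C, 'f', return_value='fake'):
#         assert C().f() == 'fake'
#     assert C().f() == 'real'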
mock_attach_replication_slave.assert_called_with(snapshot, config_content) @patch.object(BaseInstance, 'update_db') @patch.object(rpc, 'get_client') @patch.object(taskmanager_models.FreshInstanceTasks, '_render_replica_config') @patch.object(trove.guestagent.api.API, 'attach_replication_slave', side_effect=GuestError) @patch('trove.taskmanager.models.LOG') @patch.object(DBInstance, 'get_by') def test_error_attach_replication_slave(self, *args): mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'} snapshot = {'replication_strategy': 'MysqlGTIDReplication', 'master': {'id': 'master-id'}} self.assertRaisesRegex( TroveError, 'Error attaching instance', self.freshinstancetasks.attach_replication_slave, snapshot, mock_flavor) class ResizeVolumeTest(trove_testtools.TestCase): def setUp(self): super(ResizeVolumeTest, self).setUp() self.utils_poll_until_patch = patch.object(utils, 'poll_until') self.utils_poll_until_mock = self.utils_poll_until_patch.start() self.addCleanup(self.utils_poll_until_patch.stop) self.timeutils_isotime_patch = patch.object(timeutils, 'isotime') self.timeutils_isotime_mock = self.timeutils_isotime_patch.start() self.addCleanup(self.timeutils_isotime_patch.stop) self.instance = Mock() self.old_vol_size = 1 self.new_vol_size = 2 self.action = taskmanager_models.ResizeVolumeAction(self.instance, self.old_vol_size, self.new_vol_size) class FakeGroup(object): def __init__(self): self.mount_point = 'var/lib/mysql' self.device_path = '/dev/vdb' self.taskmanager_models_CONF = patch.object(taskmanager_models, 'CONF') self.mock_conf = self.taskmanager_models_CONF.start() self.mock_conf.get = Mock(return_value=FakeGroup()) self.addCleanup(self.taskmanager_models_CONF.stop) def tearDown(self): super(ResizeVolumeTest, self).tearDown() @patch('trove.taskmanager.models.LOG') def test_resize_volume_unmount_exception(self, mock_logging): self.instance.guest.unmount_volume = Mock( side_effect=GuestError("test exception")) self.assertRaises(GuestError, self.action._unmount_volume, recover_func=self.action._recover_restart) self.assertEqual(1, self.instance.restart.call_count) self.instance.guest.unmount_volume.side_effect = None self.instance.reset_mock() @patch('trove.taskmanager.models.LOG') def test_resize_volume_detach_exception(self, mock_logging): self.instance.nova_client.volumes.delete_server_volume = Mock( side_effect=nova_exceptions.ClientException("test exception")) self.assertRaises(nova_exceptions.ClientException, self.action._detach_volume, recover_func=self.action._recover_mount_restart) self.assertEqual(1, self.instance.guest.mount_volume.call_count) self.assertEqual(1, self.instance.restart.call_count) self.instance.nova_client.volumes.delete_server_volume.side_effect = ( None) self.instance.reset_mock() @patch('trove.taskmanager.models.LOG') def test_resize_volume_extend_exception(self, mock_logging): self.instance.volume_client.volumes.extend = Mock( side_effect=cinder_exceptions.ClientException("test exception")) self.assertRaises(cinder_exceptions.ClientException, self.action._extend, recover_func=self.action._recover_full) attach_count = ( self.instance.nova_client.volumes.create_server_volume.call_count) self.assertEqual(1, attach_count) self.assertEqual(1, self.instance.guest.mount_volume.call_count) self.assertEqual(1, self.instance.restart.call_count) self.instance.volume_client.volumes.extend.side_effect = None self.instance.reset_mock() @patch('trove.taskmanager.models.LOG') def test_resize_volume_verify_extend_no_volume(self, mock_logging): 
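# _verify_extend is expected to treat a missing volume record as a hard
# failure: with volumes.get stubbed to return None below, there is nothing
# to check the new size against, so a cinder ClientException must surface.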
self.instance.volume_client.volumes.get = Mock( return_value=None) self.assertRaises(cinder_exceptions.ClientException, self.action._verify_extend) self.instance.reset_mock() @patch('trove.taskmanager.models.LOG') def test_resize_volume_poll_timeout(self, mock_logging): utils.poll_until = Mock(side_effect=PollTimeOut) self.assertRaises(PollTimeOut, self.action._verify_extend) self.assertEqual(2, self.instance.volume_client.volumes.get.call_count) utils.poll_until.side_effect = None self.instance.reset_mock() @patch.object(TroveInstanceModifyVolume, 'notify') def test_resize_volume_active_server_succeeds(self, *args): server = Mock(status=InstanceStatus.ACTIVE) self.instance.attach_mock(server, 'server') self.action.execute() self.assertEqual(1, self.instance.guest.stop_db.call_count) self.assertEqual(1, self.instance.guest.unmount_volume.call_count) detach_count = ( self.instance.nova_client.volumes.delete_server_volume.call_count) self.assertEqual(1, detach_count) extend_count = self.instance.volume_client.volumes.extend.call_count self.assertEqual(1, extend_count) attach_count = ( self.instance.nova_client.volumes.create_server_volume.call_count) self.assertEqual(1, attach_count) self.assertEqual(1, self.instance.guest.resize_fs.call_count) self.assertEqual(1, self.instance.guest.mount_volume.call_count) self.assertEqual(1, self.instance.restart.call_count) self.instance.reset_mock() def test_resize_volume_server_error_fails(self): server = Mock(status=InstanceStatus.ERROR) self.instance.attach_mock(server, 'server') self.assertRaises(TroveError, self.action.execute) self.instance.reset_mock() class BuiltInstanceTasksTest(trove_testtools.TestCase): def get_inst_service_status(self, status_id, statuses): answers = [] for i, status in enumerate(statuses): inst_svc_status = InstanceServiceStatus(status, id="%s-%s" % (status_id, i)) inst_svc_status.save = MagicMock(return_value=None) answers.append(inst_svc_status) return answers def _stub_volume_client(self): self.instance_task._volume_client = MagicMock(spec=cinderclient.Client) stub_volume_mgr = MagicMock(spec=cinderclient.volumes.VolumeManager) self.instance_task.volume_client.volumes = stub_volume_mgr stub_volume_mgr.extend = MagicMock(return_value=None) stub_new_volume = cinderclient.volumes.Volume( stub_volume_mgr, {'status': 'available', 'size': 2}, True) stub_volume_mgr.get = MagicMock(return_value=stub_new_volume) stub_volume_mgr.attach = MagicMock(return_value=None) def _stub_neutron_client(self): stub_neutron_client = self.instance_task._neutron_client = MagicMock( spec=neutronclient.Client) stub_neutron_client.list_floatingips = MagicMock( return_value={'floatingips': [{ 'floating_ip_address': '192.168.10.1', 'id': 'fake-floatingip-id'}]}) stub_neutron_client.list_ports = MagicMock( return_value={'ports': [{ 'fixed_ips': [{'ip_address': '10.0.0.1'}, {'ip_address': 'fd4a:7bef:d1ed:1::1'}], 'id': 'fake-port-id'}]}) def setUp(self): super(BuiltInstanceTasksTest, self).setUp() self.new_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'} stub_nova_server = MagicMock() self.rpc_patches = patch.multiple( rpc, get_notifier=MagicMock(), get_client=MagicMock()) self.rpc_mocks = self.rpc_patches.start() self.addCleanup(self.rpc_patches.stop) db_instance = DBInstance(InstanceTasks.NONE, id=INST_ID, name='resize-inst-name', datastore_version_id='1', datastore_id='id-1', flavor_id='6', manager='mysql', created=timeutils.utcnow(), updated=timeutils.utcnow(), compute_instance_id='computeinst-id-1', tenant_id='testresize-tenant-id', 
volume_size='1', volume_id=VOLUME_ID) # this is used during the final check of whether the resize was successful db_instance.server_status = 'HEALTHY' self.db_instance = db_instance self.dm_dv_load_by_uuid_patch = patch.object( datastore_models.DatastoreVersion, 'load_by_uuid', MagicMock( return_value=datastore_models.DatastoreVersion(db_instance))) self.dm_dv_load_by_uuid_mock = self.dm_dv_load_by_uuid_patch.start() self.addCleanup(self.dm_dv_load_by_uuid_patch.stop) self.dm_ds_load_patch = patch.object( datastore_models.Datastore, 'load', MagicMock( return_value=datastore_models.Datastore(db_instance))) self.dm_ds_load_mock = self.dm_ds_load_patch.start() self.addCleanup(self.dm_ds_load_patch.stop) self.instance_task = taskmanager_models.BuiltInstanceTasks( trove.common.context.TroveContext(), db_instance, stub_nova_server, InstanceServiceStatus(ServiceStatuses.RUNNING, id='inst-stat-id-0')) self.instance_task._guest = MagicMock(spec=trove.guestagent.api.API) self.instance_task._nova_client = MagicMock( spec=novaclient.client) self.stub_server_mgr = MagicMock( spec=novaclient.v2.servers.ServerManager) self.stub_running_server = MagicMock( spec=novaclient.v2.servers.Server) self.stub_running_server.status = 'HEALTHY' self.stub_running_server.flavor = {'id': 6, 'ram': 512} self.stub_verifying_server = MagicMock( spec=novaclient.v2.servers.Server) self.stub_verifying_server.status = 'VERIFY_RESIZE' self.stub_verifying_server.flavor = {'id': 8, 'ram': 768} self.stub_server_mgr.get = MagicMock( return_value=self.stub_verifying_server) self.instance_task._nova_client.servers = self.stub_server_mgr stub_flavor_manager = MagicMock( spec=novaclient.v2.flavors.FlavorManager) self.instance_task._nova_client.flavors = stub_flavor_manager nova_flavor = novaclient.v2.flavors.Flavor(stub_flavor_manager, self.new_flavor, True) stub_flavor_manager.get = MagicMock(return_value=nova_flavor) self.instance_task._volume_client = MagicMock(spec=cinderclient) self.instance_task._volume_client.volumes = Mock( spec=cinderclient_volumes.VolumeManager) answers = (status for status in self.get_inst_service_status('inst_stat-id', [ServiceStatuses.SHUTDOWN, ServiceStatuses.RUNNING, ServiceStatuses.RUNNING, ServiceStatuses.RUNNING])) def side_effect_func(*args, **kwargs): if 'instance_id' in kwargs: return next(answers) elif ('id' in kwargs and 'deleted' in kwargs and not kwargs['deleted']): return db_instance else: return MagicMock() self.dbm_dbmb_patch = patch.object( trove.db.models.DatabaseModelBase, 'find_by', MagicMock(side_effect=side_effect_func)) self.dbm_dbmb_mock = self.dbm_dbmb_patch.start() self.addCleanup(self.dbm_dbmb_patch.stop) self.template_patch = patch.object( template, 'SingleInstanceConfigTemplate', MagicMock(spec=template.SingleInstanceConfigTemplate)) self.template_mock = self.template_patch.start() self.addCleanup(self.template_patch.stop) db_instance.save = MagicMock(return_value=None) self.tbmb_running_patch = patch.object( trove.backup.models.Backup, 'running', MagicMock(return_value=None)) self.tbmb_running_mock = self.tbmb_running_patch.start() self.addCleanup(self.tbmb_running_patch.stop) if 'volume' in self._testMethodName: self._stub_volume_client() if ('floating_ips' in self._testMethodName or 'public_ips' in self._testMethodName): self._stub_neutron_client() def tearDown(self): super(BuiltInstanceTasksTest, self).tearDown() def test_resize_flavor(self): orig_server = self.instance_task.server self.instance_task.resize_flavor({'id': 1, 'ram': 512}, self.new_flavor) # verify
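# A successful flavor resize should swap in a freshly fetched server object
# (hence assertIsNot against orig_server), stop the database before the
# nova resize, leave the task status at NONE, and persist the new flavor
# id; each of these is asserted below.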
self.assertIsNot(self.instance_task.server, orig_server) self.instance_task._guest.stop_db.assert_any_call( do_not_start_on_reboot=True) orig_server.resize.assert_any_call(self.new_flavor['id']) self.assertThat(self.db_instance.task_status, Is(InstanceTasks.NONE)) self.assertEqual(1, self.stub_server_mgr.get.call_count) self.assertThat(self.db_instance.flavor_id, Is(self.new_flavor['id'])) @patch('trove.taskmanager.models.LOG') def test_resize_flavor_resize_failure(self, mock_logging): orig_server = self.instance_task.server self.stub_verifying_server.status = 'ERROR' with patch.object(self.instance_task._nova_client.servers, 'get', return_value=self.stub_verifying_server): # execute self.assertRaises(TroveError, self.instance_task.resize_flavor, {'id': 1, 'ram': 512}, self.new_flavor) # verify self.assertTrue(self.stub_server_mgr.get.called) self.assertIs(self.instance_task.server, self.stub_verifying_server) self.instance_task._guest.stop_db.assert_any_call( do_not_start_on_reboot=True) orig_server.resize.assert_any_call(self.new_flavor['id']) self.assertThat(self.db_instance.task_status, Is(InstanceTasks.NONE)) self.assertThat(self.db_instance.flavor_id, Is('6')) @patch.object(utils, 'poll_until') def test_reboot(self, mock_poll): self.instance_task.server.reboot = Mock() self.instance_task.set_datastore_status_to_paused = Mock() self.instance_task.reboot() self.instance_task._guest.stop_db.assert_any_call() self.instance_task.server.reboot.assert_any_call() self.instance_task.set_datastore_status_to_paused.assert_any_call() @patch.object(BaseInstance, 'update_db') def test_detach_replica(self, mock_update_db): with patch.object(self.instance_task, 'reset_task_status') as tr_mock: self.instance_task.detach_replica(Mock(), True) self.instance_task._guest.detach_replica.assert_called_with(True) mock_update_db.assert_called_with(slave_of_id=None) tr_mock.assert_not_called() with patch.object(self.instance_task, 'reset_task_status') as tr_mock: self.instance_task.detach_replica(Mock(), False) self.instance_task._guest.detach_replica.assert_called_with(False) mock_update_db.assert_called_with(slave_of_id=None) tr_mock.assert_called_once_with() @patch.object(BaseInstance, 'update_db') @patch('trove.taskmanager.models.LOG') def test_error_detach_replica(self, mock_logging, mock_update_db): with patch.object(self.instance_task, 'reset_task_status') as tr_mock: with patch.object(self.instance_task._guest, 'detach_replica', side_effect=GuestError): self.assertRaises( GuestError, self.instance_task.detach_replica, Mock(), True) mock_update_db.assert_not_called() tr_mock.assert_not_called() with patch.object(self.instance_task, 'reset_task_status') as tr_mock: with patch.object(self.instance_task._guest, 'detach_replica', side_effect=GuestError): self.assertRaises( GuestError, self.instance_task.detach_replica, Mock(), False) mock_update_db.assert_not_called() tr_mock.assert_called_once_with() @patch.object(BaseInstance, 'update_db') def test_make_read_only(self, mock_update_db): read_only = MagicMock() self.instance_task.make_read_only(read_only) self.instance_task._guest.make_read_only.assert_called_with(read_only) @patch.object(BaseInstance, 'update_db') def test_attach_replica(self, mock_update_db): master = MagicMock() replica_context = trove_testtools.TroveTestContext(self) mock_guest = MagicMock() mock_guest.get_replica_context = Mock(return_value=replica_context) type(master).guest = PropertyMock(return_value=mock_guest) config_content = {'config_contents': 'some junk'} replica_config = 
MagicMock() replica_config.config_contents = config_content with patch.object(taskmanager_models.BuiltInstanceTasks, '_render_replica_config', return_value=replica_config): self.instance_task.attach_replica(master) self.instance_task._guest.attach_replica.assert_called_with( replica_context, config_content) mock_update_db.assert_called_with(slave_of_id=master.id) @patch('trove.taskmanager.models.LOG') def test_error_attach_replica(self, mock_logging): with patch.object(self.instance_task._guest, 'attach_replica', side_effect=GuestError): self.assertRaises(GuestError, self.instance_task.attach_replica, Mock()) def test_get_floating_ips(self): floating_ips = self.instance_task._get_floating_ips() self.assertEqual({'192.168.10.1': 'fake-floatingip-id'}, floating_ips) @patch.object(BaseInstance, 'get_visible_ip_addresses', return_value=['192.168.10.1']) def test_detach_public_ips(self, mock_address): removed_ips = self.instance_task.detach_public_ips() self.assertEqual(['fake-floatingip-id'], removed_ips) mock_update_floatingip = (self.instance_task.neutron_client .update_floatingip) mock_update_floatingip.assert_called_once_with( removed_ips[0], {'floatingip': {'port_id': None}}) def test_attach_public_ips(self): self.instance_task.attach_public_ips(['fake-floatingip-id']) mock_list_ports = (self.instance_task.neutron_client .list_ports) mock_list_ports.assert_called_once_with(device_id='computeinst-id-1') mock_update_floatingip = (self.instance_task.neutron_client .update_floatingip) mock_update_floatingip.assert_called_once_with( 'fake-floatingip-id', {'floatingip': {'port_id': 'fake-port-id', 'fixed_ip_address': '10.0.0.1'}}) @patch.object(BaseInstance, 'update_db') def test_enable_as_master(self, mock_update_db): test_func = self.instance_task._guest.enable_as_master config_content = {'config_contents': 'some junk'} replica_source_config = MagicMock() replica_source_config.config_contents = config_content with patch.object(self.instance_task, '_render_replica_source_config', return_value=replica_source_config): self.instance_task.enable_as_master() mock_update_db.assert_called_with(slave_of_id=None) test_func.assert_called_with(config_content) def test_get_last_txn(self): self.instance_task.get_last_txn() self.instance_task._guest.get_last_txn.assert_any_call() def test_get_latest_txn_id(self): self.instance_task.get_latest_txn_id() self.instance_task._guest.get_latest_txn_id.assert_any_call() def test_wait_for_txn(self): self.instance_task.wait_for_txn(None) self.instance_task._guest.wait_for_txn.assert_not_called() txn = Mock() self.instance_task.wait_for_txn(txn) self.instance_task._guest.wait_for_txn.assert_called_with(txn) def test_cleanup_source_on_replica_detach(self): test_func = self.instance_task._guest.cleanup_source_on_replica_detach replica_info = Mock() self.instance_task.cleanup_source_on_replica_detach(replica_info) test_func.assert_called_with(replica_info) def test_demote_replication_master(self): self.instance_task.demote_replication_master() self.instance_task._guest.demote_replication_master.assert_any_call() @patch.multiple(taskmanager_models.BuiltInstanceTasks, get_injected_files=Mock(return_value="the-files")) def test_upgrade(self, *args): pre_rebuild_server = self.instance_task.server dsv = Mock(image_id='foo_image') mock_volume = Mock(attachments=[{'device': '/dev/mock_dev'}]) with patch.object(self.instance_task._volume_client.volumes, "get", Mock(return_value=mock_volume)): mock_server = Mock(status='ACTIVE') with 
patch.object(self.instance_task._nova_client.servers, 'get', Mock(return_value=mock_server)): with patch.multiple(self.instance_task._guest, pre_upgrade=Mock(return_value={}), post_upgrade=Mock()): self.instance_task.upgrade(dsv) self.instance_task._guest.pre_upgrade.assert_called_with() pre_rebuild_server.rebuild.assert_called_with( dsv.image_id, files="the-files") self.instance_task._guest.post_upgrade.assert_called_with( mock_volume.attachments[0]) def test_fix_device_path(self): self.assertEqual("/dev/vdb", self.instance_task. _fix_device_path("vdb")) self.assertEqual("/dev/dev", self.instance_task. _fix_device_path("dev")) self.assertEqual("/dev/vdb/dev", self.instance_task. _fix_device_path("vdb/dev")) class BackupTasksTest(trove_testtools.TestCase): def setUp(self): super(BackupTasksTest, self).setUp() self.backup = backup_models.DBBackup() self.backup.id = 'backup_id' self.backup.name = 'backup_test' self.backup.description = 'test desc' self.backup.location = 'http://xxx/z_CLOUD/12e48.xbstream.gz' self.backup.instance_id = 'instance id' self.backup.created = 'yesterday' self.backup.updated = 'today' self.backup.size = 2.0 self.backup.state = state.BackupState.NEW self.bm_backup_patches = patch.multiple( backup_models.Backup, delete=MagicMock(return_value=None), get_by_id=MagicMock(return_value=self.backup)) self.bm_backup_mocks = self.bm_backup_patches.start() self.addCleanup(self.bm_backup_patches.stop) self.bm_DBBackup_patch = patch.object( backup_models.DBBackup, 'save', MagicMock(return_value=self.backup)) self.bm_DBBackup_mock = self.bm_DBBackup_patch.start() self.addCleanup(self.bm_DBBackup_patch.stop) self.backup.delete = MagicMock(return_value=None) def tearDown(self): super(BackupTasksTest, self).tearDown() def test_delete_backup_nolocation(self): self.backup.location = '' taskmanager_models.BackupTasks.delete_backup('dummy context', self.backup.id) self.backup.delete.assert_any_call() @patch('trove.taskmanager.models.LOG') @patch('trove.common.clients.create_swift_client') def test_delete_backup_fail_delete_manifest(self, mock_swift_client, mock_logging): client_mock = MagicMock() client_mock.head_object.return_value = {} client_mock.delete_object.side_effect = ClientException("foo") mock_swift_client.return_value = client_mock self.assertRaises( TroveError, taskmanager_models.BackupTasks.delete_backup, 'dummy context', self.backup.id ) self.assertFalse(backup_models.Backup.delete.called) self.assertEqual( state.BackupState.DELETE_FAILED, self.backup.state, "backup should be in DELETE_FAILED status" ) def test_parse_manifest(self): manifest = 'container/prefix' cont, prefix = taskmanager_models.BackupTasks._parse_manifest(manifest) self.assertEqual('container', cont) self.assertEqual('prefix', prefix) def test_parse_manifest_bad(self): manifest = 'bad_prefix' cont, prefix = taskmanager_models.BackupTasks._parse_manifest(manifest) self.assertIsNone(cont) self.assertIsNone(prefix) def test_parse_manifest_long(self): manifest = 'container/long/path/to/prefix' cont, prefix = taskmanager_models.BackupTasks._parse_manifest(manifest) self.assertEqual('container', cont) self.assertEqual('long/path/to/prefix', prefix) def test_parse_manifest_short(self): manifest = 'container/' cont, prefix = taskmanager_models.BackupTasks._parse_manifest(manifest) self.assertEqual('container', cont) self.assertEqual('', prefix) class NotifyMixinTest(trove_testtools.TestCase): def test_get_service_id(self): id_map = { 'mysql': '123', 'percona': 'abc' } mixin = taskmanager_models.NotifyMixin()
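# _get_service_id is a plain mapping lookup keyed by the datastore manager
# name; the unknown-manager case (see test_get_service_id_unknown below)
# falls back to the sentinel 'unknown-service-id-error' instead of raising.
# A sketch of the same lookup-with-fallback behaviour, with illustrative
# names rather than the real implementation:
#
#     def get_service_id(manager, id_map):
#         return id_map.get(manager, 'unknown-service-id-error')
#
#     assert get_service_id('mysql', {'mysql': '123'}) == '123'
#     assert get_service_id('m0ng0', {}) == 'unknown-service-id-error'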
self.assertThat(mixin._get_service_id('mysql', id_map), Equals('123')) @patch('trove.taskmanager.models.LOG') def test_get_service_id_unknown(self, mock_logging): id_map = { 'mysql': '123', 'percona': 'abc' } transformer = taskmanager_models.NotifyMixin() self.assertThat(transformer._get_service_id('m0ng0', id_map), Equals('unknown-service-id-error')) class RootReportTest(trove_testtools.TestCase): def setUp(self): super(RootReportTest, self).setUp() util.init_db() def tearDown(self): super(RootReportTest, self).tearDown() def test_report_root_first_time(self): context = Mock() context.user = utils.generate_uuid() report = mysql_models.RootHistory.create( context, utils.generate_uuid()) self.assertIsNotNone(report) def test_report_root_double_create(self): context = Mock() context.user = utils.generate_uuid() id = utils.generate_uuid() history = mysql_models.RootHistory(id, context.user).save() with patch.object(mysql_models.RootHistory, 'load', Mock(return_value=history)): report = mysql_models.RootHistory.create(context, id) self.assertTrue(mysql_models.RootHistory.load.called) self.assertEqual(history.user, report.user) self.assertEqual(history.id, report.id) class ClusterRootTest(trove_testtools.TestCase): @patch.object(common_models.RootHistory, "create") @patch.object(common_models.Root, "create") def test_cluster_root_create(self, root_create, root_history_create): context = Mock() context.user = utils.generate_uuid() id = utils.generate_uuid() password = "rootpassword" cluster_instances = [utils.generate_uuid(), utils.generate_uuid()] common_models.ClusterRoot.create(context, id, password, cluster_instances) root_create.assert_called_with(context, id, password, cluster_instances_list=None) self.assertEqual(2, root_history_create.call_count) calls = [ call(context, cluster_instances[0]), call(context, cluster_instances[1]) ] root_history_create.assert_has_calls(calls) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/unittests/taskmanager/test_vertica_clusters.py0000644000175000017500000002722100000000000030005 0ustar00coreycorey00000000000000# Copyright [2015] Hewlett-Packard Development Company, L.P. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
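# These Vertica cluster-task tests mirror the Galera ones earlier in this
# section: the same DBCluster/DBInstance fixtures and _all_instances_ready
# checks, plus coverage of the Vertica task-manager RPC API (_cast) and the
# strategy classes.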
import datetime from mock import Mock from mock import patch from trove.cluster.models import ClusterTasks as ClusterTaskStatus from trove.cluster.models import DBCluster import trove.common.context as context from trove.common.exception import GuestError from trove.common.strategies.cluster.experimental.vertica.taskmanager import ( VerticaClusterTasks as ClusterTasks) from trove.common.strategies.cluster.experimental.vertica.taskmanager import ( VerticaTaskManagerAPI as task_api) from trove.common.strategies.cluster.experimental.vertica.taskmanager import ( VerticaTaskManagerStrategy as task_strategy) from trove.datastore import models as datastore_models from trove.instance.models import BaseInstance from trove.instance.models import DBInstance from trove.instance.models import Instance from trove.instance.models import InstanceServiceStatus from trove.instance.models import InstanceTasks from trove import rpc from trove.taskmanager.models import ServiceStatuses from trove.tests.unittests import trove_testtools class VerticaClusterTasksTest(trove_testtools.TestCase): def setUp(self): super(VerticaClusterTasksTest, self).setUp() self.cluster_id = "1232" self.cluster_name = "Cluster-1234" self.tenant_id = "6789" self.db_cluster = DBCluster(ClusterTaskStatus.NONE, id=self.cluster_id, created=str(datetime.date), updated=str(datetime.date), name=self.cluster_name, task_id=ClusterTaskStatus.NONE._code, tenant_id=self.tenant_id, datastore_version_id="1", deleted=False) self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1", compute_instance_id="compute-1", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-1", datastore_version_id="1", cluster_id=self.cluster_id, type="master") self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2", compute_instance_id="compute-2", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-2", datastore_version_id="1", cluster_id=self.cluster_id, type="member") self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="member3", compute_instance_id="compute-3", task_id=InstanceTasks.NONE._code, task_description=InstanceTasks.NONE._db_text, volume_id="volume-3", datastore_version_id="1", cluster_id=self.cluster_id, type="member") mock_ds1 = Mock() mock_ds1.name = 'vertica' mock_dv1 = Mock() mock_dv1.name = '7.1' self.clustertasks = ClusterTasks(Mock(), self.db_cluster, datastore=mock_ds1, datastore_version=mock_dv1) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(DBInstance, 'find_by') @patch.object(InstanceServiceStatus, 'find_by') @patch('trove.taskmanager.models.LOG') def test_all_instances_ready_with_server_error(self, mock_logging, mock_find, mock_db_find, mock_update): (mock_find.return_value. get_status.return_value) = ServiceStatuses.NEW (mock_db_find.return_value. get_task_status.return_value) = InstanceTasks.BUILDING_ERROR_SERVER ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) mock_update.assert_called_with(self.cluster_id, None) self.assertFalse(ret_val) @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(DBInstance, 'find_by') @patch.object(InstanceServiceStatus, 'find_by') @patch('trove.taskmanager.models.LOG') def test_all_instances_ready_bad_status(self, mock_logging, mock_find, mock_db_find, mock_update): (mock_find.return_value. get_status.return_value) = ServiceStatuses.FAILED (mock_db_find.return_value. 
get_task_status.return_value) = InstanceTasks.NONE ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) mock_update.assert_called_with(self.cluster_id, None) self.assertFalse(ret_val) @patch.object(DBInstance, 'find_by') @patch.object(InstanceServiceStatus, 'find_by') def test_all_instances_ready(self, mock_find, mock_db_find): (mock_find.return_value. get_status.return_value) = ServiceStatuses.INSTANCE_READY (mock_db_find.return_value. get_task_status.return_value) = InstanceTasks.NONE ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"], self.cluster_id) self.assertTrue(ret_val) @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, '_all_instances_ready', return_value=False) @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_create_cluster_instance_not_ready(self, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_reset_task): mock_find_all.return_value.all.return_value = [self.dbinst1] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_reset_task.assert_called_with() @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, 'get_guest') @patch.object(ClusterTasks, 'get_ip') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') def test_create_cluster(self, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_ip, mock_guest, mock_reset_task): cluster_instances = [self.dbinst1, self.dbinst2, self.dbinst3] for instance in cluster_instances: if instance['type'] == "master": mock_find_all.return_value.all.return_value = [self.dbinst1] mock_ready.return_value = True mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) mock_ip.return_value = "10.0.0.2" self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_guest.return_value.install_cluster.assert_called_with( ['10.0.0.2']) mock_reset_task.assert_called_with() mock_guest.return_value.cluster_complete.assert_called_with() @patch.object(ClusterTasks, 'update_statuses_on_failure') @patch.object(ClusterTasks, 'reset_task') @patch.object(ClusterTasks, 'get_ip') @patch.object(ClusterTasks, '_all_instances_ready') @patch.object(Instance, 'load') @patch.object(DBInstance, 'find_all') @patch.object(datastore_models.Datastore, 'load') @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid') @patch( 'trove.common.strategies.cluster.experimental.vertica.taskmanager.LOG') def test_create_cluster_fail(self, mock_logging, mock_dv, mock_ds, mock_find_all, mock_load, mock_ready, mock_ip, mock_reset_task, mock_update_status): mock_find_all.return_value.all.return_value = [self.dbinst1] mock_load.return_value = BaseInstance(Mock(), self.dbinst1, Mock(), InstanceServiceStatus( ServiceStatuses.NEW)) mock_ip.return_value = "10.0.0.2" guest_client = Mock() guest_client.install_cluster = Mock(side_effect=GuestError("Error")) with patch.object(ClusterTasks, 'get_guest', return_value=guest_client): self.clustertasks.create_cluster(Mock(), self.cluster_id) mock_update_status.assert_called_with('1232') mock_reset_task.assert_called_with() class 
VerticaTaskManagerAPITest(trove_testtools.TestCase): @patch.object(rpc, 'get_client', Mock(return_value=Mock())) def setUp(self): super(VerticaTaskManagerAPITest, self).setUp() self.context = context.TroveContext() self.api = task_api(self.context) self.call_context = trove_testtools.TroveTestContext(self) self.api.client.prepare = Mock(return_value=self.call_context) self.call_context.cast = Mock() self.rpc_api_version = '1.0' def test_task_manager_api_cast(self): self.api._cast(method_name='test_method', version=self.rpc_api_version) self.call_context.cast.assert_called_with(self.context, 'test_method') class VerticaTaskManagerStrategyTest(trove_testtools.TestCase): def test_task_manager_cluster_tasks_class(self): vertica_strategy = task_strategy() self.assertFalse( hasattr(vertica_strategy.task_manager_cluster_tasks_class, 'rebuild_cluster')) self.assertTrue(callable( vertica_strategy.task_manager_cluster_tasks_class.create_cluster)) def test_task_manager_api_class(self): vertica_strategy = task_strategy() self.assertFalse(hasattr(vertica_strategy.task_manager_api_class, 'add_new_node')) self.assertTrue( callable(vertica_strategy.task_manager_api_class._cast))

# ==== trove-12.1.0.dev92/trove/tests/unittests/trove_testtools.py ====

# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import mock import testtools from trove.common import cfg from trove.common.context import TroveContext from trove.common.notification import DBaaSAPINotification from trove.common import policy from trove.tests import root_logger def is_bool(val): return str(val).lower() in ['true', '1', 't', 'y', 'yes', 'on', 'set'] def patch_notifier(test_case): notification_notify = mock.patch.object( DBaaSAPINotification, "_notify") notification_notify.start() test_case.addCleanup(notification_notify.stop) class TroveTestNotification(DBaaSAPINotification): @abc.abstractmethod def event_type(self): return 'test_notification' @abc.abstractmethod def required_start_traits(self): return [] class TroveTestContext(TroveContext): def __init__(self, test_case, **kwargs): super(TroveTestContext, self).__init__(**kwargs) self.notification = TroveTestNotification( self, request_id='req_id', flavor_id='7') self.notification.server_type = 'api' patch_notifier(test_case) class TestCase(testtools.TestCase): @classmethod def setUpClass(cls): super(TestCase, cls).setUpClass() root_logger.DefaultRootLogger(enable_backtrace=False) def setUp(self): super(TestCase, self).setUp() self.addCleanup(cfg.CONF.reset) root_logger.DefaultRootHandler.set_info(self.id()) # Default manager used by all unittests unless explicitly overridden.
self.patch_datastore_manager('mysql') policy_patcher = mock.patch.object(policy, 'get_enforcer', return_value=mock.MagicMock()) self.addCleanup(policy_patcher.stop) policy_patcher.start() def tearDown(self): # yes, this is gross and not thread aware. # but the only way to make it thread aware would require that # we single thread all testing root_logger.DefaultRootHandler.set_info(info=None) super(TestCase, self).tearDown() def patch_datastore_manager(self, manager_name): return self.patch_conf_property('datastore_manager', manager_name) def patch_conf_property(self, property_name, value, section=None): target = cfg.CONF if section: target = target.get(section) conf_patcher = mock.patch.object( target, property_name, new_callable=mock.PropertyMock(return_value=value)) self.addCleanup(conf_patcher.stop) return conf_patcher.start()

# ==== trove-12.1.0.dev92/trove/tests/unittests/upgrade/ ====
# ==== trove-12.1.0.dev92/trove/tests/unittests/upgrade/__init__.py (empty) ====
# ==== trove-12.1.0.dev92/trove/tests/unittests/upgrade/test_controller.py ====

# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
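# Orientation for the controller tests below: the payload check they exercise
# reduces to compiling the controller's schema and asking jsonschema whether
# the body fits. A self-contained sketch; the schema literal here is an
# illustrative stand-in for UpgradeController.get_schema('create', body).
import jsonschema

_upgrade_schema_sketch = {
    "type": "object",
    "required": ["upgrade"],
    "properties": {
        "upgrade": {
            "type": "object",
            "required": ["instance_id", "instance_version", "location"],
        },
    },
}

_payload = {
    "upgrade": {
        "instance_id": "27e25b73-88a1-4526-b2b9-919a28b8b33f",
        "instance_version": "v1.0.1",
        "location": "http://swift/trove-guestagent-v1.0.1.tar.gz",
    }
}

# is_valid() returns a bool; iter_errors() would list each violation.
assert jsonschema.Draft4Validator(_upgrade_schema_sketch).is_valid(_payload)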
# import jsonschema from mock import Mock, MagicMock, patch from trove.extensions.mgmt.upgrade.models import UpgradeMessageSender from trove.extensions.mgmt.upgrade.service import UpgradeController from trove.tests.unittests import trove_testtools class TestUpgradeController(trove_testtools.TestCase): def setUp(self): super(TestUpgradeController, self).setUp() self.controller = UpgradeController() self.body = { "upgrade": { "instance_id": "27e25b73-88a1-4526-b2b9-919a28b8b33f", "instance_version": "v1.0.1", "location": "http://swift/trove-guestagent-v1.0.1.tar.gz"} } def tearDown(self): super(TestUpgradeController, self).tearDown() self.body = {} def _get_validator(self, body): """ Helper method to return a validator """ schema = self.controller.get_schema('create', body) return jsonschema.Draft4Validator(schema) def test_validate_create(self): """ Test for valid payload in body """ validator = self._get_validator(self.body) self.assertTrue(validator.is_valid(self.body)) def test_validate_create_additional_params(self): """ Test for valid payload with additional params """ self.body["upgrade"]["description"] = "upgrade" validator = self._get_validator(self.body) self.assertTrue(validator.is_valid(self.body)) @patch.object(UpgradeMessageSender, 'create', Mock(return_value=Mock())) def test_controller_with_no_metadata(self): """ Test the mock controller w/out metadata """ tenant_id = '77889991010' instance_id = '27e25b73-88a1-4526-b2b9-919a28b8b33f' context = Mock() req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) resp = self.controller.create(req, self.body, tenant_id, instance_id) instance_version = self.body["upgrade"]["instance_version"] location = self.body["upgrade"]["location"] metadata = None UpgradeMessageSender.create.assert_called_once_with( context, instance_id, instance_version, location, metadata) self.assertEqual(202, resp.status) @patch.object(UpgradeMessageSender, 'create', Mock(return_value=Mock())) def test_controller_with_metadata(self): """ Test the mock controller with metadata """ tenant_id = '77889991010' instance_id = '27e25b73-88a1-4526-b2b9-919a28b8b33f' context = Mock() req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) # append the body w/ metadata self.body["upgrade"]["metadata"] = { "config_location": "swift://my.conf.location", "is_public": True, "is_encrypted": True} resp = self.controller.create(req, self.body, tenant_id, instance_id) instance_version = self.body["upgrade"]["instance_version"] location = self.body["upgrade"]["location"] metadata = self.body["upgrade"]["metadata"] UpgradeMessageSender.create.assert_called_once_with( context, instance_id, instance_version, location, metadata) self.assertEqual(202, resp.status) @patch.object(UpgradeMessageSender, 'create', Mock(return_value=Mock())) def test_controller_with_empty_metadata(self): """ Test the mock controller with empty metadata """ tenant_id = '77889991010' instance_id = '27e25b73-88a1-4526-b2b9-919a28b8b33f' context = Mock() req = Mock() req.environ = MagicMock() req.environ.get = Mock(return_value=context) # append the body w/ empty metadata self.body["upgrade"]["metadata"] = {} resp = self.controller.create(req, self.body, tenant_id, instance_id) instance_version = self.body["upgrade"]["instance_version"] location = self.body["upgrade"]["location"] metadata = self.body["upgrade"]["metadata"] UpgradeMessageSender.create.assert_called_once_with( context, instance_id, instance_version, location, metadata) self.assertEqual(202,
resp.status)

# ==== trove-12.1.0.dev92/trove/tests/unittests/upgrade/test_models.py ====

# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from mock import patch from trove.extensions.mgmt.upgrade.models import UpgradeMessageSender from trove import rpc from trove.tests.unittests import trove_testtools class TestUpgradeModel(trove_testtools.TestCase): def setUp(self): super(TestUpgradeModel, self).setUp() def tearDown(self): super(TestUpgradeModel, self).tearDown() def test_validate(self): """ Test validation method """ param = None self.assertRaises( ValueError, UpgradeMessageSender._validate, param, 36) param = '' self.assertRaises( ValueError, UpgradeMessageSender._validate, param, 36) param = '7169f46a-ac53-401a-ba35-f461db948b8c7' self.assertRaises( ValueError, UpgradeMessageSender._validate, param, 36) param = '7169f46a-ac53-401a-ba35-f461db948b8c' self.assertTrue(UpgradeMessageSender._validate(param, 36)) param = '7169f46a-ac53-401a-ba35' self.assertTrue(UpgradeMessageSender._validate(param, 36)) def test_create(self): self._assert_create_with_metadata() def test_create_with_metadata_none(self): self._assert_create_with_metadata(metadata=None) def test_create_with_empty_metadata(self): self._assert_create_with_metadata(metadata={}) def test_create_with_metadata(self): self._assert_create_with_metadata( metadata={"is_public": True, "is_encrypted": True, "config_location": "http://swift/trove-guestagent.conf"}) @patch('trove.guestagent.api.API.upgrade') @patch.object(rpc, 'get_client') @patch('trove.instance.models.get_instance_encryption_key', return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08') def _assert_create_with_metadata(self, mock_get_encryption_key, mock_client, api_upgrade_mock, metadata=None): """Exercise UpgradeMessageSender.create() call. """ context = trove_testtools.TroveTestContext(self) instance_id = "27e25b73-88a1-4526-b2b9-919a28b8b33f" instance_version = "v1.0.1" location = "http://swift/trove-guestagent-v1.0.1.tar.gz" func = (UpgradeMessageSender.create( context, instance_id, instance_version, location, metadata) if metadata is not None else UpgradeMessageSender.create( context, instance_id, instance_version, location)) self.assertTrue(callable(func)) func() # This call should translate to the API call asserted below.
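# That is, guestagent API.upgrade(instance_version, location, metadata), with # rpc.get_client and the instance encryption key both patched out above.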
api_upgrade_mock.assert_called_once_with(instance_version, location, metadata) mock_get_encryption_key.assert_called()

# ==== trove-12.1.0.dev92/trove/tests/unittests/util/ ====
# ==== trove-12.1.0.dev92/trove/tests/unittests/util/__init__.py (empty) ====
# ==== trove-12.1.0.dev92/trove/tests/unittests/util/matchers.py ====

# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Matcher classes to be used inside of the testtools assertThat framework.""" import pprint class DictKeysMismatch(object): def __init__(self, d1only, d2only): self.d1only = d1only self.d2only = d2only def describe(self): return ('Keys in d1 and not d2: %(d1only)s.' ' Keys in d2 and not d1: %(d2only)s' % self.__dict__) def get_details(self): return {} class DictMismatch(object): def __init__(self, key, d1_value, d2_value): self.key = key self.d1_value = d1_value self.d2_value = d2_value def describe(self): return ("Dictionaries do not match at %(key)s." " d1: %(d1_value)s d2: %(d2_value)s" % self.__dict__) def get_details(self): return {} class DictMatches(object): def __init__(self, d1, approx_equal=False, tolerance=0.001): self.d1 = d1 self.approx_equal = approx_equal self.tolerance = tolerance def __str__(self): return 'DictMatches(%s)' % (pprint.pformat(self.d1)) # Useful assertions def match(self, d2): """Assert two dicts are equivalent. This is a 'deep' match in the sense that it handles nested dictionaries appropriately. NOTE: If you don't care (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped.
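For example (illustrative values)::

    DictMatches({'a': 1, 'b': 'DONTCARE'}).match({'a': 1, 'b': 99})  # None: match
    DictMatches({'a': 1}).match({'a': 2})  # returns a DictMismatch for key 'a'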
""" d1keys = set(self.d1.keys()) d2keys = set(d2.keys()) if d1keys != d2keys: d1only = d1keys - d2keys d2only = d2keys - d1keys return DictKeysMismatch(d1only, d2only) for key in d1keys: d1value = self.d1[key] d2value = d2[key] try: error = abs(float(d1value) - float(d2value)) within_tolerance = error <= self.tolerance except (ValueError, TypeError): # If both values aren't convertible to float, just ignore # ValueError if arg is a str, TypeError if it's something else # (like None) within_tolerance = False if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): matcher = DictMatches(d1value) did_match = matcher.match(d2value) if did_match is not None: return did_match elif 'DONTCARE' in (d1value, d2value): continue elif self.approx_equal and within_tolerance: continue elif d1value != d2value: return DictMismatch(key, d1value, d2value) class ListLengthMismatch(object): def __init__(self, len1, len2): self.len1 = len1 self.len2 = len2 def describe(self): return ('Length mismatch: len(L1)=%(len1)d != ' 'len(L2)=%(len2)d' % self.__dict__) def get_details(self): return {} class DictListMatches(object): def __init__(self, l1, approx_equal=False, tolerance=0.001): self.l1 = l1 self.approx_equal = approx_equal self.tolerance = tolerance def __str__(self): return 'DictListMatches(%s)' % (pprint.pformat(self.l1)) # Useful assertions def match(self, l2): """Assert a list of dicts are equivalent.""" l1count = len(self.l1) l2count = len(l2) if l1count != l2count: return ListLengthMismatch(l1count, l2count) for d1, d2 in zip(self.l1, l2): matcher = DictMatches(d2, approx_equal=self.approx_equal, tolerance=self.tolerance) did_match = matcher.match(d1) if did_match: return did_match class SubDictMismatch(object): def __init__(self, key=None, sub_value=None, super_value=None, keys=False): self.key = key self.sub_value = sub_value self.super_value = super_value self.keys = keys def describe(self): if self.keys: return "Keys between dictionaries did not match" else: return ("Dictionaries do not match at %s. 
d1: %s d2: %s" % (self.key, self.super_value, self.sub_value)) def get_details(self): return {} class IsSubDictOf(object): def __init__(self, super_dict): self.super_dict = super_dict def __str__(self): return 'IsSubDictOf(%s)' % (self.super_dict) def match(self, sub_dict): """Assert a sub_dict is subset of super_dict.""" if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())): return SubDictMismatch(keys=True) for k, sub_value in sub_dict.items(): super_value = self.super_dict[k] if isinstance(sub_value, dict): matcher = IsSubDictOf(super_value) did_match = matcher.match(sub_value) if did_match is not None: return did_match elif 'DONTCARE' in (sub_value, super_value): continue else: if sub_value != super_value: return SubDictMismatch(k, sub_value, super_value) class FunctionCallMatcher(object): def __init__(self, expected_func_calls): self.expected_func_calls = expected_func_calls self.actual_func_calls = [] def call(self, *args, **kwargs): func_call = {'args': args, 'kwargs': kwargs} self.actual_func_calls.append(func_call) def match(self): dict_list_matcher = DictListMatches(self.expected_func_calls) return dict_list_matcher.match(self.actual_func_calls)

# ==== trove-12.1.0.dev92/trove/tests/unittests/util/util.py ====

# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading from trove.common import cfg from trove.db import get_db_api from trove.db.sqlalchemy import session CONF = cfg.CONF DB_SETUP = None LOCK = threading.Lock() def init_db(): with LOCK: global DB_SETUP if not DB_SETUP: db_api = get_db_api() db_api.db_sync(CONF) session.configure_db(CONF) DB_SETUP = True

# ==== trove-12.1.0.dev92/trove/tests/unittests/volume_type/ ====
# ==== trove-12.1.0.dev92/trove/tests/unittests/volume_type/__init__.py (empty) ====
# ==== trove-12.1.0.dev92/trove/tests/unittests/volume_type/test_volume_type.py ====

# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from trove.common import clients from trove.tests.unittests import trove_testtools from trove.volume_type import models class TestVolumeType(trove_testtools.TestCase): def test_volume_type(self): cinder_volume_type = mock.MagicMock() cinder_volume_type.id = 123 cinder_volume_type.name = 'test_type' cinder_volume_type.is_public = True cinder_volume_type.description = 'Test volume type' volume_type = models.VolumeType(cinder_volume_type) self.assertEqual(cinder_volume_type.id, volume_type.id) self.assertEqual(cinder_volume_type.name, volume_type.name) self.assertEqual(cinder_volume_type.is_public, volume_type.is_public) self.assertEqual(cinder_volume_type.description, volume_type.description) @mock.patch.object(clients, 'create_cinder_client') def test_volume_types(self, mock_client): mock_context = mock.MagicMock() mock_types = [mock.MagicMock(), mock.MagicMock()] mock_client(mock_context).volume_types.list.return_value = mock_types volume_types = models.VolumeTypes(mock_context) for i, volume_type in enumerate(volume_types): self.assertEqual(mock_types[i], volume_type.volume_type, "Volume type {} does not match.".format(i))

# ==== trove-12.1.0.dev92/trove/tests/unittests/volume_type/test_volume_type_views.py ====

# Copyright 2016 Tesora, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
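# The model tests above pin down a thin-wrapper contract: trove's VolumeType
# mirrors selected attributes of a cinderclient volume type. A hedged sketch
# (attribute list taken from the assertions above; _VolumeTypeSketch is
# illustrative, not the real trove.volume_type.models class):
class _VolumeTypeSketch(object):
    def __init__(self, cinder_volume_type):
        self.volume_type = cinder_volume_type  # keep the raw client object
        self.id = cinder_volume_type.id
        self.name = cinder_volume_type.name
        self.is_public = cinder_volume_type.is_public
        self.description = cinder_volume_type.description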
# import mock from trove.tests.unittests import trove_testtools from trove.volume_type import views class TestVolumeTypeViews(trove_testtools.TestCase): def test_volume_type_view(self): test_id = 'test_id' test_name = 'test_name' test_is_public = True test_description = 'Test description' test_req = mock.MagicMock() volume_type = mock.MagicMock() volume_type.id = test_id volume_type.name = test_name volume_type.is_public = test_is_public volume_type.description = test_description volume_type_view = views.VolumeTypeView(volume_type, req=test_req) data = volume_type_view.data() self.assertEqual(volume_type, volume_type_view.volume_type) self.assertEqual(test_req, volume_type_view.req) self.assertEqual(test_id, data['volume_type']['id']) self.assertEqual(test_name, data['volume_type']['name']) self.assertEqual(test_is_public, data['volume_type']['is_public']) self.assertEqual(test_description, data['volume_type']['description']) @mock.patch.object(views, 'VolumeTypeView') def test_volume_types_view(self, mock_single_view): test_type_1 = mock.MagicMock() test_type_2 = mock.MagicMock() volume_types_view = views.VolumeTypesView([test_type_1, test_type_2]) self.assertEqual( {'volume_types': [ mock_single_view(test_type_1, None).data()['volume_type'], mock_single_view(test_type_2, None).data()['volume_type']]}, volume_types_view.data())

# ==== trove-12.1.0.dev92/trove/tests/util/ ====
# ==== trove-12.1.0.dev92/trove/tests/util/__init__.py ====

# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`tests` -- Utility methods for tests. =================================== .. automodule:: utils :platform: Unix :synopsis: Tests for Trove.
""" import subprocess try: EVENT_AVAILABLE = True except ImportError: EVENT_AVAILABLE = False import glanceclient from keystoneauth1.identity import v3 from keystoneauth1 import session from neutronclient.v2_0 import client as neutron_client from novaclient import client as nova_client from proboscis.asserts import assert_true from proboscis.asserts import Check from proboscis.asserts import fail from proboscis import SkipTest from six.moves.urllib.parse import unquote from sqlalchemy import create_engine from sqlalchemy.sql.expression import text import tenacity from troveclient.compat import Dbaas from trove.common import cfg from trove.common.utils import import_class from trove.common.utils import import_object from trove.tests.config import CONFIG as test_config from trove.tests.util.client import TestClient from trove.tests.util import mysql from trove.tests.util import test_config as CONFIG from trove.tests.util.users import Requirements WHITE_BOX = test_config.white_box FLUSH = text("FLUSH PRIVILEGES;") CONF = cfg.CONF def create_client(*args, **kwargs): """ Using the User Requirements as arguments, finds a user and grabs a new DBAAS client. """ reqs = Requirements(*args, **kwargs) user = test_config.users.find_user(reqs) return create_dbaas_client(user) def create_dbaas_client(user): """Creates a rich client for the Trove API using the test config.""" auth_strategy = None kwargs = { 'service_type': 'database', 'insecure': test_config.values['trove_client_insecure'], } def set_optional(kwargs_name, test_conf_name): value = test_config.values.get(test_conf_name, None) if value is not None: kwargs[kwargs_name] = value force_url = 'override_trove_api_url' in test_config.values service_url = test_config.get('override_trove_api_url', None) if user.requirements.is_admin: service_url = test_config.get('override_admin_trove_api_url', service_url) if service_url: kwargs['service_url'] = service_url auth_strategy = None if user.requirements.is_admin: auth_strategy = test_config.get('admin_auth_strategy', test_config.auth_strategy) else: auth_strategy = test_config.auth_strategy set_optional('region_name', 'trove_client_region_name') if test_config.values.get('override_trove_api_url_append_tenant', False): kwargs['service_url'] += "/" + user.tenant if auth_strategy == 'fake': from troveclient.compat import auth class FakeAuth(auth.Authenticator): def authenticate(self): class FakeCatalog(object): def __init__(self, auth): self.auth = auth def get_public_url(self): return "%s/%s" % (test_config.dbaas_url, self.auth.tenant) def get_token(self): return self.auth.tenant return FakeCatalog(self) auth_strategy = FakeAuth if auth_strategy: kwargs['auth_strategy'] = auth_strategy if not user.requirements.is_admin: auth_url = test_config.trove_auth_url else: auth_url = test_config.values.get('trove_admin_auth_url', test_config.trove_auth_url) if test_config.values.get('trove_client_cls'): cls_name = test_config.trove_client_cls kwargs['client_cls'] = import_class(cls_name) dbaas = Dbaas(user.auth_user, user.auth_key, tenant=user.tenant, auth_url=auth_url, **kwargs) dbaas.authenticate() with Check() as check: check.is_not_none(dbaas.client.auth_token, "Auth token not set!") if not force_url and user.requirements.is_admin: expected_prefix = test_config.dbaas_url actual = dbaas.client.service_url msg = "Dbaas management url was expected to start with %s, but " \ "was %s." 
% (expected_prefix, actual) check.true(actual.startswith(expected_prefix), msg) return TestClient(dbaas) def create_keystone_session(user): auth = v3.Password(username=user.auth_user, password=user.auth_key, project_id=user.tenant_id, user_domain_name='Default', project_domain_name='Default', auth_url=test_config.auth_url) return session.Session(auth=auth) def create_nova_client(user, service_type=None): if not service_type: service_type = CONF.nova_compute_service_type openstack = nova_client.Client( CONF.nova_client_version, username=user.auth_user, password=user.auth_key, user_domain_name='Default', project_id=user.tenant_id, auth_url=CONFIG.auth_url, service_type=service_type, os_cache=False, cacert=test_config.values.get('cacert', None) ) return TestClient(openstack) def create_neutron_client(user): sess = create_keystone_session(user) client = neutron_client.Client( session=sess, service_type=CONF.neutron_service_type, region_name=CONFIG.trove_client_region_name, insecure=CONF.neutron_api_insecure, endpoint_type=CONF.neutron_endpoint_type ) return TestClient(client) def create_glance_client(user): sess = create_keystone_session(user) glance = glanceclient.Client(CONF.glance_client_version, session=sess) return TestClient(glance) def dns_checker(mgmt_instance): """Given a MGMT instance, ensures DNS provisioning worked. Uses a helper class which, given a mgmt instance (returned by the mgmt API) can confirm that the DNS record provisioned correctly. """ if CONFIG.values.get('trove_dns_checker') is not None: checker = import_class(CONFIG.trove_dns_checker) checker()(mgmt_instance) else: raise SkipTest("Can't access DNS system to check if DNS provisioned.") def process(cmd): output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) return output def string_in_list(str, substr_list): """Returns True if the string appears in the list.""" return any([str.find(x) >= 0 for x in substr_list]) def unquote_user_host(user_hostname): unquoted = unquote(user_hostname) if '@' not in unquoted: return unquoted, '%' if unquoted.endswith('@'): return unquoted, '%' splitup = unquoted.split('@') host = splitup[-1] user = '@'.join(splitup[:-1]) return user, host def iso_time(time_string): """Return a iso formated datetime: 2013-04-15T19:50:23Z.""" ts = time_string.replace(' ', 'T') try: micro = ts.rindex('.') ts = ts[:micro] except ValueError: pass return '%sZ' % ts def assert_contains(exception_message, substrings): for substring in substrings: assert_true(substring in exception_message, message="'%s' not in '%s'" % (substring, exception_message)) # TODO(dukhlov): Still required by trove integration # Should be removed after trove integration fix # https://bugs.launchpad.net/trove-integration/+bug/1228306 # TODO(cp16net): DO NOT USE needs to be removed def mysql_connection(): cls = CONFIG.get('mysql_connection', "local.MySqlConnection") if cls == "local.MySqlConnection": return MySqlConnection() return import_object(cls)() class MySqlConnection(object): def assert_fails(self, ip, user_name, password): try: with mysql.create_mysql_connection(ip, user_name, password): pass fail("Should have failed to connect: mysql --host %s -u %s -p%s" % (ip, user_name, password)) except mysql.MySqlPermissionsFailure: return # Good, this is what we wanted. except mysql.MySqlConnectionFailure as mcf: fail("Expected to see permissions failure. 
Instead got message:" "%s" % mcf.message) @tenacity.retry( wait=tenacity.wait_fixed(3), stop=tenacity.stop_after_attempt(5), reraise=True ) def create(self, ip, user_name, password): print("Connecting mysql, host: %s, user: %s, password: %s" % (ip, user_name, password)) return mysql.create_mysql_connection(ip, user_name, password) class LocalSqlClient(object): """A sqlalchemy wrapper to manage transactions.""" def __init__(self, engine, use_flush=True): self.engine = engine self.use_flush = use_flush def __enter__(self): self.conn = self.engine.connect() self.trans = self.conn.begin() return self.conn def __exit__(self, type, value, traceback): if self.trans: if type is not None: # An error occurred self.trans.rollback() else: if self.use_flush: self.conn.execute(FLUSH) self.trans.commit() self.conn.close() def execute(self, t, **kwargs): try: return self.conn.execute(t, kwargs) except Exception: self.trans.rollback() self.trans = None raise @staticmethod def init_engine(user, password, host): return create_engine("mysql+pymysql://%s:%s@%s:3306" % (user, password, host), pool_recycle=1800, echo=True)

# ==== trove-12.1.0.dev92/trove/tests/util/check.py ====

# Copyright (c) 2012 OpenStack # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Like asserts, but does not raise an exception until the end of a block.""" import traceback from proboscis.asserts import assert_equal from proboscis.asserts import assert_false from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_true from proboscis.asserts import ASSERTION_ERROR from proboscis.asserts import Check import six def get_stack_trace_of_caller(level_up): """Gets the stack trace at the point of the caller.""" level_up += 1 st = traceback.extract_stack() caller_index = len(st) - level_up if caller_index < 0: caller_index = 0 new_st = st[0:caller_index] return new_st def raise_blame_caller(level_up, ex): """Raises an exception, changing the stack trace to point to the caller.""" new_st = get_stack_trace_of_caller(level_up + 2) six.reraise(type(ex), ex, new_st) class Checker(object): def __init__(self): self.messages = [] self.odd = True self.protected = False def _add_exception(self, _type, value, tb): """Takes an exception, and adds it as a string.""" if self.odd: prefix = "* " else: prefix = "- " start = "Check failure!
Traceback:" middle = prefix.join(traceback.format_list(tb)) end = '\n'.join(traceback.format_exception_only(_type, value)) msg = '\n'.join([start, middle, end]) self.messages.append(msg) self.odd = not self.odd def equal(self, *args, **kwargs): self._run_assertion(assert_equal, *args, **kwargs) def false(self, *args, **kwargs): self._run_assertion(assert_false, *args, **kwargs) def not_equal(self, *args, **kwargs): _run_assertion(assert_not_equal, *args, **kwargs) def _run_assertion(self, assert_func, *args, **kwargs): """ Runs an assertion method, but catches any failure and adds it as a string to the messages list. """ if self.protected: try: assert_func(*args, **kwargs) except ASSERTION_ERROR as ae: st = get_stack_trace_of_caller(2) self._add_exception(ASSERTION_ERROR, ae, st) else: assert_func(*args, **kwargs) def __enter__(self): self.protected = True return self def __exit__(self, _type, value, tb): self.protected = False if _type is not None: # An error occurred other than an assertion failure. # Return False to allow the Exception to be raised return False if len(self.messages) != 0: final_message = '\n'.join(self.messages) raise ASSERTION_ERROR(final_message) def true(self, *args, **kwargs): self._run_assertion(assert_true, *args, **kwargs) class AttrCheck(Check): """Class for attr checks, links and other common items.""" def __init__(self): super(AttrCheck, self).__init__() def fail(self, msg): self.true(False, msg) def contains_allowed_attrs(self, list, allowed_attrs, msg=None): # Check these attrs only are returned in create response for attr in list: if attr not in allowed_attrs: self.fail("%s should not contain '%s'" % (msg, attr)) def links(self, links): allowed_attrs = ['href', 'rel'] for link in links: self.contains_allowed_attrs(link, allowed_attrs, msg="Links") class CollectionCheck(Check): """Checks for elements in a dictionary.""" def __init__(self, name, collection): self.name = name self.collection = collection super(CollectionCheck, self).__init__() def element_equals(self, key, expected_value): if key not in self.collection: message = 'Element "%s.%s" does not exist.' % (self.name, key) self.fail(message) else: value = self.collection[key] self.equal(value, expected_value) def has_element(self, key, element_type): if key not in self.collection: message = 'Element "%s.%s" does not exist.' % (self.name, key) self.fail(message) else: value = self.collection[key] match = False if not isinstance(element_type, tuple): type_list = [element_type] else: type_list = element_type for possible_type in type_list: if possible_type is None: if value is None: match = True else: if isinstance(value, possible_type): match = True if not match: self.fail('Element "%s.%s" does not match any of these ' 'expected types: %s' % (self.name, key, type_list)) class TypeCheck(Check): """Checks for attributes in an object.""" def __init__(self, name, instance): self.name = name self.instance = instance super(TypeCheck, self).__init__() def _check_type(value, attribute_type): if not isinstance(value, attribute_type): self.fail("%s attribute %s is of type %s (expected %s)." % (self.name, attribute_name, type(value), attribute_type)) def has_field(self, attribute_name, attribute_type, additional_checks=None): if not hasattr(self.instance, attribute_name): self.fail("%s missing attribute %s." 
% (self.name, attribute_name)) else: value = getattr(self.instance, attribute_name) match = False if isinstance(attribute_type, tuple): type_list = attribute_type else: type_list = [attribute_type] for possible_type in type_list: if possible_type is None: if value is None: match = True else: if isinstance(value, possible_type): match = True if not match: self.fail("%s attribute %s is of type %s (expected one of " "the following: %s)." % (self.name, attribute_name, type(value), attribute_type)) if match and additional_checks: additional_checks(value)

# ==== trove-12.1.0.dev92/trove/tests/util/client.py ====

# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`tests` -- Utility methods for tests. =================================== .. automodule:: utils :platform: Unix :synopsis: Tests for Trove. """ from proboscis import asserts from trove.tests.config import CONFIG def add_report_event_to(home, name): """Takes a module, class, etc, and an attribute name to decorate.""" func = getattr(home, name) def __cb(*args, **kwargs): # %s also turns a var into a string, but in some rare cases explicit # str() is less likely to raise an exception. arg_strs = [repr(arg) for arg in args] arg_strs += ['%s=%s' % (repr(key), repr(value)) for (key, value) in kwargs.items()] CONFIG.get_reporter().log("[RDC] Calling : %s(%s)..." % (name, ','.join(arg_strs))) value = func(*args, **kwargs) CONFIG.get_reporter().log("[RDC] returned %s." % str(value)) return value setattr(home, name, __cb) class TestClient(object): """Decorates the rich clients with some extra methods. These methods are filled with test asserts, meaning if you use this you get the tests for free.
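For example (manager names depend on the wrapped client)::

    client = TestClient(dbaas)      # wrap any rich client
    client.instances.list()        # unknown attributes proxy through
    client.assert_http_code(200)   # plus assertion-laden helpers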
""" def __init__(self, real_client): """Accepts a normal client.""" self.real_client = real_client def assert_http_code(self, expected_http_code): resp, body = self.real_client.client.last_response asserts.assert_equal(resp.status, expected_http_code) @property def last_http_code(self): resp, body = self.real_client.client.last_response return resp.status @staticmethod def find_flavor_self_href(flavor): self_links = [link for link in flavor.links if link['rel'] == 'self'] asserts.assert_true(len(self_links) > 0, "Flavor had no self href!") flavor_href = self_links[0]['href'] asserts.assert_false(flavor_href is None, "Flavor link self href missing.") return flavor_href def find_flavors_by(self, condition, flavor_manager=None): flavor_manager = flavor_manager or self.flavors flavors = flavor_manager.list() return [flavor for flavor in flavors if condition(flavor)] def find_flavors_by_name(self, name, flavor_manager=None): return self.find_flavors_by(lambda flavor: flavor.name == name, flavor_manager) def find_flavors_by_ram(self, ram, flavor_manager=None): return self.find_flavors_by(lambda flavor: flavor.ram == ram, flavor_manager) def find_flavor_and_self_href(self, flavor_id, flavor_manager=None): """Given an ID, returns flavor and its self href.""" flavor_manager = flavor_manager or self.flavors asserts.assert_false(flavor_id is None) flavor = flavor_manager.get(flavor_id) asserts.assert_false(flavor is None) flavor_href = self.find_flavor_self_href(flavor) return flavor, flavor_href def __getattr__(self, item): if item == "__setstate__": raise AttributeError(item) if hasattr(self.real_client, item): return getattr(self.real_client, item) raise AttributeError(item) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/util/event_simulator.py0000644000175000017500000002535700000000000023233 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Simulates time itself to make the fake mode tests run even faster. Specifically, this forces all various threads of execution to run one at a time based on when they would have been scheduled using the various eventlet spawn functions. Because only one thing is running at a given time, it eliminates race conditions that would normally be present from testing multi-threaded scenarios. It also means that the simulated time.sleep does not actually have to sit around for the designated time, which greatly speeds up the time it takes to run the tests. Event Simulator Overview ======================== We use this to simulate all the threads of Trove running. i.e. (api,taskmanager,proboscis tests). All the services end up sleeping and having to wait for something to happen at times. Monkey Patching Methods ----------------------- We monkey patch a few methods to make this happen. A few sleep methods with a fake_sleep. 
* time.sleep * eventlet.sleep * greenthread.sleep A few spawn methods with a fake_spawn * eventlet.spawn_after * eventlet.spawn_n Raise an error if you try this one. * eventlet.spawn Replace the poll_until with a fake_poll_until. Coroutine Object ---------------- There is a Coroutine object here that mimics the behavior of a thread. It takes in a function with args and kwargs and executes it. If at any point that method calls time.sleep(seconds) then the event simulator will put that method on the stack of threads and run the fake_sleep method that will then iterate over all the threads in the stack updating the time they still need to sleep. Then as the threads hit the end of their sleep time period they will continue to execute. fake_threads ------------ One thing to note here is the idea of a stack of threads being kept in fake_threads list. Any new thread created is added to this stack. A fake_thread attributes: fake_thread = { 'sleep': time_from_now_in_seconds, 'greenlet': Coroutine(method_to_execute), 'name': str(func) } 'sleep' is the time it should wait to execute this method. 'greenlet' is the thread object 'name' is the unique name of the thread to track main_loop Method ---------------- The main_loop method is a loop that runs forever waiting on all the threads to complete while running pulse every 0.1 seconds. This is the key to simulated the threads quickly. We are pulsing every 0.1 seconds looking to make sure there are no threads just waiting around for no reason rather than waiting a full second to respond. pulse Method ------------ The pulse method is going through the stack(list) of threads looking for the next thread to execute while updating the 'sleep' time and the if the 'sleep' time is <=0 then it will run this thread until it calls for another time.sleep. If the method/thread running calls time.sleep for what ever reason then the thread's 'sleep' parameter is updated to the new 'next_sleep_time'. If the method/thread running completes without calling time.sleep because it finished all work needed to be done then there the 'next_sleep_time' is set to None and the method/thread is deleted from the stack(list) of threads. """ import eventlet from eventlet.event import Event from eventlet.semaphore import Semaphore from eventlet import spawn as true_spawn class Coroutine(object): """ This class simulates a coroutine, which is ironic, as greenlet actually *is* a coroutine. But trying to use greenlet here gives nasty results since eventlet thoroughly monkey-patches things, making it difficult to run greenlet on its own. Essentially think of this as a wrapper for eventlet's threads which has a run and sleep function similar to old school coroutines, meaning it won't start until told and when asked to sleep it won't wake back up without permission. """ ALL = [] def __init__(self, func, *args, **kwargs): self.my_sem = Semaphore(0) # This is held by the thread as it runs. self.caller_sem = None self.dead = False started = Event() self.id = 5 self.ALL.append(self) def go(): self.id = eventlet.corolocal.get_ident() started.send(True) self.my_sem.acquire(blocking=True, timeout=None) try: func(*args, **kwargs) # except Exception as e: # print("Exception in coroutine! %s" % e) finally: self.dead = True self.caller_sem.release() # Relinquish control back to caller. 
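# Unregister on exit: the loop below removes this finished coroutine's entry from ALL.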
for i in range(len(self.ALL)): if self.ALL[i].id == self.id: del self.ALL[i] break true_spawn(go) started.wait() @classmethod def get_current(cls): """Finds the coroutine associated with the thread which calls it.""" return cls.get_by_id(eventlet.corolocal.get_ident()) @classmethod def get_by_id(cls, id): for cr in cls.ALL: if cr.id == id: return cr raise RuntimeError("Coroutine with id %s not found!" % id) def sleep(self): """Puts the coroutine to sleep until run is called again. This should only be called by the thread which owns this object. """ # Only call this from its own thread. assert eventlet.corolocal.get_ident() == self.id self.caller_sem.release() # Relinquish control back to caller. self.my_sem.acquire(blocking=True, timeout=None) def run(self): """Starts up the thread. Should be called from a different thread.""" # Don't call this from the thread which it represents. assert eventlet.corolocal.get_ident() != self.id self.caller_sem = Semaphore(0) self.my_sem.release() self.caller_sem.acquire() # Wait for it to finish. # Main global thread to run. main_greenlet = None # Stack of threads currently running or sleeping fake_threads = [] # Allow a sleep method to be called at least this number of times before # raising an error that there are not other active threads waiting to run. allowable_empty_sleeps = 1 sleep_allowance = allowable_empty_sleeps def other_threads_are_active(): """Returns True if concurrent activity is being simulated. Specifically, this means there is a fake thread in action other than the "pulse" thread and the main test thread. """ return len(fake_threads) >= 2 def fake_sleep(time_to_sleep): """Simulates sleep. Puts the coroutine which calls it to sleep. If a coroutine object is not associated with the caller this will fail. """ if time_to_sleep: global sleep_allowance sleep_allowance -= 1 if not other_threads_are_active(): if sleep_allowance < -1: raise RuntimeError("Sleeping for no reason.") else: return # Forgive the thread for calling this for one time. sleep_allowance = allowable_empty_sleeps cr = Coroutine.get_current() for ft in fake_threads: if ft['greenlet'].id == cr.id: ft['next_sleep_time'] = time_to_sleep cr.sleep() def fake_poll_until(retriever, condition=lambda value: value, sleep_time=1, time_out=0): """Fakes out poll until.""" from trove.common import exception slept_time = 0 while True: resource = retriever() if condition(resource): return resource fake_sleep(sleep_time) slept_time += sleep_time if time_out and slept_time >= time_out: raise exception.PollTimeOut() def run_main(func): """Runs the given function as the initial thread of the event simulator.""" global main_greenlet main_greenlet = Coroutine(main_loop) fake_spawn(0, func) main_greenlet.run() def main_loop(): """The coroutine responsible for calling each "fake thread." The Coroutine which calls this is the only one that won't end up being associated with the fake_threads list. The reason is this loop needs to wait on whatever thread is running, meaning it has to be a Coroutine as well. 
""" while len(fake_threads) > 0: pulse(0.1) def fake_spawn_n(func, *args, **kw): fake_spawn(0, func, *args, **kw) def fake_spawn(time_from_now_in_seconds, func, *args, **kw): """Fakes eventlet's spawn function by adding a fake thread.""" def thread_start(): # fake_sleep(time_from_now_in_seconds) return func(*args, **kw) cr = Coroutine(thread_start) fake_threads.append({'sleep': time_from_now_in_seconds, 'greenlet': cr, 'name': str(func)}) def pulse(seconds): """ Runs the event simulator for the amount of simulated time denoted by "seconds". """ index = 0 while index < len(fake_threads): t = fake_threads[index] t['sleep'] -= seconds if t['sleep'] <= 0: t['sleep'] = 0 t['next_sleep_time'] = None t['greenlet'].run() sleep_time = t['next_sleep_time'] if sleep_time is None or isinstance(sleep_time, tuple): del fake_threads[index] index -= 1 else: t['sleep'] = sleep_time index += 1 def wait_until_all_activity_stops(): """In fake mode, wait for all simulated events to chill out. This can be useful in situations where you need simulated activity (such as calls running in TaskManager) to "bleed out" and finish before running another test. """ if main_greenlet is None: return while other_threads_are_active(): fake_sleep(1) def monkey_patch(): """ Changes global functions such as time.sleep, eventlet.spawn* and others to their event_simulator equivalents. """ import time time.sleep = fake_sleep import eventlet from eventlet import greenthread eventlet.sleep = fake_sleep greenthread.sleep = fake_sleep eventlet.spawn_after = fake_spawn def raise_error(): raise RuntimeError("Illegal operation!") eventlet.spawn_n = fake_spawn_n eventlet.spawn = raise_error from trove.common import utils utils.poll_until = fake_poll_until ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541818.0 trove-12.1.0.dev92/trove/tests/util/mysql.py0000644000175000017500000001403200000000000021144 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Rackspace Hosting # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
#

import re

from oslo_db.sqlalchemy import engines
import pexpect
from sqlalchemy.exc import OperationalError

try:
    from sqlalchemy.exc import ResourceClosedError
except ImportError:
    ResourceClosedError = Exception

from trove import tests
from trove.tests.config import CONFIG


def create_mysql_connection(host, user, password):
    connection = CONFIG.mysql_connection_method
    if connection['type'] == "direct":
        return SqlAlchemyConnection(host, user, password)
    elif connection['type'] == "tunnel":
        if 'ssh' not in connection:
            raise RuntimeError("If connection type is 'tunnel' then a "
                               "property 'ssh' is expected.")
        return PexpectMySqlConnection(connection['ssh'], host, user,
                                      password)
    else:
        raise RuntimeError("Bad test configuration: unknown "
                           "mysql_connection_method type.")


class MySqlConnectionFailure(RuntimeError):

    def __init__(self, msg):
        super(MySqlConnectionFailure, self).__init__(msg)


class MySqlPermissionsFailure(RuntimeError):

    def __init__(self, msg):
        super(MySqlPermissionsFailure, self).__init__(msg)


class SqlAlchemyConnection(object):

    def __init__(self, host, user, password):
        self.host = host
        self.user = user
        self.password = password
        try:
            self.engine = self._init_engine(user, password, host)
        except OperationalError as oe:
            if self._exception_is_permissions_issue(str(oe)):
                raise MySqlPermissionsFailure(oe)
            else:
                raise MySqlConnectionFailure(oe)

    @staticmethod
    def _exception_is_permissions_issue(msg):
        """Check whether the message cites a permissions issue rather than
        something else."""
        pos_error = re.compile(r".*Host '[\w\.]*' is not allowed to connect "
                               "to this MySQL server.*")
        pos_error1 = re.compile(".*Access denied for user "
                                r"'[\w\*\!\@\#\^\&]*'@'[\w\.]*'.*")
        if (pos_error.match(msg) or pos_error1.match(msg)):
            return True

    def __enter__(self):
        try:
            self.conn = self.engine.connect()
        except OperationalError as oe:
            if self._exception_is_permissions_issue(str(oe)):
                raise MySqlPermissionsFailure(oe)
            else:
                raise MySqlConnectionFailure(oe)
        self.trans = self.conn.begin()
        return self

    def execute(self, cmd):
        """Execute some code."""
        cmd = cmd.replace("%", "%%")
        try:
            return self.conn.execute(cmd).fetchall()
        except Exception:
            self.trans.rollback()
            self.trans = None
            try:
                raise
            except ResourceClosedError:
                return []

    def __exit__(self, type, value, traceback):
        if self.trans:
            if type is not None:  # An error occurred
                self.trans.rollback()
            else:
                self.trans.commit()
        self.conn.close()

    @staticmethod
    def _init_engine(user, password, host):
        return engines.create_engine(
            "mysql+pymysql://%s:%s@%s:3306" % (user, password, host))


class PexpectMySqlConnection(object):

    TIME_OUT = 30

    def __init__(self, ssh_args, host, user, password):
        self.host = host
        self.user = user
        self.password = password
        cmd = '%s %s' % (tests.SSH_CMD, ssh_args)
        self.proc = pexpect.spawn(cmd)
        print(cmd)
        self.proc.expect(r":~\$", timeout=self.TIME_OUT)
        cmd2 = "mysql --host '%s' -u '%s' '-p%s'\n" % \
            (self.host, self.user, self.password)
        print(cmd2)
        self.proc.send(cmd2)
        result = self.proc.expect([
            'mysql>',
            'Access denied',
            "Can't connect to MySQL server"],
            timeout=self.TIME_OUT)
        if result == 1:
            raise MySqlPermissionsFailure(self.proc.before)
        elif result == 2:
            raise MySqlConnectionFailure(self.proc.before)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.proc.close()

    def execute(self, cmd):
        self.proc.send(cmd + "\\G\n")
        outcome = self.proc.expect(['Empty set', 'mysql>'],
                                   timeout=self.TIME_OUT)
        if outcome == 0:
            return []
        else:
            # This next line might be invaluable for long test runs.
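            # The loop below parses MySQL's "\G" vertical output: each
            # record opens with a "*****... row ***" banner line, fields
            # arrive as "name: value" pairs, and the listing terminates
            # with an "N row(s) in set" summary line, which is what the
            # end_line value is built to match.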
print("Interpreting output: %s" % self.proc.before) lines = self.proc.before.split("\r\n") result = [] row = None for line in lines: plural_s = "s" if len(result) != 0 else "" end_line = "%d row%s in set" % ((len(result) + 1), plural_s) if len(result) == 0: end_line = "1 row in set" if (line.startswith("***************************") or line.startswith(end_line)): if row is not None: result.append(row) row = {} elif row is not None: colon = line.find(": ") field = line[:colon] value = line[colon + 2:] row[field] = value return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/util/server_connection.py0000644000175000017500000000614300000000000023530 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import subprocess from proboscis.asserts import fail import tenacity from trove import tests from trove.tests.config import CONFIG from trove.tests import util from trove.tests.util.users import Requirements def create_server_connection(instance_id, ip_address=None): if util.test_config.use_local_ovz: return OpenVZServerConnection(instance_id) return ServerSSHConnection(instance_id, ip_address=ip_address) class ServerSSHConnection(object): def __init__(self, instance_id, ip_address=None): if not ip_address: req_admin = Requirements(is_admin=True) user = util.test_config.users.find_user(req_admin) dbaas_admin = util.create_dbaas_client(user) instance = dbaas_admin.management.show(instance_id) mgmt_interfaces = instance.server["addresses"].get( CONFIG.trove_mgmt_network, [] ) mgmt_addresses = [str(inf["addr"]) for inf in mgmt_interfaces if inf["version"] == 4] if len(mgmt_addresses) == 0: fail("No IPV4 ip found for management network.") else: self.ip_address = mgmt_addresses[0] else: self.ip_address = ip_address TROVE_TEST_SSH_USER = os.environ.get('TROVE_TEST_SSH_USER') if TROVE_TEST_SSH_USER and '@' not in self.ip_address: self.ip_address = TROVE_TEST_SSH_USER + '@' + self.ip_address @tenacity.retry( wait=tenacity.wait_fixed(5), stop=tenacity.stop_after_attempt(3), retry=tenacity.retry_if_exception_type(subprocess.CalledProcessError) ) def execute(self, cmd): exe_cmd = "%s %s %s" % (tests.SSH_CMD, self.ip_address, cmd) print("RUNNING COMMAND: %s" % exe_cmd) output = util.process(exe_cmd) print("OUTPUT: %s" % output) return output class OpenVZServerConnection(object): def __init__(self, instance_id): self.instance_id = instance_id req_admin = Requirements(is_admin=True) self.user = util.test_config.users.find_user(req_admin) self.dbaas_admin = util.create_dbaas_client(self.user) self.instance = self.dbaas_admin.management.show(self.instance_id) self.instance_local_id = self.instance.server["local_id"] def execute(self, cmd): exe_cmd = "sudo vzctl exec %s %s" % (self.instance_local_id, cmd) print("RUNNING COMMAND: %s" % exe_cmd) return util.process(exe_cmd) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/util/usage.py0000644000175000017500000000564400000000000021114 0ustar00coreycorey00000000000000# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import defaultdict from oslo_log import log as logging import proboscis.asserts as asserts from proboscis.dependencies import SkipTest from trove.common import utils from trove.tests.config import CONFIG LOG = logging.getLogger(__name__) MESSAGE_QUEUE = defaultdict(list) def create_usage_verifier(): return utils.import_object(CONFIG.usage_endpoint) class UsageVerifier(object): def clear_events(self): """Hook that is called to allow endpoints to clean up.""" pass def check_message(self, resource_id, event_type, **attrs): messages = utils.poll_until(lambda: self.get_messages(resource_id), lambda x: len(x) > 0, time_out=30) found = None for message in messages: if message['event_type'] == event_type: found = message asserts.assert_is_not_none(found, "No message type %s for resource %s" % (event_type, resource_id)) with asserts.Check() as check: for key, value in attrs.items(): check.equal(found[key], value) def get_messages(self, resource_id, expected_messages=None): global MESSAGE_QUEUE msgs = MESSAGE_QUEUE.get(resource_id, []) if expected_messages is not None: asserts.assert_equal(len(msgs), expected_messages) return msgs class FakeVerifier(object): """This is the default handler in fake mode, it is basically a no-op.""" def clear_events(self): pass def check_message(self, *args, **kwargs): raise SkipTest("Notifications not available") def get_messages(self, *args, **kwargs): pass def notify(event_type, payload): """Simple test notify function which saves the messages to global list.""" payload['event_type'] = event_type if 'instance_id' in payload and 'server_type' not in payload: LOG.debug('Received Usage Notification: %s', event_type) resource_id = payload['instance_id'] global MESSAGE_QUEUE MESSAGE_QUEUE[resource_id].append(payload) LOG.debug('Message Queue for %(id)s now has %(msg_count)d messages', {'id': resource_id, 'msg_count': len(MESSAGE_QUEUE[resource_id])}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/util/users.py0000644000175000017500000001174000000000000021143 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
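# A minimal sketch of the fake-notification flow defined in
# trove/tests/util/usage.py above: notify() appends payloads to the global
# MESSAGE_QUEUE keyed by instance_id, and UsageVerifier.check_message()
# polls that queue and asserts on the payload attributes. The resource id
# and payload fields below are hypothetical.


def _demo_usage_verifier():
    from trove.tests.util import usage

    usage.notify('trove.instance.create',
                 {'instance_id': 'abc-123', 'memory_mb': 512})

    verifier = usage.UsageVerifier()
    # Passes only if a 'trove.instance.create' message for 'abc-123'
    # carries memory_mb == 512.
    verifier.check_message('abc-123', 'trove.instance.create',
                           memory_mb=512)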
"""Information on users / identities we can hit the services on behalf of. This code allows tests to grab from a set of users based on the features they possess instead of specifying exact identities in the test code. """ class Requirements(object): """Defines requirements a test has of a user.""" def __init__(self, is_admin=None, services=None): self.is_admin = is_admin self.services = services or ["trove"] # Make sure they're all the same kind of string. self.services = [str(service) for service in self.services] def satisfies(self, reqs): """True if these requirements conform to the given requirements.""" if reqs.is_admin is not None: # Only check if it was specified. if reqs.is_admin != self.is_admin: return False for service in reqs.services: if service not in self.services: return False return True def __str__(self): return "is_admin=%s, services=%s" % (self.is_admin, self.services) class ServiceUser(object): """Represents a user who uses a service. Importantly, this represents general information, such that a test can be written to state the general information about a user it needs (for example, if the user is an admin or not) rather than explicitly list users. """ def __init__(self, auth_user=None, auth_key=None, services=None, tenant=None, tenant_id=None, requirements=None): """Creates info on a user.""" self.auth_user = auth_user self.auth_key = auth_key self.tenant = tenant self.tenant_id = tenant_id self.requirements = requirements self.test_count = 0 if self.requirements.is_admin is None: raise ValueError("'is_admin' must be specified for a user.") def __str__(self): return ("{ user_name=%s, tenant=%s, tenant_id=%s, reqs=%s, tests=%d }" % (self.auth_user, self.tenant, self.tenant_id, self.requirements, self.test_count)) class Users(object): """Collection of users with methods to find them via requirements.""" def __init__(self, user_list): self.users = [] for user_dict in user_list: reqs = Requirements(**user_dict["requirements"]) user = ServiceUser(auth_user=user_dict["auth_user"], auth_key=user_dict["auth_key"], tenant=user_dict["tenant"], tenant_id=user_dict.get("tenant_id", None), requirements=reqs) self.users.append(user) def find_all_users_who_satisfy(self, requirements, black_list=None): """Returns a list of all users who satisfy the given requirements.""" black_list = black_list or [] return (user for user in self.users if user.auth_user not in black_list and user.requirements.satisfies(requirements)) def find_user(self, requirements, black_list=None): """Finds a user who meets the requirements and has been used least.""" users = self.find_all_users_who_satisfy(requirements, black_list) try: user = min(users, key=lambda user: user.test_count) except ValueError: # Raised when "users" is empty. 
raise RuntimeError("The test configuration data lacks a user " "who meets these requirements: %s" % requirements) user.test_count += 1 return user def _find_user_by_condition(self, condition): users = (user for user in self.users if condition(user)) try: user = min(users, key=lambda user: user.test_count) except ValueError: raise RuntimeError('Did not find a user with specified condition') user.test_count += 1 return user def find_user_by_name(self, name): """Finds a user who meets the requirements and has been used least.""" condition = lambda user: user.auth_user == name return self._find_user_by_condition(condition) def find_user_by_tenant_id(self, tenant_id): """Finds a user who meets the requirements and has been used least.""" condition = lambda user: user.tenant_id == tenant_id return self._find_user_by_condition(condition) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/tests/util/utils.py0000644000175000017500000000373300000000000021145 0ustar00coreycorey00000000000000# Copyright 2016 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from functools import wraps from oslo_log import log as logging LOG = logging.getLogger(__name__) def retry(expected_exception_cls, retries=3, delay_fun=lambda n: 3 * n): """Retry decorator. Executes the decorated function N times with a variable timeout on a given exception(s). :param expected_exception_cls: Handled exception classes. :type expected_exception_cls: class or tuple of classes :param delay_fun: The time delay in sec as a function of the number of attempts (n) already executed. :type delay_fun: callable """ def retry_deco(f): @wraps(f) def wrapper(*args, **kwargs): remaining_attempts = retries while remaining_attempts > 1: try: return f(*args, **kwargs) except expected_exception_cls: remaining_attempts -= 1 delay = delay_fun(retries - remaining_attempts) LOG.exception( "Retrying in %(delay)d seconds " "(remaining attempts: %(remaining)d)..." % {'delay': delay, 'remaining': remaining_attempts}) time.sleep(delay) return f(*args, **kwargs) return wrapper return retry_deco ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/version.py0000644000175000017500000000130000000000000017337 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
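# A minimal sketch of the retry decorator defined in
# trove/tests/util/utils.py above; FlakyError, flaky_call and the attempts
# counter are hypothetical names used only for illustration.


def _demo_retry():
    from trove.tests.util.utils import retry

    class FlakyError(Exception):
        pass

    attempts = {'n': 0}

    # delay_fun returns 0 so the demo does not actually sleep.
    @retry(FlakyError, retries=3, delay_fun=lambda n: 0)
    def flaky_call():
        attempts['n'] += 1
        if attempts['n'] < 3:
            raise FlakyError()
        return attempts['n']

    # The first two failures are absorbed; the third attempt succeeds.
    assert flaky_call() == 3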
import pbr.version version_info = pbr.version.VersionInfo('trove') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/versions.py0000644000175000017500000000613400000000000017534 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import routes from trove.common import cfg from trove.common import wsgi CONF = cfg.CONF VERSIONS = { "1.0": { "id": "v1.0", "status": "CURRENT", "updated": "2012-08-01T00:00:00Z", "links": [], }, } class VersionsController(wsgi.Controller): def index(self, request): """Respond to a request for API versions.""" versions = [] for key, data in VERSIONS.items(): v = BaseVersion( data["id"], data["status"], request.application_url, data["updated"]) versions.append(v) return wsgi.Result(VersionsDataView(versions)) def show(self, request): """Respond to a request for a specific API version.""" data = VERSIONS[request.url_version] v = Version(data["id"], data["status"], request.application_url, data["updated"]) return wsgi.Result(VersionDataView(v)) class BaseVersion(object): def __init__(self, id, status, base_url, updated): self.id = id self.status = status self.base_url = CONF.public_endpoint or base_url self.updated = updated def data(self): return { "id": self.id, "status": self.status, "updated": self.updated, "links": [{"rel": "self", "href": self.url()}], } def url(self): url = os.path.join(self.base_url, self.id) if not url.endswith("/"): return url + "/" return url class Version(BaseVersion): def url(self): if not self.base_url.endswith("/"): return self.base_url + "/" return self.base_url class VersionDataView(object): def __init__(self, version): self.version = version def data_for_json(self): return {'version': self.version.data()} class VersionsDataView(object): def __init__(self, versions): self.versions = versions def data_for_json(self): return {'versions': [version.data() for version in self.versions]} class VersionsAPI(wsgi.Router): def __init__(self): mapper = routes.Mapper() versions_resource = VersionsController().create_resource() mapper.connect("/", controller=versions_resource, action="index") super(VersionsAPI, self).__init__(mapper) def app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return VersionsAPI() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.8041112 trove-12.1.0.dev92/trove/volume_type/0000755000175000017500000000000000000000000017656 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/volume_type/__init__.py0000644000175000017500000000000000000000000021755 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 
trove-12.1.0.dev92/trove/volume_type/models.py0000644000175000017500000000462000000000000021515 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes that form the core of volume-support functionality""" from cinderclient import exceptions as cinder_exception from trove.common import clients from trove.common import exception as trove_exception from trove.common import models class VolumeType(object): _data_fields = ['id', 'name', 'is_public', 'description'] def __init__(self, volume_type=None): """Initialize a cinder client volume_type object""" self.volume_type = volume_type @classmethod def load(cls, volume_type_id, context=None, client=None): if not(client or context): raise trove_exception.InvalidModelError( "client or context must be provided to load a volume_type") if not client: client = clients.create_cinder_client(context) try: volume_type = client.volume_types.get(volume_type_id) except cinder_exception.NotFound: raise trove_exception.NotFound(uuid=volume_type_id) except cinder_exception.ClientException as ce: raise trove_exception.TroveError(str(ce)) return cls(volume_type) @property def id(self): return self.volume_type.id @property def name(self): return self.volume_type.name @property def is_public(self): return self.volume_type.is_public @property def description(self): return self.volume_type.description class VolumeTypes(models.CinderRemoteModelBase): def __init__(self, context): volume_types = clients.create_cinder_client( context).volume_types.list() self.volume_types = [VolumeType(volume_type=item) for item in volume_types] def __iter__(self): for item in self.volume_types: yield item ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/volume_type/service.py0000644000175000017500000000263500000000000021676 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
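# A minimal sketch of loading a volume type through the model defined in
# trove/volume_type/models.py above. Either a request context or a
# pre-built cinder client must be supplied, otherwise load() raises
# InvalidModelError; the context argument and the id value here are
# hypothetical.


def _demo_volume_type_lookup(context, volume_type_id='vt-uuid'):
    from trove.volume_type import models

    volume_type = models.VolumeType.load(volume_type_id, context=context)
    return {'id': volume_type.id,
            'name': volume_type.name,
            'is_public': volume_type.is_public}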
from trove.common import wsgi from trove.volume_type import models from trove.volume_type import views class VolumeTypesController(wsgi.Controller): """A controller for the Cinder Volume Types functionality.""" def show(self, req, tenant_id, id): """Return a single volume type.""" context = req.environ[wsgi.CONTEXT_KEY] volume_type = models.VolumeType.load(id, context=context) return wsgi.Result(views.VolumeTypeView(volume_type, req).data(), 200) def index(self, req, tenant_id): """Return all volume types.""" context = req.environ[wsgi.CONTEXT_KEY] volume_types = models.VolumeTypes(context=context) return wsgi.Result(views.VolumeTypesView(volume_types, req).data(), 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701857.0 trove-12.1.0.dev92/trove/volume_type/views.py0000644000175000017500000000266200000000000021373 0ustar00coreycorey00000000000000# Copyright 2016 Tesora, Inc # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class VolumeTypeView(object): def __init__(self, volume_type, req=None): self.volume_type = volume_type self.req = req def data(self): volume_type = { 'id': self.volume_type.id, 'name': self.volume_type.name, 'is_public': self.volume_type.is_public, 'description': self.volume_type.description } return {"volume_type": volume_type} class VolumeTypesView(object): def __init__(self, volume_types, req=None): self.volume_types = volume_types self.req = req def data(self): data = [] for volume_type in self.volume_types: data.append(VolumeTypeView(volume_type, req=self.req).data()['volume_type']) return {"volume_types": data} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586541819.7281098 trove-12.1.0.dev92/trove.egg-info/0000755000175000017500000000000000000000000017000 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541819.0 trove-12.1.0.dev92/trove.egg-info/PKG-INFO0000644000175000017500000000551700000000000020105 0ustar00coreycorey00000000000000Metadata-Version: 1.1 Name: trove Version: 12.1.0.dev92 Summary: OpenStack DBaaS Home-page: https://docs.openstack.org/trove/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ===== Trove ===== .. image:: https://governance.openstack.org/tc/badges/trove.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Trove is Database as a Service for OpenStack. Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://opendev.org/openstack/trove For information on how to contribute to trove, please see CONTRIBUTING.rst_ and HACKING.rst_ .. _CONTRIBUTING.rst: https://opendev.org/openstack/trove/src/branch/master/CONTRIBUTING.rst .. 
_HACKING.rst: https://opendev.org/openstack/trove/src/branch/master/HACKING.rst * `Wiki `_ * `Developer Docs `_ You can raise bugs here: `Bug Tracker `_ The plan for trove can be found at `Trove Specs `_ Release notes for the project can be found at: https://docs.openstack.org/releasenotes/trove Python client ------------- Python-troveclient_ is a client for Trove. .. _Python-troveclient: https://opendev.org/openstack/python-troveclient Dashboard plugin ---------------- Trove-dashboard_ is OpenStack dashbaord plugin for Trove. .. _Trove-dashboard: https://opendev.org/openstack/trove-dashboard References ---------- * `Installation docs`_ * `Manual installation docs`_ * `Build guest image`_ .. _Installation docs: https://docs.openstack.org/trove/latest/install/install.html .. _Manual installation docs: https://docs.openstack.org/trove/latest/install/manual_install.html .. _Build guest image: https://docs.openstack.org/trove/latest/admin/building_guest_images.html Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541819.0 trove-12.1.0.dev92/trove.egg-info/SOURCES.txt0000644000175000017500000015653100000000000020677 0ustar00coreycorey00000000000000.coveragerc .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst babel.cfg bindep.txt generate_examples.py lower-constraints.txt pylintrc requirements.txt run_tests.py setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/api-versions.inc api-ref/source/backups.inc api-ref/source/conf.py api-ref/source/configurations.inc api-ref/source/databases.inc api-ref/source/datastore-versions.inc api-ref/source/datastores.inc api-ref/source/index.rst api-ref/source/instance-actions.inc api-ref/source/instance-logs.inc api-ref/source/instances.inc api-ref/source/parameters.yaml api-ref/source/quotas.inc api-ref/source/users.inc api-ref/source/samples/backup-create-request.json api-ref/source/samples/backup-create-response.json api-ref/source/samples/backup-get-response.json api-ref/source/samples/backup-list-response.json api-ref/source/samples/config-group-create-request.json api-ref/source/samples/config-group-create-response.json api-ref/source/samples/config-group-list-instances-response.json api-ref/source/samples/config-group-patch-request.json api-ref/source/samples/config-group-put-request.json api-ref/source/samples/config-group-show-response.json api-ref/source/samples/config-groups-list-response.json api-ref/source/samples/databases-create-request.json api-ref/source/samples/databases-list-response.json api-ref/source/samples/datastore-list-response.json api-ref/source/samples/datastore-show-response.json api-ref/source/samples/datastore-version-create-request.json api-ref/source/samples/datastore-version-list-response.json api-ref/source/samples/datastore-version-mgmt-list-response.json api-ref/source/samples/datastore-version-mgmt-patch-request.json api-ref/source/samples/datastore-version-mgmt-show-response.json api-ref/source/samples/datastore-version-parameter-create-request.json 
api-ref/source/samples/datastore-version-parameter-create-response.json api-ref/source/samples/datastore-version-parameter-list-response.json api-ref/source/samples/datastore-version-parameter-show-response.json api-ref/source/samples/datastore-version-parameter-update-request.json api-ref/source/samples/datastore-version-parameter-update-response.json api-ref/source/samples/datastore-version-show-response.json api-ref/source/samples/instance-action-eject-replica-request.json api-ref/source/samples/instance-action-promote-replica-request.json api-ref/source/samples/instance-action-reset-status-request.json api-ref/source/samples/instance-action-resize-request.json api-ref/source/samples/instance-action-resize-volume-request.json api-ref/source/samples/instance-action-restart-request.json api-ref/source/samples/instance-backup-list-response.json api-ref/source/samples/instance-configuration-list-response.json api-ref/source/samples/instance-create-request.json api-ref/source/samples/instance-create-response.json api-ref/source/samples/instance-list-detail-response.json api-ref/source/samples/instance-list-response.json api-ref/source/samples/instance-log-disable-request.json api-ref/source/samples/instance-log-disable-response.json api-ref/source/samples/instance-log-discard-request.json api-ref/source/samples/instance-log-discard-response.json api-ref/source/samples/instance-log-enable-request.json api-ref/source/samples/instance-log-enable-response.json api-ref/source/samples/instance-log-list-response.json api-ref/source/samples/instance-log-publish-request.json api-ref/source/samples/instance-log-publish-response.json api-ref/source/samples/instance-log-show-request.json api-ref/source/samples/instance-log-show-response.json api-ref/source/samples/instance-mgmt-action-migrate-request.json api-ref/source/samples/instance-mgmt-action-reboot-request.json api-ref/source/samples/instance-mgmt-action-reset-task-status-request.json api-ref/source/samples/instance-mgmt-action-stop-request.json api-ref/source/samples/instance-mgmt-list-response.json api-ref/source/samples/instance-mgmt-show-response.json api-ref/source/samples/instance-patch-detach-replica-request.json api-ref/source/samples/instance-patch-update-name-request.json api-ref/source/samples/instance-patch-upgrade-datastore-version-request.json api-ref/source/samples/instance-put-attach-config-group-request.json api-ref/source/samples/instance-show-response.json api-ref/source/samples/limit-show-response.json api-ref/source/samples/quota-show-response.json api-ref/source/samples/quota-update.json api-ref/source/samples/user-check-root-response.json api-ref/source/samples/user-create-request.json api-ref/source/samples/user-enable-root-response.json api-ref/source/samples/user-grant-databases-access-request.json api-ref/source/samples/user-list-response.json api-ref/source/samples/user-put-request.json api-ref/source/samples/user-show-response.json api-ref/source/samples/user-show-root-history-response.json api-ref/source/samples/users-put-request.json api-ref/source/samples/versions-response.json contrib/trove-guestagent devstack/README.rst devstack/plugin.sh devstack/settings devstack/files/apache-trove-api.template devstack/files/debs/trove devstack/files/rpms/trove devstack/files/rpms-suse/trove doc/source/conf.py doc/source/index.rst doc/source/admin/building_guest_images.rst doc/source/admin/database_module_usage.rst doc/source/admin/datastore.rst doc/source/admin/index.rst doc/source/admin/run_trove_in_production.rst 
doc/source/admin/secure_oslo_messaging.rst doc/source/cli/index.rst doc/source/cli/trove-manage.rst doc/source/cli/trove-status.rst doc/source/contributor/contributing.rst doc/source/contributor/design.rst doc/source/contributor/index.rst doc/source/contributor/testing.rst doc/source/install/apache-mod-wsgi.rst doc/source/install/common_configure.txt doc/source/install/common_prerequisites.txt doc/source/install/dashboard.rst doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/install-devstack.rst doc/source/install/install-manual.rst doc/source/install/install-redhat.rst doc/source/install/install-suse.rst doc/source/install/install-ubuntu.rst doc/source/install/next-steps.rst doc/source/install/verify.rst doc/source/reference/index.rst doc/source/reference/notifier.rst doc/source/reference/trove_api_extensions.rst doc/source/user/backup-db-incremental.rst doc/source/user/backup-db.rst doc/source/user/create-db.rst doc/source/user/index.rst doc/source/user/instance-status.rst doc/source/user/manage-db-and-users.rst doc/source/user/manage-db-config.rst doc/source/user/set-up-clustering.rst doc/source/user/set-up-replication.rst doc/source/user/upgrade-cluster-datastore.rst doc/source/user/upgrade-datastore.rst etc/apache2/trove etc/tests/core.test.conf etc/tests/localhost.test.conf etc/trove/README-policy.generated.md etc/trove/api-paste.ini etc/trove/api-paste.ini.test etc/trove/trove-guestagent.conf.sample etc/trove/trove-logging-guestagent.conf etc/trove/trove-workbook.yaml etc/trove/trove.conf.sample etc/trove/trove.conf.test etc/trove/cloudinit/README etc/trove/conf.d/README etc/trove/conf.d/guest_info.conf integration/README.md integration/scripts/conf.json.example integration/scripts/create_vm integration/scripts/functions integration/scripts/functions_qemu integration/scripts/image-projects-list integration/scripts/local.conf.rc integration/scripts/localrc.rc integration/scripts/projects-list integration/scripts/reviews.rc integration/scripts/trovestack integration/scripts/trovestack.rc integration/scripts/conf/cassandra.conf integration/scripts/conf/couchbase.conf integration/scripts/conf/couchdb.conf integration/scripts/conf/db2.conf integration/scripts/conf/mariadb.conf integration/scripts/conf/mongodb.conf integration/scripts/conf/mysql.conf integration/scripts/conf/percona.conf integration/scripts/conf/postgresql.conf integration/scripts/conf/pxc.conf integration/scripts/conf/redis.conf integration/scripts/conf/test_begin.conf integration/scripts/conf/test_end.conf integration/scripts/conf/vertica.conf integration/scripts/files/trove-guest.systemd.conf integration/scripts/files/trove-guest.upstart.conf integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/15-trove-dep integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/20-guest-systemd integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/62-ssh-key integration/scripts/files/deprecated-elements/fedora-guest/install.d/15-trove-dep integration/scripts/files/deprecated-elements/fedora-guest/install.d/20-etc integration/scripts/files/deprecated-elements/fedora-guest/install.d/21-use-fedora-certificates integration/scripts/files/deprecated-elements/fedora-guest/install.d/50-user integration/scripts/files/deprecated-elements/fedora-guest/install.d/62-ssh-key integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/05-ipforwarding integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/62-trove-guest-sudoers 
integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/90-yum-update integration/scripts/files/deprecated-elements/fedora-mariadb/README.md integration/scripts/files/deprecated-elements/fedora-mariadb/install.d/10-mariadb integration/scripts/files/deprecated-elements/fedora-mariadb/pre-install.d/10-percona-copr integration/scripts/files/deprecated-elements/fedora-mongodb/README.md integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/10-mongodb integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/25-trove-mongo-dep integration/scripts/files/deprecated-elements/fedora-mysql/README.md integration/scripts/files/deprecated-elements/fedora-mysql/install.d/10-mysql integration/scripts/files/deprecated-elements/fedora-mysql/install.d/40-xtrabackup integration/scripts/files/deprecated-elements/fedora-mysql/post-install.d/30-register-mysql-service integration/scripts/files/deprecated-elements/fedora-percona/install.d/05-percona-server integration/scripts/files/deprecated-elements/fedora-percona/install.d/10-mysql integration/scripts/files/deprecated-elements/fedora-postgresql/install.d/10-postgresql integration/scripts/files/deprecated-elements/fedora-redis/README.md integration/scripts/files/deprecated-elements/fedora-redis/install.d/10-redis integration/scripts/files/deprecated-elements/ubuntu-cassandra/install.d/10-cassandra integration/scripts/files/deprecated-elements/ubuntu-couchbase/install.d/10-couchbase integration/scripts/files/deprecated-elements/ubuntu-couchdb/install.d/10-couchdb integration/scripts/files/deprecated-elements/ubuntu-db2/README.md integration/scripts/files/deprecated-elements/ubuntu-db2/extra-data.d/20-copy-db2-pkgs integration/scripts/files/deprecated-elements/ubuntu-db2/install.d/10-db2 integration/scripts/files/deprecated-elements/ubuntu-mongodb/README.md integration/scripts/files/deprecated-elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key integration/scripts/files/deprecated-elements/ubuntu-percona/install.d/30-mysql integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/10-percona-apt-key integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/20-apparmor-mysql-local integration/scripts/files/deprecated-elements/ubuntu-pxc/install.d/30-mysql integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/10-percona-apt-key integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/20-apparmor-mysql-local integration/scripts/files/deprecated-elements/ubuntu-redis/README.md integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/30-redis integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/80-fix-in-guest-agent-env integration/scripts/files/deprecated-elements/ubuntu-vertica/README.md integration/scripts/files/deprecated-elements/ubuntu-vertica/extra-data.d/93-copy-vertica-deb integration/scripts/files/deprecated-elements/ubuntu-vertica/install.d/97-vertica integration/scripts/files/deprecated-elements/ubuntu-xenial-cassandra/element-deps integration/scripts/files/deprecated-elements/ubuntu-xenial-couchbase/element-deps integration/scripts/files/deprecated-elements/ubuntu-xenial-couchdb/element-deps integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/element-deps integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/10-mongodb-thp integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/20-mongodb 
integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/25-trove-mongo-dep integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/30-mongodb-conf integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/35-check-numa integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/40-check-numa-systemd integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/41-mongod-systemd integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/42-mongos-systemd integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/element-deps integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/post-install.d/10-fix-mycnf integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/element-deps integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/element-deps integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/install.d/31-fix-init-file integration/scripts/files/elements/apt-conf-dir/README.rst integration/scripts/files/elements/apt-conf-dir/extra-data.d/99-use-host-apt-confd integration/scripts/files/elements/guest-agent/README.rst integration/scripts/files/elements/guest-agent/element-deps integration/scripts/files/elements/guest-agent/package-installs.yaml integration/scripts/files/elements/guest-agent/pkg-map integration/scripts/files/elements/guest-agent/source-repository-guest-agent integration/scripts/files/elements/guest-agent/svc-map integration/scripts/files/elements/guest-agent/environment.d/99-reliable-apt-key-importing.bash integration/scripts/files/elements/guest-agent/install.d/50-user integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/75-guest-agent-install integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.conf integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.init integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.logrotate integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.service integration/scripts/files/elements/guest-agent/post-install.d/11-enable-guest-agent-systemd integration/scripts/files/elements/guest-agent/post-install.d/99-clean-apt integration/scripts/files/elements/no-resolvconf/README.rst integration/scripts/files/elements/no-resolvconf/finalise.d/99-disable-resolv-conf integration/scripts/files/elements/ubuntu-guest/environment.d/99-reliable-apt-key-importing.bash integration/scripts/files/elements/ubuntu-guest/extra-data.d/15-trove-dep integration/scripts/files/elements/ubuntu-guest/extra-data.d/62-ssh-key integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep integration/scripts/files/elements/ubuntu-guest/install.d/50-user integration/scripts/files/elements/ubuntu-guest/install.d/62-ssh-key integration/scripts/files/elements/ubuntu-guest/install.d/98-ssh integration/scripts/files/elements/ubuntu-guest/install.d/99-clean-apt integration/scripts/files/elements/ubuntu-guest/post-install.d/05-ipforwarding integration/scripts/files/elements/ubuntu-guest/post-install.d/10-ntp integration/scripts/files/elements/ubuntu-guest/post-install.d/62-trove-guest-sudoers 
integration/scripts/files/elements/ubuntu-guest/post-install.d/90-apt-get-update integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools integration/scripts/files/elements/ubuntu-mariadb/README.md integration/scripts/files/elements/ubuntu-mariadb/pre-install.d/20-apparmor-mysql-local integration/scripts/files/elements/ubuntu-mysql/pre-install.d/10-percona-apt-key integration/scripts/files/elements/ubuntu-mysql/pre-install.d/20-apparmor-mysql-local integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo integration/scripts/files/elements/ubuntu-xenial-guest/element-deps integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates integration/scripts/files/elements/ubuntu-xenial-guest/post-install.d/91-hwe-kernel integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps integration/scripts/files/keys/authorized_keys integration/scripts/files/keys/id_rsa integration/scripts/files/keys/id_rsa.pub integration/scripts/files/requirements/fedora-requirements.txt integration/scripts/files/requirements/ubuntu-requirements.txt integration/scripts/local.conf.d/ceilometer_cinder.conf.rc integration/scripts/local.conf.d/ceilometer_nova.conf.rc integration/scripts/local.conf.d/ceilometer_services.conf.rc integration/scripts/local.conf.d/sample.rc integration/scripts/local.conf.d/trove_services.conf.rc integration/scripts/local.conf.d/use_kvm.rc integration/scripts/local.conf.d/use_uuid_token.rc integration/scripts/local.conf.d/using_vagrant.rc integration/tests/integration/core.test.conf integration/tests/integration/int_tests.py integration/tests/integration/localhost.test.conf integration/tests/integration/tests/__init__.py integration/tests/integration/tests/colorizer.py integration/tests/integration/tests/initialize.py integration/tests/integration/tests/util/__init__.py integration/tests/integration/tests/util/report.py integration/tests/integration/tests/util/rpc.py integration/tests/integration/tests/util/services.py playbooks/trove-devstack-base.yaml playbooks/image-build/post.yaml playbooks/image-build/run.yaml playbooks/legacy/grenade-dsvm-trove/post.yaml playbooks/legacy/grenade-dsvm-trove/run.yaml releasenotes/notes/.placeholder releasenotes/notes/add-cassandra-log-retrieval-a295f3d0d4c56804.yaml releasenotes/notes/add-cors-support-fe3ecbecb68f7efd.yaml releasenotes/notes/add-designate-v2-dns-driver-8d1be56ab2c71b83.yaml releasenotes/notes/add-icmp-flag-58937cce344e77d9.yaml releasenotes/notes/add-instance-detailed-list-e712dccf6c9091c0.yaml releasenotes/notes/add-max-prep-stmts-ac1056e127de7609.yaml releasenotes/notes/add-new-relic-license-driver-0f314edabb7561c4.yaml releasenotes/notes/alter-user-portable-021f4b792e2c129b.yaml releasenotes/notes/associate-volume-type-datastore-97defb9279b61c1f.yaml releasenotes/notes/avoid-diverged-slave-when-migrating-mariadb-master-37e2429a1ea75913.yaml 
releasenotes/notes/cassandra-backup-and-restore-00de234de67ea5ee.yaml releasenotes/notes/cassandra-configuration-groups-e6bcf4014a79f14f.yaml releasenotes/notes/cassandra-user-functions-041abfa4f4baa591.yaml releasenotes/notes/cluster-configuration-groups-37f7de9e5a343165.yaml releasenotes/notes/cluster-notifications-fd205f5f0148b052.yaml releasenotes/notes/cluster-volume-type-901329a3b3667cb4.yaml releasenotes/notes/cluster_list_show_all_ips-3547635440.yaml releasenotes/notes/cluster_restart-bb5abb7372131ee0.yaml releasenotes/notes/couchdb-backup-restore-0cc3324c3088f947.yaml releasenotes/notes/couchdb-user-db-functions-fa41ac47fce095cb.yaml releasenotes/notes/datastore-manager-refactor-5aeac4e6bfa6e07b.yaml releasenotes/notes/db2-backup-restore-96ab214cddd15181.yaml releasenotes/notes/db2-configuration-groups-ca2164be741d35f9.yaml releasenotes/notes/db2-online-backup-restore-3783afe752562e70.yaml releasenotes/notes/dbaas-ceilometer-notifications-5a623d0d6520be72.yaml releasenotes/notes/deprecate-default_neutron_networks-84cd00224d6b7bc1.yaml releasenotes/notes/deprecate-long-query-time-b85af24772e2e7cb.yaml releasenotes/notes/disply_module_bools_properly-571cca9a87f28339.yaml releasenotes/notes/drop-py-2-7-010fe6df0c10352d.yaml releasenotes/notes/drop-python-26-support-39dff0c5636edc74.yaml releasenotes/notes/fix-apply-configuration-on-prepare-4cff827b7f3c4d33.yaml releasenotes/notes/fix-bad-swift-endpoint-in-guestlog-05f7483509dacbbf.yaml releasenotes/notes/fix-cluster-show-346798b3e3.yaml releasenotes/notes/fix-cluster-type-error-71cd846897dfd32e.yaml releasenotes/notes/fix-deprecated-SafeConfigParse-ca3fd3e9f52a8cc8.yaml releasenotes/notes/fix-galera_common-cluster-shrink-e2c80913423772dd.yaml releasenotes/notes/fix-gtid-parsing-9f60ad6e9e8f173f.yaml releasenotes/notes/fix-module-apply-after-remove-97c84c30fb320a46.yaml releasenotes/notes/fix-mongo-cluster-grow-8fa4788af0ce5309.yaml releasenotes/notes/fix-mysql-replication-bf2b131994a5a772.yaml releasenotes/notes/fix-mysql-replication-ca0928069c0bfab8.yaml releasenotes/notes/fix-postgres-pg-rewind-6eef0afb568439ce.yaml releasenotes/notes/fix-redis-configuration-f0543ede84f8aac3.yaml releasenotes/notes/fix-trove-events-8ce54233504065cf.yaml releasenotes/notes/fix_mod_inst_cmd-3a46c7233e3.yaml releasenotes/notes/fix_module_apply-042fc6e61f721540.yaml releasenotes/notes/fix_module_driver_logging-666601f411db784a.yaml releasenotes/notes/fix_notification_err_msgs-e52771108633c9cf.yaml releasenotes/notes/fixes-mariadb-config-groups-b5fa4f44a8ed7b85.yaml releasenotes/notes/flavor-list-disk-6213c3760e374441.yaml releasenotes/notes/flavor-list-ephemeral-edf2dc35d5c247b3.yaml releasenotes/notes/flavor-list-vcpu-817b0f5715820377.yaml releasenotes/notes/force_delete-c2b06dbead554726.yaml releasenotes/notes/grow-cluster-nic-az-0e0fe4083666c300.yaml releasenotes/notes/guest-call-timeout-2781a57ca8feb89a.yaml releasenotes/notes/implement-cassandra-clustering-9f7bc3ae6817c19e.yaml releasenotes/notes/implement-cassandra-root-b0870d23dbf1a848.yaml releasenotes/notes/implement-mariadb-clustering-088ac2f6012689fb.yaml releasenotes/notes/implement-redis-root-347b5ee0107debb5.yaml releasenotes/notes/improve-mysql-user-list-pagination-71457d934500f817.yaml releasenotes/notes/incremental_backup-1910ded0fc3474a3.yaml releasenotes/notes/instance-show-comp-vol-id-964db9f52a5ac9c1.yaml releasenotes/notes/instance-upgrade-7d464f85e025d729.yaml releasenotes/notes/locality-support-for-clusters-78bb74145d867df2.yaml 
releasenotes/notes/locality-support-for-replication-01d9b05d27b92d82.yaml releasenotes/notes/make-password-length-datastore-specific-7cdb1bfeab6e6227.yaml releasenotes/notes/mariadb-gtid-replication-1ea972bcfe909773.yaml releasenotes/notes/mask-configuration-passwords-317ff6d2415b2ca1.yaml releasenotes/notes/module-management-66d3979cc45ed440.yaml releasenotes/notes/module-ordering-92b6445a8ac3a3bf.yaml releasenotes/notes/module-support-for-clusters-87b41dd7648275bf.yaml releasenotes/notes/module_reapply-342c0965a4318d4e.yaml releasenotes/notes/module_reapply_update_values-1fb88dc58701368d.yaml releasenotes/notes/mongo-cluster-create-use-extended-perperties-ced87fde31c6c110.yaml releasenotes/notes/mongo-cluster-grow-use-az-and-nic-values-207b041113e7b4fb.yaml releasenotes/notes/mountpoint-detection-096734f0097eb75a.yaml releasenotes/notes/multi-region-cd8da560bfe00de5.yaml releasenotes/notes/mysql-config-preserve-types-77b970162bf6df08.yaml releasenotes/notes/mysql-root-fix-35079552e25170ca.yaml releasenotes/notes/mysql-user-list-pagination-9496c401c180f605.yaml releasenotes/notes/percona-2.3-support-2eab8f12167e44bc.yaml releasenotes/notes/persist-error-message-fb69ddf885bcde84.yaml releasenotes/notes/pgsql-incremental-backup-acb4421f7de3ac09.yaml releasenotes/notes/pgsql-streaming-replication-f4df7e4047988b21.yaml releasenotes/notes/post-upgrade-fixes-828811607826d433.yaml releasenotes/notes/postgres-user-list-race-46624dc9e4420e02.yaml releasenotes/notes/postgresql-use-proper-guestagent-models-7ba601c7b4c001d6.yaml releasenotes/notes/pxc-cluster-root-enable-30c366e3b5bcda51.yaml releasenotes/notes/pxc-grow-shrink-0b1ee689cbc77743.yaml releasenotes/notes/quota-management-3792cbc25ebe16bb.yaml releasenotes/notes/redis-upgrade-63769ddb1b546cb9.yaml releasenotes/notes/remove-override-templates-85429da7f66e006a.yaml releasenotes/notes/remove-support-of-use-nova-server-volume-2a334f57d8213810.yaml releasenotes/notes/return-http-204-for-disable-root-api-a818fc41fd6e75eb.yaml releasenotes/notes/reuse-cassandra-connections-092cf2a762a2e796.yaml releasenotes/notes/secure-mongodb-instances-1e6d7df3febab8f4.yaml releasenotes/notes/slo-backups-3c35135316f837e1.yaml releasenotes/notes/support-nova-keypair-a2cdb2da5c1511e9.yaml releasenotes/notes/train-01-backup-filtering-90ff6deac7b411e9.yaml releasenotes/notes/train-02-management-security-group.yaml releasenotes/notes/train-03-public-trove-instance-8ec456bed46411e9.yaml releasenotes/notes/train-04-public-trove-images-127300c0df6c11e9.yaml releasenotes/notes/trove-status-upgrade-check-framework-b9d3d3e2463ec26d.yaml releasenotes/notes/update-myisam-recover-opt-232b9d680bc362bf.yaml releasenotes/notes/use-oslo-policy-bbd1b911e6487c36.yaml releasenotes/notes/use-osprofiler-options-58263c311617b127.yaml releasenotes/notes/ussuri-add-service-status-updated.yaml releasenotes/notes/ussuri-admin-clients-a14514a835ae11ea.yaml releasenotes/notes/ussuri-database-instance-healthy.yaml releasenotes/notes/ussuri-delete-datastoredad784e2345711ea.yaml releasenotes/notes/ussuri-service-credential-config.yaml releasenotes/notes/ussuri-support-xfs-disk-format.yaml releasenotes/notes/vertica-configuration-groups-710c892c1e3d6a90.yaml releasenotes/notes/vertica-grow-shrink-cluster-e32d48f5b2e1bfab.yaml releasenotes/notes/vertica-load-via-curl-call-4d47c4e0b1b53471.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst 
releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po roles/trove-devstack/README roles/trove-devstack/defaults/main.yml roles/trove-devstack/tasks/main.yml tools/install_venv.py tools/start-fake-mode.sh tools/stop-fake-mode.sh tools/trove-policy-generator.conf tools/trove-pylint.README tools/trove-pylint.config tools/trove-pylint.py tools/with_venv.sh trove/README trove/__init__.py trove/rpc.py trove/version.py trove/versions.py trove.egg-info/PKG-INFO trove.egg-info/SOURCES.txt trove.egg-info/dependency_links.txt trove.egg-info/entry_points.txt trove.egg-info/not-zip-safe trove.egg-info/pbr.json trove.egg-info/requires.txt trove.egg-info/top_level.txt trove/backup/__init__.py trove/backup/models.py trove/backup/service.py trove/backup/state.py trove/backup/views.py trove/cluster/__init__.py trove/cluster/models.py trove/cluster/service.py trove/cluster/tasks.py trove/cluster/views.py trove/cmd/__init__.py trove/cmd/api.py trove/cmd/app.wsgi trove/cmd/common.py trove/cmd/conductor.py trove/cmd/fakemode.py trove/cmd/guest.py trove/cmd/manage.py trove/cmd/status.py trove/cmd/taskmanager.py trove/common/__init__.py trove/common/api.py trove/common/apischema.py trove/common/auth.py trove/common/base_exception.py trove/common/base_wsgi.py trove/common/cfg.py trove/common/clients.py trove/common/clients_admin.py trove/common/configurations.py trove/common/context.py trove/common/crypto_utils.py trove/common/debug_utils.py trove/common/exception.py trove/common/extensions.py trove/common/i18n.py trove/common/instance.py trove/common/limits.py trove/common/local.py trove/common/models.py trove/common/neutron.py trove/common/notification.py trove/common/pagination.py trove/common/pastedeploy.py trove/common/policy.py trove/common/profile.py trove/common/serializable_notification.py trove/common/server_group.py trove/common/stream_codecs.py trove/common/template.py trove/common/timeutils.py trove/common/trove_remote.py trove/common/utils.py trove/common/views.py trove/common/wsgi.py trove/common/xmlutils.py trove/common/db/__init__.py trove/common/db/models.py trove/common/db/cassandra/__init__.py trove/common/db/cassandra/models.py trove/common/db/couchdb/__init__.py trove/common/db/couchdb/models.py trove/common/db/mongodb/__init__.py trove/common/db/mongodb/models.py trove/common/db/mysql/__init__.py trove/common/db/mysql/data.py trove/common/db/mysql/models.py trove/common/db/postgresql/__init__.py trove/common/db/postgresql/models.py trove/common/db/redis/__init__.py trove/common/db/redis/models.py trove/common/policies/__init__.py trove/common/policies/backups.py trove/common/policies/base.py trove/common/policies/clusters.py trove/common/policies/configuration_parameters.py trove/common/policies/configurations.py trove/common/policies/databases.py trove/common/policies/datastores.py trove/common/policies/flavors.py trove/common/policies/instances.py trove/common/policies/limits.py trove/common/policies/modules.py trove/common/policies/root.py trove/common/policies/user_access.py trove/common/policies/users.py trove/common/rpc/__init__.py trove/common/rpc/conductor_guest_serializer.py trove/common/rpc/conductor_host_serializer.py trove/common/rpc/secure_serializer.py 
trove/common/rpc/serializer.py trove/common/rpc/service.py trove/common/rpc/version.py trove/common/schemas/atom-link.rng trove/common/schemas/atom.rng trove/common/schemas/v1.1/limits.rng trove/common/strategies/__init__.py trove/common/strategies/strategy.py trove/common/strategies/cluster/__init__.py trove/common/strategies/cluster/base.py trove/common/strategies/cluster/strategy.py trove/common/strategies/cluster/experimental/__init__.py trove/common/strategies/cluster/experimental/cassandra/__init__.py trove/common/strategies/cluster/experimental/cassandra/api.py trove/common/strategies/cluster/experimental/cassandra/guestagent.py trove/common/strategies/cluster/experimental/cassandra/taskmanager.py trove/common/strategies/cluster/experimental/galera_common/__init__.py trove/common/strategies/cluster/experimental/galera_common/api.py trove/common/strategies/cluster/experimental/galera_common/guestagent.py trove/common/strategies/cluster/experimental/galera_common/taskmanager.py trove/common/strategies/cluster/experimental/mongodb/__init__.py trove/common/strategies/cluster/experimental/mongodb/api.py trove/common/strategies/cluster/experimental/mongodb/guestagent.py trove/common/strategies/cluster/experimental/mongodb/taskmanager.py trove/common/strategies/cluster/experimental/redis/__init__.py trove/common/strategies/cluster/experimental/redis/api.py trove/common/strategies/cluster/experimental/redis/guestagent.py trove/common/strategies/cluster/experimental/redis/taskmanager.py trove/common/strategies/cluster/experimental/vertica/__init__.py trove/common/strategies/cluster/experimental/vertica/api.py trove/common/strategies/cluster/experimental/vertica/guestagent.py trove/common/strategies/cluster/experimental/vertica/taskmanager.py trove/common/strategies/storage/__init__.py trove/common/strategies/storage/base.py trove/common/strategies/storage/swift.py trove/common/strategies/storage/experimental/__init__.py trove/conductor/__init__.py trove/conductor/api.py trove/conductor/manager.py trove/conductor/models.py trove/configuration/__init__.py trove/configuration/models.py trove/configuration/service.py trove/configuration/views.py trove/datastore/__init__.py trove/datastore/models.py trove/datastore/service.py trove/datastore/views.py trove/db/__init__.py trove/db/models.py trove/db/sqlalchemy/__init__.py trove/db/sqlalchemy/api.py trove/db/sqlalchemy/mappers.py trove/db/sqlalchemy/migration.py trove/db/sqlalchemy/session.py trove/db/sqlalchemy/utils.py trove/db/sqlalchemy/migrate_repo/README trove/db/sqlalchemy/migrate_repo/__init__.py trove/db/sqlalchemy/migrate_repo/manage.py trove/db/sqlalchemy/migrate_repo/migrate.cfg trove/db/sqlalchemy/migrate_repo/schema.py trove/db/sqlalchemy/migrate_repo/versions/001_base_schema.py trove/db/sqlalchemy/migrate_repo/versions/002_service_images.py trove/db/sqlalchemy/migrate_repo/versions/003_service_statuses.py trove/db/sqlalchemy/migrate_repo/versions/004_root_enabled.py trove/db/sqlalchemy/migrate_repo/versions/005_heartbeat.py trove/db/sqlalchemy/migrate_repo/versions/006_dns_records.py trove/db/sqlalchemy/migrate_repo/versions/007_add_volume_flavor.py trove/db/sqlalchemy/migrate_repo/versions/008_add_instance_fields.py trove/db/sqlalchemy/migrate_repo/versions/009_add_deleted_flag_to_instances.py trove/db/sqlalchemy/migrate_repo/versions/010_add_usage.py trove/db/sqlalchemy/migrate_repo/versions/011_quota.py trove/db/sqlalchemy/migrate_repo/versions/012_backup.py 
trove/db/sqlalchemy/migrate_repo/versions/013_add_security_group_artifacts.py trove/db/sqlalchemy/migrate_repo/versions/014_update_instance_flavor_id.py trove/db/sqlalchemy/migrate_repo/versions/015_add_service_type.py trove/db/sqlalchemy/migrate_repo/versions/016_add_datastore_type.py trove/db/sqlalchemy/migrate_repo/versions/017_update_datastores.py trove/db/sqlalchemy/migrate_repo/versions/018_datastore_versions_fix.py trove/db/sqlalchemy/migrate_repo/versions/019_datastore_fix.py trove/db/sqlalchemy/migrate_repo/versions/020_configurations.py trove/db/sqlalchemy/migrate_repo/versions/021_conductor_last_seen.py trove/db/sqlalchemy/migrate_repo/versions/022_add_backup_parent_id.py trove/db/sqlalchemy/migrate_repo/versions/023_add_instance_indexes.py trove/db/sqlalchemy/migrate_repo/versions/024_add_backup_indexes.py trove/db/sqlalchemy/migrate_repo/versions/025_add_service_statuses_indexes.py trove/db/sqlalchemy/migrate_repo/versions/026_datastore_versions_unique_fix.py trove/db/sqlalchemy/migrate_repo/versions/027_add_datastore_capabilities.py trove/db/sqlalchemy/migrate_repo/versions/028_recreate_agent_heartbeat.py trove/db/sqlalchemy/migrate_repo/versions/029_add_backup_datastore.py trove/db/sqlalchemy/migrate_repo/versions/030_add_master_slave.py trove/db/sqlalchemy/migrate_repo/versions/031_add_timestamps_to_configurations.py trove/db/sqlalchemy/migrate_repo/versions/032_clusters.py trove/db/sqlalchemy/migrate_repo/versions/033_datastore_parameters.py trove/db/sqlalchemy/migrate_repo/versions/034_change_task_description.py trove/db/sqlalchemy/migrate_repo/versions/035_flavor_id_int_to_string.py trove/db/sqlalchemy/migrate_repo/versions/036_add_datastore_version_metadata.py trove/db/sqlalchemy/migrate_repo/versions/037_modules.py trove/db/sqlalchemy/migrate_repo/versions/038_instance_faults.py trove/db/sqlalchemy/migrate_repo/versions/039_region.py trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py trove/db/sqlalchemy/migrate_repo/versions/042_add_cluster_configuration_id.py trove/db/sqlalchemy/migrate_repo/versions/__init__.py trove/dns/__init__.py trove/dns/driver.py trove/dns/manager.py trove/dns/models.py trove/dns/designate/__init__.py trove/dns/designate/driver.py trove/extensions/__init__.py trove/extensions/common/__init__.py trove/extensions/common/models.py trove/extensions/common/service.py trove/extensions/common/views.py trove/extensions/mgmt/__init__.py trove/extensions/mgmt/clusters/__init__.py trove/extensions/mgmt/clusters/models.py trove/extensions/mgmt/clusters/service.py trove/extensions/mgmt/clusters/views.py trove/extensions/mgmt/configuration/__init__.py trove/extensions/mgmt/configuration/service.py trove/extensions/mgmt/configuration/views.py trove/extensions/mgmt/datastores/__init__.py trove/extensions/mgmt/datastores/service.py trove/extensions/mgmt/datastores/views.py trove/extensions/mgmt/instances/__init__.py trove/extensions/mgmt/instances/models.py trove/extensions/mgmt/instances/service.py trove/extensions/mgmt/instances/views.py trove/extensions/mgmt/quota/__init__.py trove/extensions/mgmt/quota/service.py trove/extensions/mgmt/quota/views.py trove/extensions/mgmt/upgrade/__init__.py trove/extensions/mgmt/upgrade/models.py trove/extensions/mgmt/upgrade/service.py trove/extensions/mongodb/__init__.py trove/extensions/mongodb/service.py trove/extensions/mysql/__init__.py trove/extensions/mysql/common.py trove/extensions/mysql/models.py trove/extensions/mysql/service.py 
trove/extensions/mysql/views.py trove/extensions/pxc/__init__.py trove/extensions/pxc/service.py trove/extensions/redis/__init__.py trove/extensions/redis/models.py trove/extensions/redis/service.py trove/extensions/redis/views.py trove/extensions/routes/__init__.py trove/extensions/routes/mgmt.py trove/extensions/routes/mysql.py trove/extensions/security_group/__init__.py trove/extensions/security_group/models.py trove/extensions/vertica/__init__.py trove/extensions/vertica/service.py trove/flavor/__init__.py trove/flavor/models.py trove/flavor/service.py trove/flavor/views.py trove/guestagent/__init__.py trove/guestagent/api.py trove/guestagent/dbaas.py trove/guestagent/guest_log.py trove/guestagent/models.py trove/guestagent/pkg.py trove/guestagent/service.py trove/guestagent/volume.py trove/guestagent/backup/__init__.py trove/guestagent/backup/backupagent.py trove/guestagent/common/__init__.py trove/guestagent/common/configuration.py trove/guestagent/common/guestagent_utils.py trove/guestagent/common/operating_system.py trove/guestagent/common/sql_query.py trove/guestagent/datastore/__init__.py trove/guestagent/datastore/manager.py trove/guestagent/datastore/service.py trove/guestagent/datastore/experimental/__init__.py trove/guestagent/datastore/experimental/cassandra/__init__.py trove/guestagent/datastore/experimental/cassandra/manager.py trove/guestagent/datastore/experimental/cassandra/service.py trove/guestagent/datastore/experimental/couchbase/__init__.py trove/guestagent/datastore/experimental/couchbase/manager.py trove/guestagent/datastore/experimental/couchbase/service.py trove/guestagent/datastore/experimental/couchbase/system.py trove/guestagent/datastore/experimental/couchdb/__init__.py trove/guestagent/datastore/experimental/couchdb/manager.py trove/guestagent/datastore/experimental/couchdb/service.py trove/guestagent/datastore/experimental/couchdb/system.py trove/guestagent/datastore/experimental/db2/__init__.py trove/guestagent/datastore/experimental/db2/manager.py trove/guestagent/datastore/experimental/db2/service.py trove/guestagent/datastore/experimental/db2/system.py trove/guestagent/datastore/experimental/mariadb/__init__.py trove/guestagent/datastore/experimental/mariadb/manager.py trove/guestagent/datastore/experimental/mariadb/service.py trove/guestagent/datastore/experimental/mongodb/__init__.py trove/guestagent/datastore/experimental/mongodb/manager.py trove/guestagent/datastore/experimental/mongodb/service.py trove/guestagent/datastore/experimental/mongodb/system.py trove/guestagent/datastore/experimental/percona/__init__.py trove/guestagent/datastore/experimental/percona/manager.py trove/guestagent/datastore/experimental/percona/service.py trove/guestagent/datastore/experimental/postgresql/__init__.py trove/guestagent/datastore/experimental/postgresql/manager.py trove/guestagent/datastore/experimental/postgresql/pgsql_query.py trove/guestagent/datastore/experimental/postgresql/service.py trove/guestagent/datastore/experimental/pxc/__init__.py trove/guestagent/datastore/experimental/pxc/manager.py trove/guestagent/datastore/experimental/pxc/service.py trove/guestagent/datastore/experimental/redis/__init__.py trove/guestagent/datastore/experimental/redis/manager.py trove/guestagent/datastore/experimental/redis/service.py trove/guestagent/datastore/experimental/redis/system.py trove/guestagent/datastore/experimental/vertica/__init__.py trove/guestagent/datastore/experimental/vertica/manager.py trove/guestagent/datastore/experimental/vertica/service.py 
trove/guestagent/datastore/experimental/vertica/system.py trove/guestagent/datastore/galera_common/__init__.py trove/guestagent/datastore/galera_common/manager.py trove/guestagent/datastore/galera_common/service.py trove/guestagent/datastore/mysql/__init__.py trove/guestagent/datastore/mysql/manager.py trove/guestagent/datastore/mysql/service.py trove/guestagent/datastore/mysql_common/__init__.py trove/guestagent/datastore/mysql_common/manager.py trove/guestagent/datastore/mysql_common/service.py trove/guestagent/datastore/technical-preview/__init__.py trove/guestagent/module/__init__.py trove/guestagent/module/driver_manager.py trove/guestagent/module/module_manager.py trove/guestagent/module/drivers/__init__.py trove/guestagent/module/drivers/module_driver.py trove/guestagent/module/drivers/new_relic_license_driver.py trove/guestagent/module/drivers/ping_driver.py trove/guestagent/strategies/__init__.py trove/guestagent/strategies/backup/__init__.py trove/guestagent/strategies/backup/base.py trove/guestagent/strategies/backup/mysql_impl.py trove/guestagent/strategies/backup/experimental/__init__.py trove/guestagent/strategies/backup/experimental/cassandra_impl.py trove/guestagent/strategies/backup/experimental/couchbase_impl.py trove/guestagent/strategies/backup/experimental/couchdb_impl.py trove/guestagent/strategies/backup/experimental/db2_impl.py trove/guestagent/strategies/backup/experimental/mariadb_impl.py trove/guestagent/strategies/backup/experimental/mongo_impl.py trove/guestagent/strategies/backup/experimental/postgresql_impl.py trove/guestagent/strategies/backup/experimental/redis_impl.py trove/guestagent/strategies/replication/__init__.py trove/guestagent/strategies/replication/base.py trove/guestagent/strategies/replication/mysql_base.py trove/guestagent/strategies/replication/mysql_binlog.py trove/guestagent/strategies/replication/mysql_gtid.py trove/guestagent/strategies/replication/experimental/__init__.py trove/guestagent/strategies/replication/experimental/mariadb_gtid.py trove/guestagent/strategies/replication/experimental/postgresql_impl.py trove/guestagent/strategies/replication/experimental/redis_sync.py trove/guestagent/strategies/restore/__init__.py trove/guestagent/strategies/restore/base.py trove/guestagent/strategies/restore/mysql_impl.py trove/guestagent/strategies/restore/experimental/__init__.py trove/guestagent/strategies/restore/experimental/cassandra_impl.py trove/guestagent/strategies/restore/experimental/couchbase_impl.py trove/guestagent/strategies/restore/experimental/couchdb_impl.py trove/guestagent/strategies/restore/experimental/db2_impl.py trove/guestagent/strategies/restore/experimental/mariadb_impl.py trove/guestagent/strategies/restore/experimental/mongo_impl.py trove/guestagent/strategies/restore/experimental/postgresql_impl.py trove/guestagent/strategies/restore/experimental/redis_impl.py trove/hacking/__init__.py trove/hacking/checks.py trove/instance/__init__.py trove/instance/models.py trove/instance/service.py trove/instance/tasks.py trove/instance/views.py trove/limits/__init__.py trove/limits/service.py trove/limits/views.py trove/module/__init__.py trove/module/models.py trove/module/service.py trove/module/views.py trove/network/__init__.py trove/network/base.py trove/network/neutron.py trove/network/nova.py trove/quota/__init__.py trove/quota/models.py trove/quota/quota.py trove/taskmanager/__init__.py trove/taskmanager/api.py trove/taskmanager/manager.py trove/taskmanager/models.py trove/taskmanager/service.py 
trove/templates/cassandra/config.template trove/templates/cassandra/validation-rules.json trove/templates/couchbase/config.template trove/templates/couchdb/config.template trove/templates/db2/config.template trove/templates/db2/validation-rules.json trove/templates/mariadb/cluster.config.template trove/templates/mariadb/config.template trove/templates/mariadb/replica.config.template trove/templates/mariadb/replica_source.config.template trove/templates/mariadb/validation-rules.json trove/templates/mongodb/config.template trove/templates/mongodb/validation-rules.json trove/templates/mysql/config.template trove/templates/mysql/replica.config.template trove/templates/mysql/replica_source.config.template trove/templates/mysql/validation-rules.json trove/templates/mysql/5.5/replica.config.template trove/templates/mysql/5.5/replica_source.config.template trove/templates/mysql/mysql-test/config.template trove/templates/percona/config.template trove/templates/percona/replica.config.template trove/templates/percona/replica_source.config.template trove/templates/percona/validation-rules.json trove/templates/percona/5.5/replica.config.template trove/templates/percona/5.5/replica_source.config.template trove/templates/postgresql/config.template trove/templates/postgresql/replica.config.template trove/templates/postgresql/replica_source.config.template trove/templates/postgresql/validation-rules.json trove/templates/pxc/cluster.config.template trove/templates/pxc/config.template trove/templates/pxc/replica.config.template trove/templates/pxc/replica_source.config.template trove/templates/pxc/validation-rules.json trove/templates/pxc/5.5/replica.config.template trove/templates/pxc/5.5/replica_source.config.template trove/templates/redis/config.template trove/templates/redis/replica.config.template trove/templates/redis/replica_source.config.template trove/templates/redis/validation-rules.json trove/templates/vertica/config.template trove/templates/vertica/validation-rules.json trove/tests/__init__.py trove/tests/config.py trove/tests/int_tests.py trove/tests/root_logger.py trove/tests/api/__init__.py trove/tests/api/backups.py trove/tests/api/configurations.py trove/tests/api/databases.py trove/tests/api/datastores.py trove/tests/api/instances.py trove/tests/api/instances_actions.py trove/tests/api/instances_delete.py trove/tests/api/instances_resize.py trove/tests/api/limits.py trove/tests/api/replication.py trove/tests/api/root.py trove/tests/api/user_access.py trove/tests/api/users.py trove/tests/api/versions.py trove/tests/api/mgmt/__init__.py trove/tests/api/mgmt/configurations.py trove/tests/api/mgmt/datastore_versions.py trove/tests/api/mgmt/instances_actions.py trove/tests/api/mgmt/quotas.py trove/tests/db/__init__.py trove/tests/db/migrations.py trove/tests/examples/__init__.py trove/tests/examples/client.py trove/tests/examples/snippets.py trove/tests/fakes/__init__.py trove/tests/fakes/common.py trove/tests/fakes/conf.py trove/tests/fakes/dns.py trove/tests/fakes/guestagent.py trove/tests/fakes/keystone.py trove/tests/fakes/limits.py trove/tests/fakes/neutron.py trove/tests/fakes/nova.py trove/tests/fakes/swift.py trove/tests/fakes/taskmanager.py trove/tests/scenario/__init__.py trove/tests/scenario/groups/__init__.py trove/tests/scenario/groups/backup_group.py trove/tests/scenario/groups/cluster_group.py trove/tests/scenario/groups/configuration_group.py trove/tests/scenario/groups/database_actions_group.py trove/tests/scenario/groups/guest_log_group.py 
trove/tests/scenario/groups/instance_actions_group.py trove/tests/scenario/groups/instance_create_group.py trove/tests/scenario/groups/instance_delete_group.py trove/tests/scenario/groups/instance_error_create_group.py trove/tests/scenario/groups/instance_force_delete_group.py trove/tests/scenario/groups/instance_upgrade_group.py trove/tests/scenario/groups/module_group.py trove/tests/scenario/groups/replication_group.py trove/tests/scenario/groups/root_actions_group.py trove/tests/scenario/groups/test_group.py trove/tests/scenario/groups/user_actions_group.py trove/tests/scenario/helpers/__init__.py trove/tests/scenario/helpers/cassandra_helper.py trove/tests/scenario/helpers/couchbase_helper.py trove/tests/scenario/helpers/couchdb_helper.py trove/tests/scenario/helpers/db2_helper.py trove/tests/scenario/helpers/mariadb_helper.py trove/tests/scenario/helpers/mongodb_helper.py trove/tests/scenario/helpers/mysql_helper.py trove/tests/scenario/helpers/percona_helper.py trove/tests/scenario/helpers/postgresql_helper.py trove/tests/scenario/helpers/pxc_helper.py trove/tests/scenario/helpers/redis_helper.py trove/tests/scenario/helpers/sql_helper.py trove/tests/scenario/helpers/test_helper.py trove/tests/scenario/helpers/vertica_helper.py trove/tests/scenario/runners/__init__.py trove/tests/scenario/runners/backup_runners.py trove/tests/scenario/runners/cluster_runners.py trove/tests/scenario/runners/configuration_runners.py trove/tests/scenario/runners/database_actions_runners.py trove/tests/scenario/runners/guest_log_runners.py trove/tests/scenario/runners/instance_actions_runners.py trove/tests/scenario/runners/instance_create_runners.py trove/tests/scenario/runners/instance_delete_runners.py trove/tests/scenario/runners/instance_error_create_runners.py trove/tests/scenario/runners/instance_force_delete_runners.py trove/tests/scenario/runners/instance_upgrade_runners.py trove/tests/scenario/runners/module_runners.py trove/tests/scenario/runners/negative_cluster_actions_runners.py trove/tests/scenario/runners/replication_runners.py trove/tests/scenario/runners/root_actions_runners.py trove/tests/scenario/runners/test_runners.py trove/tests/scenario/runners/user_actions_runners.py trove/tests/unittests/__init__.py trove/tests/unittests/trove_testtools.py trove/tests/unittests/api/__init__.py trove/tests/unittests/api/test_versions.py trove/tests/unittests/api/common/__init__.py trove/tests/unittests/api/common/test_extensions.py trove/tests/unittests/api/common/test_limits.py trove/tests/unittests/backup/__init__.py trove/tests/unittests/backup/test_backup_controller.py trove/tests/unittests/backup/test_backup_models.py trove/tests/unittests/backup/test_backupagent.py trove/tests/unittests/backup/test_storage.py trove/tests/unittests/cluster/__init__.py trove/tests/unittests/cluster/test_cassandra_cluster.py trove/tests/unittests/cluster/test_cluster.py trove/tests/unittests/cluster/test_cluster_controller.py trove/tests/unittests/cluster/test_cluster_models.py trove/tests/unittests/cluster/test_cluster_pxc_controller.py trove/tests/unittests/cluster/test_cluster_redis_controller.py trove/tests/unittests/cluster/test_cluster_vertica_controller.py trove/tests/unittests/cluster/test_cluster_views.py trove/tests/unittests/cluster/test_galera_cluster.py trove/tests/unittests/cluster/test_models.py trove/tests/unittests/cluster/test_mongodb_cluster.py trove/tests/unittests/cluster/test_redis_cluster.py trove/tests/unittests/cluster/test_vertica_cluster.py trove/tests/unittests/cmd/__init__.py 
trove/tests/unittests/cmd/test_status.py trove/tests/unittests/common/__init__.py trove/tests/unittests/common/test_auth.py trove/tests/unittests/common/test_conductor_serializer.py trove/tests/unittests/common/test_context.py trove/tests/unittests/common/test_crypto_utils.py trove/tests/unittests/common/test_dbmodels.py trove/tests/unittests/common/test_exception.py trove/tests/unittests/common/test_notification.py trove/tests/unittests/common/test_pagination.py trove/tests/unittests/common/test_policy.py trove/tests/unittests/common/test_secure_serializer.py trove/tests/unittests/common/test_serializer.py trove/tests/unittests/common/test_server_group.py trove/tests/unittests/common/test_stream_codecs.py trove/tests/unittests/common/test_template.py trove/tests/unittests/common/test_timeutils.py trove/tests/unittests/common/test_utils.py trove/tests/unittests/common/test_wsgi.py trove/tests/unittests/conductor/__init__.py trove/tests/unittests/conductor/test_conf.py trove/tests/unittests/conductor/test_methods.py trove/tests/unittests/configuration/__init__.py trove/tests/unittests/configuration/test_configuration_controller.py trove/tests/unittests/datastore/__init__.py trove/tests/unittests/datastore/base.py trove/tests/unittests/datastore/test_capability.py trove/tests/unittests/datastore/test_datastore.py trove/tests/unittests/datastore/test_datastore_version_metadata.py trove/tests/unittests/datastore/test_datastore_versions.py trove/tests/unittests/db/__init__.py trove/tests/unittests/db/test_migration_utils.py trove/tests/unittests/domain-name-service/__init__.py trove/tests/unittests/domain-name-service/test_designate_driver.py trove/tests/unittests/extensions/__init__.py trove/tests/unittests/extensions/common/__init__.py trove/tests/unittests/extensions/common/test_service.py trove/tests/unittests/extensions/redis/__init__.py trove/tests/unittests/extensions/redis/test_service.py trove/tests/unittests/flavor/__init__.py trove/tests/unittests/flavor/test_flavor_views.py trove/tests/unittests/guestagent/__init__.py trove/tests/unittests/guestagent/test_agent_heartbeats_models.py trove/tests/unittests/guestagent/test_api.py trove/tests/unittests/guestagent/test_backups.py trove/tests/unittests/guestagent/test_cassandra_manager.py trove/tests/unittests/guestagent/test_configuration.py trove/tests/unittests/guestagent/test_couchbase_manager.py trove/tests/unittests/guestagent/test_couchdb_manager.py trove/tests/unittests/guestagent/test_datastore_manager.py trove/tests/unittests/guestagent/test_dbaas.py trove/tests/unittests/guestagent/test_galera_cluster_api.py trove/tests/unittests/guestagent/test_galera_manager.py trove/tests/unittests/guestagent/test_guestagent_utils.py trove/tests/unittests/guestagent/test_manager.py trove/tests/unittests/guestagent/test_mariadb_manager.py trove/tests/unittests/guestagent/test_models.py trove/tests/unittests/guestagent/test_mysql_manager.py trove/tests/unittests/guestagent/test_operating_system.py trove/tests/unittests/guestagent/test_pkg.py trove/tests/unittests/guestagent/test_query.py trove/tests/unittests/guestagent/test_redis_manager.py trove/tests/unittests/guestagent/test_service.py trove/tests/unittests/guestagent/test_volume.py trove/tests/unittests/hacking/__init__.py trove/tests/unittests/hacking/test_check.py trove/tests/unittests/instance/__init__.py trove/tests/unittests/instance/test_instance_controller.py trove/tests/unittests/instance/test_instance_models.py trove/tests/unittests/instance/test_instance_status.py 
trove/tests/unittests/instance/test_instance_views.py trove/tests/unittests/mgmt/__init__.py trove/tests/unittests/mgmt/test_clusters.py trove/tests/unittests/mgmt/test_datastore_controller.py trove/tests/unittests/mgmt/test_datastores.py trove/tests/unittests/mgmt/test_models.py trove/tests/unittests/module/__init__.py trove/tests/unittests/module/test_module_controller.py trove/tests/unittests/module/test_module_models.py trove/tests/unittests/module/test_module_views.py trove/tests/unittests/mysql/__init__.py trove/tests/unittests/mysql/test_common.py trove/tests/unittests/mysql/test_user_controller.py trove/tests/unittests/quota/__init__.py trove/tests/unittests/quota/test_quota.py trove/tests/unittests/router/__init__.py trove/tests/unittests/router/test_router.py trove/tests/unittests/taskmanager/__init__.py trove/tests/unittests/taskmanager/test_api.py trove/tests/unittests/taskmanager/test_clusters.py trove/tests/unittests/taskmanager/test_galera_clusters.py trove/tests/unittests/taskmanager/test_manager.py trove/tests/unittests/taskmanager/test_models.py trove/tests/unittests/taskmanager/test_vertica_clusters.py trove/tests/unittests/upgrade/__init__.py trove/tests/unittests/upgrade/test_controller.py trove/tests/unittests/upgrade/test_models.py trove/tests/unittests/util/__init__.py trove/tests/unittests/util/matchers.py trove/tests/unittests/util/util.py trove/tests/unittests/volume_type/__init__.py trove/tests/unittests/volume_type/test_volume_type.py trove/tests/unittests/volume_type/test_volume_type_views.py trove/tests/util/__init__.py trove/tests/util/check.py trove/tests/util/client.py trove/tests/util/event_simulator.py trove/tests/util/mysql.py trove/tests/util/server_connection.py trove/tests/util/usage.py trove/tests/util/users.py trove/tests/util/utils.py trove/volume_type/__init__.py trove/volume_type/models.py trove/volume_type/service.py trove/volume_type/views.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541819.0 trove-12.1.0.dev92/trove.egg-info/dependency_links.txt0000644000175000017500000000000100000000000023046 0ustar00coreycorey00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541819.0 trove-12.1.0.dev92/trove.egg-info/entry_points.txt0000644000175000017500000000230700000000000022300 0ustar00coreycorey00000000000000
[console_scripts]
trove-api = trove.cmd.api:main
trove-conductor = trove.cmd.conductor:main
trove-fake-mode = trove.cmd.fakemode:main
trove-guestagent = trove.cmd.guest:main
trove-manage = trove.cmd.manage:main
trove-mgmt-taskmanager = trove.cmd.taskmanager:mgmt_main
trove-status = trove.cmd.status:main
trove-taskmanager = trove.cmd.taskmanager:main

[oslo.messaging.notify.drivers]
trove.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver
trove.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver
trove.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver
trove.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver
trove.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver

[oslo.policy.policies]
trove = trove.common.policies:list_rules

[trove.api.extensions]
mgmt = trove.extensions.routes.mgmt:Mgmt
mysql = trove.extensions.routes.mysql:Mysql

[trove.guestagent.module.drivers]
new_relic_license = trove.guestagent.module.drivers.new_relic_license_driver:NewRelicLicenseDriver
ping = trove.guestagent.module.drivers.ping_driver:PingDriver
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1576701859.0 trove-12.1.0.dev92/trove.egg-info/not-zip-safe0000644000175000017500000000000100000000000021226 0ustar00coreycorey00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541819.0 trove-12.1.0.dev92/trove.egg-info/pbr.json0000644000175000017500000000006000000000000020452 0ustar00coreycorey00000000000000{"git_version": "8c3df10a", "is_release": false}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541819.0 trove-12.1.0.dev92/trove.egg-info/requires.txt0000644000175000017500000000204300000000000021377 0ustar00coreycorey00000000000000
Babel!=2.4.0,>=2.3.4
Jinja2>=2.10
Paste>=2.0.2
PasteDeploy>=1.5.0
PyMySQL>=0.7.6
Routes>=2.3.1
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10
WebOb>=1.7.1
cryptography>=2.1.4
diskimage-builder!=1.6.0,!=1.7.0,!=1.7.1,>=1.1.2
eventlet!=0.18.3,!=0.20.1,>=0.18.2
httplib2>=0.9.1
iso8601>=0.1.11
jsonschema>=2.6.0
keystonemiddleware>=4.17.0
lxml!=3.7.0,>=3.4.1
netaddr>=0.7.18
oslo.concurrency>=3.26.0
oslo.config>=5.2.0
oslo.context>=2.19.2
oslo.db>=4.27.0
oslo.i18n>=3.15.3
oslo.log>=3.36.0
oslo.messaging>=5.29.0
oslo.middleware>=3.31.0
oslo.policy>=1.30.0
oslo.serialization!=2.19.1,>=2.18.0
oslo.service!=1.28.1,>=1.24.0
oslo.upgradecheck>=0.1.0
oslo.utils>=3.33.0
osprofiler>=1.4.0
passlib>=1.7.0
pbr!=2.1.0,>=2.0.0
pexpect!=3.3,>=3.1
python-cinderclient>=3.3.0
python-designateclient>=2.7.0
python-glanceclient>=2.8.0
python-heatclient>=1.10.0
python-keystoneclient>=3.8.0
python-neutronclient>=6.7.0
python-novaclient>=9.1.0
python-swiftclient>=3.2.0
python-troveclient>=2.2.0
six>=1.10.0
sqlalchemy-migrate>=0.11.0
stevedore>=1.20.0
xmltodict>=0.10.1
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586541819.0 trove-12.1.0.dev92/trove.egg-info/top_level.txt0000644000175000017500000000000600000000000021526 0ustar00coreycorey00000000000000trove
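Note: the [trove.guestagent.module.drivers] section of entry_points.txt above is the plugin registry that Trove's guest agent discovers at runtime through stevedore, which requires.txt pins as stevedore>=1.20.0. A minimal sketch of that lookup follows, assuming this sdist has been pip-installed into the current Python environment (entry points are only discoverable after installation); it is illustrative, not the guest agent's own loading code.

    # Minimal sketch: resolve the "ping" driver declared above in
    # entry_points.txt as
    #   ping = trove.guestagent.module.drivers.ping_driver:PingDriver
    # Assumes trove-12.1.0.dev92 is installed in this environment.
    from stevedore import driver

    mgr = driver.DriverManager(
        namespace='trove.guestagent.module.drivers',
        name='ping',
        invoke_on_load=False,  # import the class without instantiating it
    )
    print(mgr.driver)
    # <class 'trove.guestagent.module.drivers.ping_driver.PingDriver'>

The [console_scripts] entries work the same way: at install time each one (e.g. trove-api = trove.cmd.api:main) is turned into an executable wrapper that imports the named module and calls the named function.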