mistral-10.0.0.0b3/.coveragerc
------------------------------

[run]
branch = True
source = mistral
omit = .tox/*
       mistral/tests/*

[report]
ignore_errors = True

mistral-10.0.0.0b3/.dockerignore
--------------------------------

api-ref/
devstack/
doc/
functionaltests/
mistral.egg-info/
playbooks/
rally-jobs/
releasenotes/
envs/
*.py[cod]
*.sqlite

# C extensions
*.so

# Packages
*.egg*
dist
build
.venv
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64

# Installer logs
pip-log.txt

# Unit test / coverage reports
.coverage
.coverage.*
.stestr
.tox
nosetests.xml
cover/*
.testrepository/
subunit.log
.mistral.conf
AUTHORS
ChangeLog

# Translations
*.mo

# Mr Developer
.mr.developer.cfg
.project
.pydevproject
.idea
.DS_Store
etc/*.conf
etc/mistral.conf.sample

# Linux swap files range from .saa to .swp
*.s[a-w][a-p]

# Files created by releasenotes build
releasenotes/build

# Files created by doc build
doc/build/
doc/source/api
doc/source/_static/

# Files created by API build
api-ref/build/

mistral-10.0.0.0b3/.stestr.conf
-------------------------------

[DEFAULT]
test_path=./mistral/tests/unit
top_dir=./
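
The ``.stestr.conf`` above tells the test runner where to discover Mistral's
unit tests. As a rough illustration of what ``test_path`` and ``top_dir``
mean, the following hedged Python sketch performs an equivalent discovery
step with the standard library ``unittest`` loader; stestr itself
additionally handles parallel workers and result storage, and the paths are
taken from the config above and assume a mistral source checkout::

    # Illustrative sketch only: mirror test_path/top_dir from .stestr.conf
    # with the stdlib loader. Run from the root of a source checkout.
    import unittest

    loader = unittest.defaultTestLoader

    # test_path=./mistral/tests/unit, top_dir=./
    suite = loader.discover(start_dir="./mistral/tests/unit",
                            top_level_dir="./")

    print("Discovered %d test cases" % suite.countTestCases())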

mistral-10.0.0.0b3/.zuul.yaml
-----------------------------

- job:
    name: mistral-rally-task
    parent: rally-task-mistral
    vars:
      devstack_plugins:
        rally-openstack: https://opendev.org/openstack/rally-openstack
      rally_task: rally-jobs/task-mistral.yaml
      devstack_localrc:
        USE_PYTHON3: true
      devstack_local_conf:
        post-config:
          $MISTRAL_CONF_FILE:
            engine:
              execution_field_size_limit_kb: 8192
    required-projects:
      - openstack/rally-openstack
      - openstack/mistral-lib
      - openstack/mistral-extra

- job:
    name: mistral-docker-buildimage
    parent: publish-openstack-artifacts
    run: playbooks/docker-buildimage/run.yaml
    post-run: playbooks/docker-buildimage/post.yaml
    timeout: 1800
    required-projects:
      - openstack/mistral

- job:
    name: mistral-docker-buildimage-test
    run: playbooks/docker-buildimage/run.yaml
    post-run: playbooks/docker-buildimage/post.yaml
    timeout: 1800
    required-projects:
      - openstack/mistral

# This job does not work. We can come back to it later.
# - job:
#     name: mistral-ha
#     parent: legacy-base
#     run: playbooks/legacy/mistral-ha/run
#     timeout: 4200

- job:
    name: mistral-tox-unit-mysql
    parent: openstack-tox
    vars:
      tox_envlist: unit-mysql
    irrelevant-files:
      - ^.*\.rst$
      - ^doc/.*$
      - ^releasenotes/.*$
    timeout: 3600
    required-projects:
      - openstack/mistral-lib
      - openstack/mistral-extra

- job:
    name: mistral-tox-unit-postgresql
    parent: openstack-tox
    vars:
      tox_envlist: unit-postgresql
      tox_environment: {CI_PROJECT: "{{ zuul['project']['name'] }}"}
    irrelevant-files:
      - ^.*\.rst$
      - ^doc/.*$
      - ^releasenotes/.*$
    timeout: 3600
    required-projects:
      - openstack/mistral-lib
      - openstack/mistral-extra

- project:
    templates:
      - openstack-python3-ussuri-jobs
      - publish-openstack-docs-pti
      - check-requirements
      - release-notes-jobs-python3
    check:
      jobs:
        - openstack-tox-cover:
            voting: false
            irrelevant-files:
              - ^.zuul.yaml$
              - ^.*\.rst$
              - ^doc/.*$
              - ^releasenotes/.*$
            required-projects:
              - openstack/mistral-lib
              - openstack/mistral-extra
        - openstack-tox-py36:
            required-projects:
              - openstack/mistral-lib
              - openstack/mistral-extra
        - openstack-tox-py37:
            required-projects:
              - openstack/mistral-lib
              - openstack/mistral-extra
        - openstack-tox-py38:
            required-projects:
              - openstack/mistral-lib
              - openstack/mistral-extra
        - openstack-tox-docs:
            required-projects:
              - openstack/mistral-lib
              - openstack/mistral-extra
        - mistral-devstack
        - mistral-devstack-tempest-ipv6-only
        - mistral-devstack-non-apache-tempest-ipv6-only
        - mistral-devstack-non-apache
        - mistral-devstack-kombu
        - mistral-tox-unit-mysql
        - mistral-tox-unit-postgresql
        # TripleO jobs that deploy Mistral.
        # Note we don't use a project-template here, so it's easier
        # to disable voting on one specific job if things go wrong.
        # If you need any support to debug these jobs in case of
        # failures, please reach us on #tripleo IRC channel.
        - tripleo-ci-centos-7-containers-multinode:
            voting: false
        - mistral-rally-task:
            voting: false
        - openstack-tox-lower-constraints:
            required-projects:
              - openstack/mistral-lib
              - openstack/mistral-extra
    gate:
      queue: mistral
      jobs:
        - mistral-devstack
        - mistral-devstack-tempest-ipv6-only
        - mistral-devstack-non-apache-tempest-ipv6-only
        - mistral-devstack-non-apache
        - mistral-tox-unit-mysql
        - mistral-tox-unit-postgresql
        - mistral-devstack-kombu
        - openstack-tox-lower-constraints
        - tripleo-ci-centos-7-undercloud-containers
    post:
      jobs:
        - mistral-docker-buildimage:
            branches: master
    experimental:
      jobs:
        - mistral-docker-buildimage-test:
            branches: master
        # This job doesn't work yet.
        # - mistral-ha:
        #     voting: false
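
``.zuul.yaml`` is plain YAML: a list of ``job`` and ``project`` items that
Zuul assembles into the CI pipelines shown above. A minimal sketch, assuming
PyYAML is installed, of inspecting the job definitions programmatically
(Zuul applies far more validation and inheritance logic than this)::

    # Minimal sketch: print the Zuul jobs defined in .zuul.yaml along with
    # their parents. Assumes PyYAML ("pip install PyYAML") and a checkout.
    import yaml

    with open(".zuul.yaml") as f:
        items = yaml.safe_load(f)

    for item in items:
        if "job" in item:
            job = item["job"]
            print(job["name"], "->", job.get("parent", "<no parent>"))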

mistral-10.0.0.0b3/AUTHORS
--------------------------

Abhishek Chanda Adriano Petrich Alexander Kuznetsov Alfredo Moralejo
Anastasia Kuznetsova Andras Kovi Andrea Visnyei Andreas Jaeger
Andrey Kurilin Angus Salkeld Ankita Wagh Antoine Musso Anusree Artem Lapin
Bertrand Lallau Bertrand Lallau Bhaskar Duvvuri Bo Tran Bob HADDLETON
Bob Haddleton Bob.Haddleton Boris Bobrov Boris Pavlovic Brad P. Crochet
Béla Vancsics Cao Xuan Hoang Chandan Kumar Chaozhe.Chen Chen Eilat
Christian Berendt Claudiu Belu Cong Phuoc Hoang Corey Bryant Dai Dang Van
Dan Prince Dao Cong Tien Daryl Mowrer David C Kennedy Dawid Deja
Derek Higgins Dharmendra Dirk Mueller Dmitri Zimine Dmitry Tantsur
Dmitry Tantsur Dominic Schlegel Doug Hellmann Dougal Matthews
Dougal Matthews Ed Cranford Emilien Macchi Endre János Kovács Eyal
Fei Long Wang Flavio Percoco Gal Margalit Ghanshyam Mann Guy Paz
Guy Shaanan Ha Manh Dong Hangdong Zhang Hardik Parekh Hervé Beraud
Hieu LE Hongbin Lu Honza Pokorny Ian Wienand Idan Narotzki Istvan Imre
Istvan Imre Jaewook Oh James E. Blair Jeff Peeler Jeffrey Guan
Jeffrey Zhang Jeremy Liu Jeremy Stanley Ji zhaoxuan Ji-Wei Jiri Tomasek
John Eckersberg Jose Castro Leon Juan Antonio Osorio Robles
Kaustuv Royburman Kevin Pouget Kien Nguyen Kien Nguyen Kirill Izotov
Kupai József Lakshmi Kannan Lakshmi Kannan Lance Bragstad Limor
Limor Stotland Lingxian Kong LingxianKong LingxianKong LiuNanke
Lucky samadhiya Luong Anh Tuan Manas Kelshikar MaoyangLiu Marc Gariepy
Marcos Fermin Lobo Michael Krotscheck Michal Gershenzon Michal Gershenzon
Michal Gershenzon Mike Fedosin Mike Fedosin Miles Gould Monty Taylor
Morgan Jones Moshe Elisha MosheElisha Márton Csuha Nguyen Hai
Nguyen Hung Phuong Nguyen Van Trung Nick Maludy Nikolay Mahotkin
Nikolay Mahotkin Nina Goradia Nishant Kumar Noa Koffman Noa Koffman
Oleg Ovcharuk Oleh Huzei Oleksii Chuprykov Oleksiy Petrenko
OpenStack Release Bot PanFengyun Paul Belanger Pierre Gaxatte
Pierre-Arthur MATHIEU Pradeep Kilambi Prince Katiyar
PrivateRookie <996514515@qq.com> Q.hongtao Rafael Folco Rajiv Kumar
Ray Chen Renat Akhmerov Renat Akhmerov Renato Recio Rinat Sabitov
Roman Dobosz Ryan Brady Sean McGinnis Sergey Kolekonov Sergey Murashov
Shaik Apsar Sharat Sharat Sharma Shuquan Huang Spencer Yu Steven Hardy
Thierry Carrez Thomas Goirand Thomas Herve Timur Nurlygayanov
TimurNurlygayanov TitanLi Toure Dunnon Tovin Seven TuanLuong Van Hung Pham
Venkata Mahesh Jonnalagadda Vitalii Solodilov Vlad Gusev Vu Cong Tuan
W Chan Wes Hayutin Winson Chan Winson Chan Xavier Hardy XieYingYun
Yaroslav Lobankov Yuval Adar Zane Bitter Zhao Lei Zhenguo Niu ZhiQiang Fan
ZhiQiang Fan akhiljain23 ali amassalh apetrich apetrich avnish bhavenst
byhan caoyuan caoyue chenaidong1 cheneydc chengebj5238 chenjiao chenxiangui
csatari dharmendra dzimine fengchaoyang gecong1973 gengchc2 ghanshyam
guotao.bj hardik hardikj hnyang howardlee hparekh int32bit jacky06 junboli
keliang kennedda kong lijunjie liu-sheng liuyamin lixinhui loooosy
lvdongbing manasdk noakoffman pawnesh.kumar pengdake <19921207pq@gmail.com>
pengyuesheng qiufossen rajat29 rakhmerov ravikiran rico.lin ricolin
rsritesh shubhendu sunqingliang6 syed ahsan shamim zaidi tengqm tonybrad
tshtilma venkatamahesh visnyei wanghao wangqi wangxiyuan wangxu wangzhh
wudong xpress yfzhao yong sheng gong ypbao yushangbin zhang.lei zhangboye
zhangdebo zhangdetong zhangguoqing zhangyanxian zhangyanxian zhu.boxiang
zhu.rong zhufl zhulingjie 翟小君

mistral-10.0.0.0b3/CONTRIBUTING.rst
-----------------------------------

=======================
Contributing to Mistral
=======================

If you're interested in contributing to the Mistral project, the following
will help get you started.

Contributor License Agreement
=============================

In order to contribute to the Mistral project, you need to have signed
OpenStack's contributor's agreement:

* https://docs.openstack.org/infra/manual/developers.html
* https://wiki.openstack.org/CLA

Project Hosting Details
=======================

* Bug trackers

  * General mistral tracker: https://launchpad.net/mistral
  * Python client tracker: https://launchpad.net/python-mistralclient

* Mailing list (prefix subjects with ``[Mistral]`` for faster responses)

  http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss

* Documentation

  * https://docs.openstack.org/mistral/latest/

* IRC channel

  * #openstack-mistral at FreeNode
  * https://wiki.openstack.org/wiki/Mistral/Meetings_Meetings

* Code Hosting

  * https://github.com/openstack/mistral
  * https://github.com/openstack/python-mistralclient
  * https://github.com/openstack/mistral-dashboard
  * https://github.com/openstack/mistral-lib
  * https://github.com/openstack/mistral-specs

* Code Review

  * https://review.opendev.org/#/q/mistral
  * https://review.opendev.org/#/q/python-mistralclient
  * https://review.opendev.org/#/q/mistral-dashboard
  * https://review.opendev.org/#/q/mistral-lib
  * https://review.opendev.org/#/q/mistral-extra
  * https://review.opendev.org/#/q/mistral-specs
  * https://docs.openstack.org/infra/manual/developers.html#development-workflow

* Mistral Design Specifications

  * https://specs.openstack.org/openstack/mistral-specs/

mistral-10.0.0.0b3/ChangeLog
----------------------------

CHANGES
=======

10.0.0.0b3
----------

* Use constraints for docs installs
* Add YAQL sanitizing for iterators
* Added two new Rally scenarios
* Update hacking for Python3
* Fix serialization of structures that might contain YAQL types
* Bump oslo.serialization version to 2.21.1
* Keep openstack mapping\_path option
* Fix ContextView JSON serialization
* Fix incorrect in-depth search of affected tasks
* Add an article about profiling into the docs
* Add a unit test for @tx\_cached and fix a bug in it
* Removing duplicated descriptions
* Fix readme
* Add caching for YAQL/Jinja expression functions
* Move preinstalled workflows to mistral-extra
* Fix requirements
* Remove OpenStack actions from mistral
* Refactor expressions
* Fix YAQL engine initialization
* Extend task and workflow notification data

10.0.0.0b2
----------

* Release note for "convert\_output\_data" config option
* Fix adhoc actions
* Make tripleO CI job non-voting
* Move registration of CLI options to launch.py
* Add "convert\_output\_data" config property for YAQL
* Init profiler in for a new thread in post\_tx\_queue.py
* Set the delayed call "key" field to the right value
* Initialize profiler for scheduler threads
* Fix fake clients in actions
* Set tempest configuration to support service api
* Adjust images in the docs and other small fixes
* Move "Long Running Business Process" article from Wiki to docs
* Move the article about testing from Wiki to the built-in docs
* Move FAQ from Wiki to the built-in docs
* Added a new API to fetch sub-execution of an execution or a task
* Add json param to HTTPAction
* Move "Cloud Cron" cookbook from Wiki to the built-in docs
* The first iteration of restructuring Mistral doc
* Disable key sorting in workflow definition
* wrapped the value of parameters in inputs(in wf get API) with ""
* Gnocchi: fix client options
* Fix typo
* Add 'interface' field to Workflow resource
* Fix requirements remove py2
* Fix keycloak authentication
* Remove mistral-devstack-base
* Add the 1st version of Mistral coding guidelines into the docs
* Add namespaces to Ad-Hoc actions
* Fixed a bug regarding workbooks namespace in postgresql
* Designate uses only v2
* Use 406 error instead of 500 error when Service API is not supported
* Fix duplicated words issue like "from from parsed token"
* Update hacking and fix warnings
* Remove unnecessary comma in help message in conf
* Don't use eventlet.monkey\_patch under wsgi
* Add coordination support for devstack
* Use MISTRAL\_SERVICE\_HOST as the host ip for standalone
* [train][goal] Run 'mistral-devstack-tempest-ipv6-only' job in gate
* Allow the prefixes like "eq:" and "neq:" in the custom REST UUID type
* Disable the use of anchors when parsing yaml

10.0.0.0b1
----------

* Add a release note for graceful scale-in feature
* Implement engine graceful shutdown
* Enlarge tags support
* Fix log messages in the action heartbeats mechanism
* Remove the TripleO standalone job
* Make it possible to set None to REST API filters
* [ussuri][goal] Drop python 2.7 support and testing
* Mask sensitive data when logging action results
* Make action heartbeats work for all executor types
* Make sure minimum amqp is 2.5.2
* Refactor action execution reporter
* Extend capabilities to clean up old executions
* Refactor rerun of joins
* Task publish is ignored silently
* Add a script that generates a test workflow with needed paralellism
* Fix task expression context
* Evaluate input expression should check the in\_context
* Make test\_action\_definition\_cache\_ttl more robust
* tox: Keeping going with docs
* Prohibit creation workflows with spaces in name
* Cap psycopg2 to 2.8.3
* Using std.ssh without private\_key\_filename causes TypeError
* Add pdf build support
* Switch to Ussuri jobs
* adjust doc string to correct key
* Add Source links to readme
* Add release note for fix error of cron trigger run
* Add missing :param statement in doc string
* Adjust doc string to correct param
* Change the action error message format
* Fix the global publish for task
* New alembic migration to support namespaces in postgresql
* Log the original exception in is\_sync
* Fix "root\_execution" lazy loading issue and refactor execution.py
* Add the explicit dependency from kombu
* Update master for stable/train

9.0.0.0rc1
----------

* Add an ability to disable workflow text validation
* Fix scheduled jobs migration
* Remove volumes.promote and volumes.reenable action from cinder
* Pass a real session to ironicclient in \_get\_fake\_client
* Exclude ironicclient 3.0.0 from requirements
* Optimize creation of language specs
* Use v2 designate client instead of v1
* Change 403 exception message to something more informative and accurate

9.0.0.0b1
---------

* Remove unneeded Zuul branch matcher
* moved generic util functions from mistral to mistral-lib
* Fix error validate token when run cron trigger
* Fix don't work with senlin actions
* Add a cookiecutter template to generate custom stuff
* Fix missing in workflow documents
* Add db api tests for scheduled jobs
* Need to run only doc related jobs on doc change
* Fix workflow documents
* Add a migration to create the scheduled\_jobs table
* Fix workflow language tests after updating jsonschema to 3.0.2
* Fix misspell word
* Blacklist eventlet 0.21.0,0.23.0,0.25.0
* Check if workflow execution is empty in integrity checker
"published\_global" field to the task execution REST resource * Improve workflow notifications and webhook data * Fix 'with-items' expression evaluation * Mistral don't work with cron trigger * Support OpenStack services dynamic versions * Bump lower constraint of python-zunclient * remove unused gate code \* post\_test\_hook. used in legacy gate \* run\_functional\_test probably not used and also doean't work since mistral\_tempest was moved * Add "retry\_count" field into workflow execution report * Improve new scheduler * Fail-on policy * Create docs for the workflow namespaces feature * Use raw strings in all pattern matching strings * Upgrade hacking * Improve error message when sync actions timeout * Allow to filter event notifications by their type * Provide better docs for the "target" task attribute * A1 B should be tasks rather than workflow levels * Update api-ref location * Rename the test class so it will be PyCharm friendly * When installing devstack enable the bash completion * Retry a DB transaction on "Too many connections" error * Bulk delete of stored delayed calls * Allow to delete multiple objects with advanced filters * remove mistral-tempest-plugin from setup.cfg * Fix execution deletion in case of insecure context * New rerun events * Reformat rerun logic for tasks with join * Add workflow\_execution\_id to task notification object * Use SessionClient for Ironic actions * Add Python 3 Train unit tests * Fix invalid assert states * Add bindep.txt file for binary dependencies used in unit tests * Initialize the lazily loaded execution "input" field in API * Create needed infrastructure to switch scheduler implementations * Fix workflow execution cascade delete error * Use eventlet-aware threading events * Make more JSON fields in execution objects lazy-loaded * Optimize finding upstream task executions * Direct workflow code cleanup and refactoring * Use openstack-python3-train-jobs for python3 test runtime * Fix sporadically failing test test\_with\_items\_action\_context * Store next task names in DB * Limit max search depth * Change mistral opendev.org to releases.openstack.org * Add python 3.7 classifier to setup.cfg * Simple optimization of creating/updating workflows * Reduce number of fields in notification data * Delete delayed calls for deleted entities * Exclude broken ironicclient versions 2.5.2 and 2.7.1 * Handle action inputs properly to prevent tasks stuck in RUNNING state * Allow to use 'task()' yaql function in task policies * Use get\_task\_executions\_count for any\_cancels method * Rework updating action executions heartbeats * Bump openstackdocstheme to 1.30.0 * Blacklist sphinx 2.1.0 (autodoc bug) * Skip context evaluation for non-conditional transitions * Remove \_get\_next\_clauses * Add release notes to changes to wflanguage execution object * Prepare cache for \_is\_upstream\_task\_execution * Remove \_find\_task\_execution\_by\_name * Move action caching to db layer * Send task and workflow notifications out of the transaction * Rework finding indirectly affected created joins * Fix adhoc action lookup * Blacklist python-cinderclient 4.0.0 * Constraint networkx to <2.3 for Python 2 * Get rid of lookup utils * Fix how "has\_next\_tasks" is calculated for task executions * Optimize searching of upstream task executions * Rework joining mechanism * Remove deprecated nova commands * Reduce the number of "on-xxx" evaluations * Removes insecure parameter from barbican client * Add back the secrets\_store action into mapping.json * Adding 
* Adding root\_execution\_id to the jinja executor function
* Fix the import for filter\_utils
* Adds secrets\_retrieve to the list of available actions in barbican
* Pass auth context to the event publishers
* Docs improvements: task timeout, global context, Docker and jinja
* Docs: Change description from on-success to on-error
* Reformat retry logic for tasks with join
* Add option to start subworkflow via RPC
* Replace git.openstack.org URLs with opendev.org URLs
* Add delay option to std.echo to emulate external lags
* OpenDev Migration Patch
* Add release notes for reply-to feature
* Fix an action execution controller test
* Don't use default mutable parameter
* inspect.getargspec is deprecated on py3
* Add reply-to to std.email
* Dropping the py35 testing
* Fix E305 codestyle errors
* Drop py35 jobs
* Fix doc
* Fix an expression context for all\_errors\_handled()
* Fix an obsolete name of a profiler trace
* Add a script to generate a report about profile traces
* Add release note for I04ba85488b27cb05c3b81ad8c973c3cc3fe56d36
* Replace openstack.org git:// URLs with https://
* Stop sending workflow output to on\_action\_complete
* Add "convert\_input\_data" config property for YAQL expressions
* Setting the lower version of sphinxcontrib-pecanwsme to 0.10.0
* Add release notes for engine optimizations
* Improve profiler logging
* Update master for stable/stein
* Add Python 3.7 to tox
* Add "root\_execution" mapped property to WorkflowExecution model
* Optimize action scheduling

8.0.0.0rc1
----------

* Retries shouldn't execute if join task failed because of child task
* Add http\_proxy\_to\_wsgi middleware

8.0.0.0b2
---------

* add python 3.7 unit test job
* standalone/undercloud do not cover full mistral workflow
* Stop using deprecated keystone\_authtoken/auth\_uri
* Update dogpile.cache to match global requirements
* Revert "Fix how Mistral prepares data for evaluating a YAQL expression"
* Release note for fixing event-engines HA
* Fix how Mistral prepares data for evaluating a YAQL expression
* Add a workflow execution report endpoint
* Sending TASK\_FAILED event in case of MistralException
* Process all task batches in wf output evaluation
* Replace tripleo-scenario003-multinode with scenario003-standalone
* Fix the misspelling of "default"

8.0.0.0b1
---------

* fix typo mistakes
* Amend the spelling error of a word
* Update mailinglist from dev to discuss
* Remove those copy words occured twice times in wf\_lang\_v2.rst
* Gate fix for failing at openstack-tox-docs
* Adds private\_key parameter in the standard ssh actions
* Add loging for sending an action to executor
* Clarify REST field valiation error
* Remove tripleo newton and ocata jobs
* Set admin security context in the action execution checker thread
* Fix the tests for workflow "started\_at" and "finished\_at" times
* Add Python 3.6 classifier to setup.cfg
* Eliminating datetime.now()
* Add missing ws separator between words
* Add started\_at and finished\_at to task execution
* Fix "join" when the last indirect inbound task failed
* Clone cached action definitions
* Fix race condition in refreshing "join" task state
* Omit the twice occured words in index.rst
* Remove setup.py check from pep8 job
* Fix how action result is assigned to task 'state\_info' field
* Divide yaml input to save it into definitions separately
* Refactor action execution checker without using scheduler
* Add batch size for integrity checker
* Simplify workflow and join completion logic
* Allow None for 'params' when starting a workflow execution
* Update min tox version to 2.0
* Improve workflow completion logic by removing periodic jobs
* Fix senlin fake client creation
* Fix usage of cachetools in lookup\_utils
* Improve join by removing periodic jobs
* Mistral install guide
* [Event-engine] Allow event\_engine to work in HA
* Reduce the concurrency in the 500 wb join Rally task
* An execution hangs in the RUNNING state after rerun
* Add sqlalchemy.exc.OperationalError to the retry decorator
* make user\_info\_endpoint\_url independent of auth\_url
* Increment versioning with pbr instruction
* Update OnClauseSPec task name criteria
* Fix next link in get resource list rest API
* Remove remaining references to the rpc\_backend
* Update version.version\_string to actually be a string
* Don't quote {posargs} in tox.ini
* Add a release note for Ic98e2db02abd8483591756d73e06784cc2e9cbe3
* Make task execution logging more readable and informative
* Cleanup transport along RPC clients
* Add entry point to allow for oslo.policy CLI usage
* Fix how Mistral calculates workflow output
* Fix SSHAction under python3
* Remove extra information from std.ssh action
* Add release note for auth\_context bugfix
* Minor improvement of the NoopPublisher
* Minor bug of \_assert\_multiple\_items function
* Performance: remove unnecessary workflow execution update
* add python 3.6 unit test job
* switch documentation job to new PTI
* Increase delayed\_calls\_v2.auth\_context
* Add py36 to tox and default to python3 for pep8 and venv
* Use mock to patch lookup\_utils.\_ACTION\_DEF\_CACHE
* Fix some format errors in installation guide
* Explicitly convert X-Target-Insecure to a boolean
* Remove -u root as mysql is executed with root user
* import zuul job settings from project-config
* Add tripleo-ci-centos-7-undercloud-containers job
* New experimental scheduler: the first working version
* Removes non needed parameter passed in magnum client creation
* Update reno for stable/rocky

7.0.0
-----

* Update workbook namespace to '' on migration

7.0.0.0b3
---------

* expose the user info url as a configuration
* Fix docker image not building
* Keycloak and Docker
* Clarify what an exception from an action means
* Improve std.email action
* remove invalid todo comment
* Fix flaky cron trigger test
* Add namespace parameter to Workbook API doc
* Add a debug log for the webhook publisher
* Remove extra a specification validation
* Add namespace support for workbooks
* Enable mutable config in mistral
* Support Manila actions in Mistral
* Remove hardcoded usage of v2 authentication in Barbican actions
* Use json.loads instead of eval() on the config
* Update the Custom Action documentation to use mistral-lib
* Add documentation on event notifier
* Migrate mistral to using the serialization code in mistral-lib
* Use register\_session\_conf\_options API
* Install mistral-lib as a sibling for tox jobs
* Allow engine commands as task name
* Release note for adding "oslo\_rpc\_executor" config option
* Fix testenv cover in tox.ini
* Fix rev-id parameter in mistral-db-manage
* Return the result of the MistralHTTPAction
* modify grammar mistake
* add docs for states. add docs explaining what each state mean
* Add the config option for Oslo Messaging executor type
* Pin get-pip.py to 3.2
* Follow the new PTI for document build
* Add a policy to control the right to publish resources
* detect https and act accordingly
* Add CloudFlow info to Mistral documentation
* Add missing Tacker actions to Mistral
* fix tox python3 overrides
* Fix typos
* Add better output to the user deleting executions
* switch mysql functional tests to PyMySQL
* Add missing query paramater \`scope\` to the workbook api
* Remove time.sleep from tests
* Generate default execution id
* Fix mistral CI
* Update install guide about running mistral
* Refresh a number of retry a task when task was rerun
* Use on-clause and retry\_policy get\_spec for validation
* Add info about source execution to a workflow execution description
* Amend the spelling error of a word
* Make cron-triggers not play catchup
* A mechanism to close stuck running action executions
* add the release note
* Update default Docker configuration and documentation

7.0.0.0b2
---------

* Add the restructuredtext check to the flake8 job
* Fix the call to start\_workflow from events triggers
* Register the collect\_timing keystone\_authtoken option
* Release note for adding YAQL engine options
* Add YAQL engine options
* Create Base class for Mistral Exceptions and Errors
* Switch to using stestr
* Add log message to log the stack trace in case failed to send an HTTP request
* Add test for \_try\_import
* Fix \`def \_admin\` keystone client factory with trust scope
* Replace Chinese quotes with English quotes
* Replace port 35357 with 5000 for "identity\_uri"
* Release note for using "passive\_deletes=True"
* Add "fields" argument to DB API methods
* Support Qinling actions in Mistral
* Added test to update the database schema to the latest revision
* Release note for using "passive\_deletes=True"
* Use partial loading of workflow execution in the controller
* Use "passive\_deletes=True" in ORM relationships
* Fix workflows query with fields=input
* Add namespace in exception info
* Purge README.rst in favor of the generated documentation
* Fix error workbook example
* Fixed workflow output in case of execution\_field\_size\_limit\_kb
* Release note for workflow environment optimizations
* Do not copy workflow environment into subworkflows
* Get rid of a extra copy of workflow environment
* Support actions for zun
* Add release note for jinja expression bug
* Only allow for deleting completed executions
* Remove unused params
* Make sure there are no duplicates in the spec cache w/o restarts
* Fix error about SenlinAction
* Added Vitrage actions into Mistral Implements: blueprint mistral-vitrage-actions
* Added JavaScript evaluator which doesn't require a compilation
* Added 'safe-rerun' policy to task-defaults section
* Minor \`update\_on\_match\` improvement
* Fixed Jinja error handling
* Update mistral upgrade guide
* Stop using slave\_scripts/install-distro-packages.sh
* Fix docker image build job
* Release note for batched evaluation of final workflow context
* Trivial: Update pypi url to new url
* Optimize final workflow context evaluation with a batch request

7.0.0.0b1
---------

* add lower-constraints job
* Release note for not persisting '\_\_task\_execution' in DB
* Add '\_\_task\_execution' structure to task execution context on the fly
* Correct the string formatting in a info log message
* Optimizing big 'on-XXX' clauses
* Fix DB connection url in config guide
* Adding a test for std.test\_dict action
* Fix call to model\_query in sqlalchemy.api.\_get\_collection
* do not list setuptools as explicit dependency
* uncap eventlet
* Adding WWW-Authenticate info
* Fix tag searching
* Add Swift Service OpenStack Actions
* explicitly set the notifier type in unit tests
* Fix malformed state\_info when json is set to it
* Update cut\_list() to return the specified number of characters
* Add a release note for new indexes
* Replace the unsupported format character with the format method
* Added thread pool for eventlet executor mode
* Fix join examples and text
* Fix WF execution getting stuck on null description
* Update cut\_dict() to return no more than specified by length
* Add a release note for the 'pause' command fix
* Fix std.ssh "password" parameter
* Updated from global requirements
* Improve Mistral API DB error handling
* Fix 'pause' engine command
* Updated from global requirements
* Change unreasonable title
* Updated from global requirements
* Add new indexes to optimize \`task().result\` expression function
* Fixed the Mistral Docker build job
* Ensure workflow check is retried on DB error
* Improve the Docker integration
* Fixed a vhost in transport\_url processing by kombu driver
* Update Duplicate entry exceptions to provide more information
* Update test-requirements
* Remove unused nova actions
* Fix initializers of standard actions
* Updated from global requirements
* Fixed the broken MySQL job
* Clarify usage of break-on and continue-on
* Remove a redundant initializer of InvalidUnicodeAction in the tests
* Add a unit test to check initializers of MistralException hierarcy
* Updated from global requirements
* Fix server info string
* Updated from global requirements
* Make sure not to swallow original exceptions on handling remote errors
* Add a release note for execution events noitifications
* Retry DB operation on deadlock
* Updated from global requirements
* Fixed the length of a task name
* Prevent a action completion multiple times
* Remove unnecessary locks
* Rename task\_id to task\_execution\_id
* Restore rally job
* Updated from global requirements
* Updated from global requirements
* Fix docker image publish job
* Fix errors of parameter note
* Fix doc format
* Replaced the deprecated options in the configuration guide
* Add the 'error\_data' parameter to the FailAction
* Implement notification of execution events
* Remove duplicate word 'the'
* Update the description about mistral client guide
* Hard code top package name
* Explain better combinations of task names and engine commands in docs
* Updated from global requirements
* Update reno for stable/queens
* Updated from global requirements
* Cache action definitions
* Correction of comments for the #539039 review

6.0.0
-----

* Really make the cron trigger execution interval configurable
* Consider size of output\_on\_error
* Tags in workflows were not being properly checked
* Make the cron trigger execution interval configurable
* Adding Keycloak authorization support
* Fix how a cron trigger starts a workflow
* Fixes mistral-server --version command
* More tests for running workflows based on existing
* Remove achieved goals from the lis of annual goals
* Fixing grammar mistake
* Add a step to install tox
* Using oslo\_log instead of logging
* Update mysql connection in doc
* Fix error sql about privileges in doc
* Propagated a task timeout to a action execution
* modify the import order
* Fix docs to better reflect Jinja and YAQL usage
* Remove the invalid toctree
* Add claim\_messages and delete\_messages zaqar actions

6.0.0.0b3
---------

* Fix some reST field lists in docstrings
* Updated from global requirements
* Remove addition of a new task execution to task\_executions collection
* Disable the wsme Sphinx extension from the API ref docs
* Fix the 'params' field of the workflow execution REST resource
* Running new workflow based on an existing execution
* the word arguements should be arguments
* Migrate the jobs to native Zuul v3 format
* TrivialFix: remove redundant import alias
* Remove any old client actions that no longer exist
* Fix break\_on calculation in before\_task\_start
* Fix std.http action doc
* task name can not be reserved keyword
* Fixed integration of the unit tests with PosrgeSQL
* Remove the redundant word
* Added session.flush() before update\_on\_match()
* Added the limit on selection of delayed calls
* Modify error spelling word
* change import order
* fix syntax error the 'that' can not be ignore
* Updated from global requirements
* Allow ssh utils to use an absolute path
* Updated from global requirements
* Added the missing options (SCHEDULER\_GROUP and CRON\_TRIGGER\_GROUP) to a generating config
* Fix the error url
* Remove ceilometer actions from mistral
* Remove call to sys.exc\_clear() in Python 3
* Make workflow execution creation idempotent
* Add missing user/project name in action context
* Gracefully handle DB disconnected connect errors
* Readonly db transactions for testing
* Remove intree mistral tempest plugin
* Minor cosmetic changes
* Updated from global requirements
* Actually add the yaml\_dump expression
* Add executions yaql filter
* Disable unstable tempest test\_run\_ssh\_proxied\_action test
* Updated from global requirements
* Use mock for HTTP calls in unit tests
* Updated from global requirements
* Change log level for RestControllers
* Remove the \_\_init\_\_ method from the test action
* Fix inconsistencies when setting policy values
* Use the new action context in MistralHTTPAction
* Pass the new ActionContext to mistral-lib
* Use the latest policy-json-file reference
* Clear error info

6.0.0.0b2
---------

* Re-work the direct action call tempest test
* Make more CI jobs voting
* Fix race condition between task completion and child task processing
* Updated from global requirements
* Log a warning log message if the task isn't found
* Fix swift endpoint
* Disable unstable tempest test\_create\_action\_execution\_sync test
* Disable unstable tempest multi\_vim\_authentication test
* Avoid tox\_install.sh for constraints support
* Add id field to db query if no sorting order is provided
* Use a session for keystone auth
* Add new tempest tests for swift and zaqar client actions
* Updated from global requirements
* Allow filtering executions by their root\_execution\_id
* Implement policy in code - docs and reno (end)
* Implement policy in code - event trigger (11)
* Implement policy in code - workflow (10)
* Implement policy in code - workbook (9)
* Implement policy in code - service and task (8)
* Implement policy in code - member (7)
* Implement policy in code - execution (6)
* Implement policy in code - environment (5)
* Implement policy in code - cron trigger (4)
* Implement policy in code - action (3)
* Implement policy in code - action execution (2)
* Implement policy in code (1)
* Don't use oslo context get\_logging\_values
* Wrong handling of is\_system flag at workbooks causes DB error with MySQL 5.7
* Switch zaqarclient and swiftclient to use a session
* Stop passing auth\_token to ironic-inspector-client
* Modify log infomation to achieve the same format
* zuul: update tripleo zuul v3 jobs
* Remove setting of version/release from releasenotes
* Remove \_get\_task\_executions function
* Updated from global requirements
* Delete rows directly
* Updated from global requirements
* Fix yaql / json\_pp deprecation warning
* Remove \_get\_event\_trigger function
* Add a periodic job to check workflow execution integrity
* Fix wf\_trace info adding useless space at some conditions
* Remove \_get\_db\_object\_by\_name\_or\_id function
* Use mock for HTTP calls in unit tests
* Updated from global requirements
* Fix sporadically overwriting of finished workflow execution state
* Add retries to read-only db operations
* Remove \_get\_wf\_object\_by\_name\_and\_namespace function
* Get rid of ensure\_\* functions from db api
* Add a json\_dump expression function
* Re-raise DB errors when evaluating expressions
* Updated from global requirements
* Do not parse updated\_at for task if it was not updated
* [API] Support get/delete cron triggers in any projects for admin
* [API] Support project\_id filter in cron\_triggers API
* Normalize sorting
* 'all' parameter breaks task context
* Zuul: add file extension to playbook path
* Fix launcher tests
* Drop pyflakes from the test requirements
* Add a config option to disable cron triggers
* Fix named locks implementation
* Remove wrapping of database exceptions in \_get\_collection()
* Replace or\_ with in\_ function for searching queries
* Invoke AuthHook before ContextHook
* Fix deletion of delayed calls
* Add a yaml\_dump expression
* Redundant alias in import statement

6.0.0.0b1
---------

* Add the Ironic wait\_for\_provision\_state action
* Revert "Enable eventlet monkey patching for MySQLdb driver"
* Optimize mistral queries for 'get\_task\_executions'
* [Event-engine] Make listener pool name configurable
* Updated from global requirements
* Add yaml and json parsing functions
* Decoupling of Mistral tempest test from Mistral code base
* Make scheduler delay configurable
* Optimize sending result to parent workflow
* Added created\_at and updated\_at fields to functions task() and exection()
* Allow mistral actions to run when authentication is not configured
* Mistral fails on RabbitMQ restart
* Enable eventlet monkey patching for MySQLdb driver
* remove all common jobs
* Add actions for the ironic virtual network interface commands
* Add get cron-trigger by id support
* Dynamic action name evaluation
* Migrate Mistral jobs to Zuul v3
* Updated from global requirements
* TrivialFix: Add doc/build directory in .gitignore
* Update README with Keystone authtoken config
* Replace @loopingcall.RetryDecorator with @tenacity.retry
* Updated from global requirements
* Removed NOT IN query from expiration policy
* Use @db\_utils.retry\_on\_deadlock to retry scheduler transactions
* Updated from global requirements
* Add project\_id to API resources
* Add README.mistral.conf doc in etc directory
* TrivialFix: pretty format the json code block
* Add root\_execution\_id to sub-workflow executions
* Use get\_rpc\_transport instead of get\_transport
* Updated from global requirements
* Add mistral/tests/unit/expressions/\_\_init\_\_.py
* Updated from global requirements
* Cleanup test\_std\_http\_action
* Fixes issue rendering strings containing multiple jinja expressions
* Handle case with None encoding during std.http action execution
* Clean up screen and tail\_log references
* Using current pike stable release for devstack
* Fix Kombu RPC threading and use within multiprocess environment
* Fix "with-items" locking
* Fix to use . to source script files
* Updated from global requirements
* Fix services launcher to handle shutdown properly
* Catch DBEntityNotFoundError exceptions for invalid AdHoc Actions
* Add "API server started." print statement for the API wsgi service
* Adding doc8 to test-requirements
* Updated from global requirements
* Add ssl support for keycloak auth middleware
* Process input defaults and output transforms for nested AdHoc Actions
* Remove build files before run tox doc builder
* Updated from global requirements
* Dynamic workflow name evaluation
* Fix cron keystone calls when token is available
* Fix test for decoding utf8
* Update URL and indentations
* import fails in python3
* support py3 when doing db\_sync
* Update reno for stable/pike

5.0.0
-----

* Add doc8 rule and check doc/source files
* [Triggers] Fix running openstack actions via triggers
* TrivialFix: Fix typo
* Cascade pause from pause-before in subworkflows
* Cascade pause and resume to and from subworkflows
* Move dsl\_v2 document to user guide
* Updated from global requirements
* [Trusts] Fix deleting trust
* Fix event-triggers workflow namespace
* Small typo fix
* Fixed crontrigger execution error
* Fix drop index in version 022 DB upgrade script
* Allow async action execution to be paused and resumed
* Set mistral-dashboard default branch to master
* Create and run a workflow within a namespace
* Allow to list all cron-triggers
* Create and run a workflow within a namespace
* Fix auth in actions if there is no auth\_uri in context
* Use more specific asserts in tests
* Add releasenote for public event triggers
* Remove deprecation warning from tests
* Add Glare action pack
* TrivialFix: Fix typo
* Use recommended function to setup auth middleware in devstack
* Updated from global requirements
* Updated from global requirements

5.0.0.0b3
---------

* Updated from global requirements
* Fix the pep8 commands failed
* Fix cron-triggers and openstack actions
* Remove local Hacking for M318
* Add a hacking rule for string interpolation at logging
* Admin guide landing page added
* [Event-triggers] Allow public triggers
* Make README better
* Use 'related\_bug' decorator from stable interface
* Unsupported 'message' Exception attribute in PY3
* Unsupported 'message' Exception attribute in PY3
* Update UTC time validate cron trigger
* Fix some reST field lists in docstrings
* Updated from global requirements
* Cleanup docs to include params
* Change the logo to lowercase
* Replace e.message with str(e)
* Change the misplaced index links
* Chnage the mailing list URL
* Remove note for nested ad-hoc actions
* Updated from global requirements
* Update and optimize documentation links
* Replace test.attr with decorators.attr
* Updated from global requirements
* Handle empty response content during its decoding in std.http
* Ignore linux swap files range
* Updated from global requirements
* Enable some off-by-default checks
* Updated from global requirements
* Update reference link to Ocata
* Adding warning-is-error to doc building
* Updated from global requirements
* Remove the redundant default value
* Tests: Remove the redundant method
* Fixing deleting cron-trigger trusts
* Fix get event triggers
* Applying Pike document structure
* Update the commands in README.rst
* Fix tox
* Improve keycloak auth module
* Revert "Use recommended function to setup auth middleware in devstack"
* Update .gitignore
* Switch from oslosphinx to openstackdocstheme
* Use recommended function to setup auth middleware in devstack
* Update docker build
* Add cron name/id to workflow execution description
* Remove .testrepository directory from testenv
* Updated from global requirements
* Centralize session creation and authorization from OS clients
* Updated from global requirements
* Setup devstack with ini\_rpc\_backend
* Replace the usage of 'admin\_manager' with 'os\_admin'
* Use BoolOpt instead of StrOpt
* Refactor mistral context using oslo\_context
* Add more use of mistral-lib in mistral
* Updated from global requirements
* Add the baremetal\_introspection action for aborting
* Updated from global requirements
* Make sure that the field "state\_info" trimmed as expected
* Set access\_policy for messaging's dispatcher
* Increase the Environment variable column length
* Updated from global requirements
* Replace oslo.messaging.get\_transport with get\_notification\_transport
* Change author in setup.cfg
* Replace assertEqual([], items) with assertEmpty(items)
* Optimize the link address
* Always retrieve endpoint in mistral action
* This is only a minor defect in README.rst

5.0.0.0b2
---------

* Update the contents of configuration guide
* Minor nits to README
* Added style enfore checks for assert statements
* Make "triggered\_by" work in case of "join" tasks
* Remove the deprecated configuration options
* Stop using abbreviation DSL in document
* Update python-neutronclient version
* [Trusts] Fixing trusts deletion
* Updated from global requirements
* Remove 'sphinxcontrib.autohttp.flask' from sphinx config
* Fixing indentation in docs
* Updated from global requirements
* Updated from global requirements
* Fix doc generation for python 3
* Propagate "evaluate\_env" workflow parameter to subworkflows
* [Regions] Fixing determining keystone for actions
* Add one more test for task() function used in on-success
* Add 'runtime\_context' to task execution REST resource
* Add 'triggered\_by' into task execution runtime context
* Refactor rest\_utils
* Optimize API layer: using from\_db\_model() instead of from\_dict()
* Get rid of ambiguity in region\_name
* Update AdHoc Actions to support context data references
* Adding mistral\_lib actions to mistral
* Update Docker README
* Updated from global requirements
* Refactor db model methods
* Updated from global requirements
* Add release note for "action\_region" support
* Adding log to db\_sync
* Add "action\_region" param for OpenStack actions
* Updated from global requirements
* Release notes for "evaluate\_env"
* Add 'evaluate\_env' workflow parameter
* Add hide\_args=True to @profiler.trace() where it may cause problems
* Remove unused logging import
* Fix WSGI script for gunicorn
* Revert "Support transition to keystone auth plugin"
* Change service name to workflowv2 in docs
* Support transition to keystone auth plugin
* Fix a typo
* Force Python 2 for pep8 linting
* Add support for mistral-lib to Mistral
* Updated from global requirements
* Refactor Kombu-based RPC
* Make rpc\_backend not engine specific
* Add option to run actions locally on the engine
* Don't save @property methods with other attributes
* Fix the keystone auth url problem
* Optimize the link address

5.0.0.0b1
---------

* Enable WSGI under Apache in devstack
* Add "Project Goals 2017" to README.rst
* Fix the doc for 'concurrency' policy
* Add documentation for the engine commands
* Optimizing lang schema validation
* Advanced publishing: add 'global' function to access global variables
* Advanced publishing: add publishing of global variables
* Advanced publishing: change workflow lang schema
* Fix serialization issue
* Fix a description of 'executor\_thread\_pool\_size' option in Kombu RPC
* Changed the README.rst and added debug guide
* Updated from global requirements
* Disable pbrs auto python-api generation
* Set the basepython for the venv tox environment
* Use Jinja2 sandbox environment
* Limit the number of finished executions
* Add Apache License Content in index.rst
* Fix gate failure
* Add release note for resource RBAC feature
* Updated from global requirements
* Rework the CLI Guide
* Allow admin user to get workflow of other tenants
* Role based resource access control - delete executions
* Use the Mistral syntax highlighting on the dsl v2 page
* Updated from global requirements
* Replace six.iteritems() with .items()
* Role based resource access control - update executions
* Add sem-ver flag so pbr generates correct version
* Remove the empty using\_yaql gude
* Use the plain syntax highlighting in the webapi example
* Remove the highlighting choice 'HTTP'
* Add a Mistral lexer for pygments
* Don't create actions when inspection fails
* Change Http action result content encoding
* Updated from global requirements
* Role based resource access control - get executions
* Remove unnecessary setUp function in testcase
* Add check for idempotent id in tempest tests
* Remove unnecessary tearDown function in testcase
* Fix work of task() without task name within on-clause cases
* Explicitly set charset to UTF-8 in rest\_utils for webob.Response
* Updated from global requirements
* Replaces uuid.uuid4 with uuidutils.generate\_uuid()
* Surpress log with context data and db data
* Add missing schema validation and unit tests for 'publish-on-error'
* Add release note for 'created\_at' support in execution()
* Add 'created\_at' to execution() yaql function
* Change some 3rd party package default log levels
* Remove log translations
* Trim yaql/jinja operation log
* Fix cinder/heat base import
* Add missing swift actions
* Use LOG.exception when adding an OpenStack action fails
* Updated from global requirements
* Add hacking for code style checks
* Fix multi\_vim tempest test failure
* Updated from global requirements
* Add unit test for deleting workflows by admin
* Improve database object access checking
* Updated from global requirements
* Log stack trace if action initialization faild
* Updated from global requirements
* Refactor methods in utils related to dicts
* Refactor workflow/action input validation
* Fully override default json values with user input
* Add head\_object action mapping for swift
* Updated from global requirements
* Deleting the expired execution with batch size
* Allow users to set the test run concurrency
* Include the missing lines in the coverage report
* Don't use 'master' as that isn't always true
* [doc] Changed the output fields in quickstart guide
* Improve the CONTRIBUTING.rst
* Add \`coverage erase\` to the cover report
* Fix update workflow by admin
* Rename package 'workbook' to 'lang'
* Fix get\_next\_execution\_time
* Add idempotent\_id decorator to tempest testcases
* Use utcnow() in expired executions policy test
* Every unit test creates and registers every OpenStack action
* Updated from global requirements
* Add idempotent\_id decorator to tempest testcases
* Verify the retry policy when passed in via variables
* Reduce the number of with-items and retried in the concurrency test
* Remove the delay from the direct workflow rerun tests
* External OpenStack action mapping file support
* Update docs for tasks function
* Remove output from list action executions API
* Update test requirement
* Updated from global requirements
* Correction in workflow state change handling
* Update Dockerfile to use Xenial
* Force Python 2 for documentation builds
* Fix memory leak related to cached lookups
* Fix for coverage job showing 0% coverage for kombu
* Add Keycloak authentication doc for client side
* Add details into docs about semantics of 'on-XXX' clauses
* Add Keycloak authentication doc for server side
* Refactor RPC serialization: add polymophic serializer
* Updated from global requirements
* Add reno for tasks function
* Updated from global requirements
* Remove '\_\_task\_execution' from task outbound context
* Updated from global requirements
* Revert "External OpenStack action mapping file support"
* Prepare for using standard python tests
* Fix for failing services on py3 with kombu driver
* Remove support for py34
* External OpenStack action mapping file support
* Remove wrong licensing
* Refactor RPC serialization: remove JsonPayloadSerializer class
* Update reno for stable/ocata

4.0.0.0rc1
----------

* Fix for failing gates
* Enforce style check for xrange()
* Fix for failing services on py3 with kombu driver
* Fix try import of openstack client modules
* Remove some profiler traces, logs, use utils.cut() where needed
* Remove unnecessary evaluation of outbound context
* Optimizing utils.cut() for big dictionaries and lists
* Fix doc build if git is absent

4.0.0.0b3
---------

* Updated from global requirements
* Add support for Rabbit HA
* Refactor rpc configuration loading
* Updated from global requirements
* Invalid jinja pattern regex corrected
* Add script for unit test coverage job
* Updated from global requirements
* In Python 3.7 "async" and "await" will become reserved keywords
* Allow hyphens in Workflow and ad-hoc action names
* External OpenStack action mapping file support added
* Make 'task' function work w/o a task name
* using utcnow instead of now in expiration policy
* Enforce style check for assertIsNone
* Add action "std.test\_dict"
* Register Javascript action additionally as 'js' action
* Role based resource access control - update workflows
* Remove insecure flag from the Baremetal Introspection client
* Updated from global requirements
* Make kombu driver work in multi-thread manner
* Fix unit test that rely on order of return from DB
* Use utcnow() instead of now()
* Stop excpecting to have rabbit flags set when using fake driver
* Updated from global requirements
* Insecure flag added to openstack context
* Initial commit for mistral api-ref
* Removed unnecessary utf-8 encoding
* Python3 common patterns
* Updated from global requirements
* Fix unit test that rely on order of return from DB
* Fix for failing kombu dsvm gate
* Move mock requirement to test-requirements.txt
* Using sys.exit(main()) instead of main()
* Use i18n for help text
* Added gnocchi action pack
* Add 'retry\_on\_deadlock' decorator
* Fix two failing test cases in test\_tasks
* Add the "has" DB filter
* Use assertGreater() or assertLess()
* Fix version response from root controller
* Adding releasenotes for aodh action support
* Updated from global requirements
* Refactor 'stress\_test' to fit the current layout better
* Add rally tests for 'join': 100 and 500 parallel tasks
* Add a test for 'with-items' task: count=100, concurrency=10
* Add aodh actions to mistral
* Disable invalid API test till it's fixed
* Copy \_50\_mistral.py file from enabled folder
* Fix doc for missing dashboard config file
* Role based resource access control - get workflows
* Make body of std.email optional
* Refresh object state after lock acquisition in WithItemsTask
* Small adjustments in WithItemsTask
* Fix 'with-items' task completion condition
* Apply locking to control 'with-items' concurrency
* Slightly improve 'with-items' tests
* Get rid of with\_items.py module in favor of WithItemsTask class
* Refactor and improve 'with-items' algorithms
* Fix docs in README.rst
* Fix configuration generator
* Fix version response from root controller
* Exclude .tox folder from coverage report
* Updated from global requirements
* Add more tests to mistral rally
* Replace six.iteritems() with .items()
* Display all the possible server values

4.0.0.0b2
---------

* Correct missspellings of secret
* Minor changes in the document
* Added test cases for a few possible scenarios
* change the cron-trigger execution time from localtime to UTC
* Use the with keyword dealing with file objects
* Modify the link in 'README.rst'
* Modify the function "\_get\_spec\_version(spec\_dict)"
* Update the wording in the actions terminology docs
* Remove commented-out Apache 2 classifier from setup.cfg
* Updated from global requirements
* Fix for failing kombu gate
* modify something in 'dsl\_v2.rst'
* Fix two errors in YAML example and a error in action doc
* Handling MistralException in default executor
* Fix a syntax error in yaml example
* std.email action requires a smtp\_password
* Change version '1.0' to '2.0'
* Add descriptions for on\_task\_state\_change parameters
* Updated from global requirements
* Added releasenote for retry policy update
* Cleanup obvious issues in 'with-items' tests
* Updated from global requirements
* Allow "version" to be within workflow names in workbooks
* Updated from global requirements
* Yaql Tasks Function
* Bump Ironic API version to 1.22 when creating the Ironic client
* Small changes to docs to comply with openstack document style
* Fix launch process of Mistral components
* Modify import style in code
* Some spelling errors
* Initial commit for mistral-i18n support
* Add timestamp at the bottom of every page
* Show team and repo badges on README
* Make CI gate for unit tests on mysql work
* Fix the default configuration file path
* Updated from global requirements
* Mock the HTTP action in the with\_items tests
* Fix devstack plugin compatibility
* Updated the retries\_remain statement
* Updated from global requirements
* Add Ironic RAID actions
* Revert "Remove unused scripts in tools"
* Add a test for invalid task input expression
* Fix config import in javascript action module
* Make Jinja evaluator catch and wrap all underlying exceptions
* Make YAQL evaluator catch and wrap all underlying exceptions
* Replace 'assertFalse(a in b)' with 'assertNotIn(a, b)'
* Updated from global requirements

4.0.0.0b1
---------

* Replace retrying with tenacity
* Add cancelled state to action executions
* Updated from global requirements
* Fix possible DB race conditions in REST controller
* Remove unused pylintrc
* Added releasenote for Senlin Action Pack
* Migrated to the new oslo.db enginefacade
* Added senlin action pack
* Few changes in the doc
* Use mock for a bad HTTP call in unit tests
* Few changes related to the doc blueprint
* Fix REST API dangling transactions
* Fix error message format in action handler
* Fix error message format in other task handler methods
* Migrate mistral task\_type
continue * Fix missing exception decorators in REST API * Remove unused scripts in tools * Replace uuid4() with generate_uuid() from oslo_utils * Updated from global requirements * Add type to tasks API * Handle region_name in openstack actions * Add more tests to mistral rally * Replace oslo_utils.timeutils.isotime * Adding Variables to Log Messages * Updated from global requirements * cors: update default configuration * Added unit tests for workflow executions and task executions filtering * Fix DB API transaction() * Run actions without Scheduler * Get correct inbound tasks context for retry policy * Updated from global requirements * Adding tests for workbook execution and execution list to Rally * Use service catalog from authentication response * Updated from global requirements * Fix a bug in the algo that determines if a route is possible * Enable DeprecationWarning in test environments * Added additional info in devstack/readme.rst * Fixing 'join' task completion logic * Updated from global requirements * Removal of unnecessary directory in run_tests.sh * Get service catalog from token info * Add one more test for YAQL error message format * Change format of YAQL errors * Updated from global requirements * Update .coveragerc after the removal of openstack directory * Enable code coverage report in console output * Updated from global requirements * Remove unused logging import * Cleanup Newton Release Notes * Publish/output in case of task/workflow failure * Don't include openstack/common in flake8 exclude list * Fix PEP8 issue * Change task() function to return 'null' if task doesn't exist * Enable release notes translation * Updated from global requirements * Describe vital details for debugging in PyCharm * Update documentation for multi-vim support * Add Jinja evaluator * Minor changes to the documentation * Minor changes in the installation guides * Add a way to save action executions that run synchronously * Import haskey from keys module * Declare the encoding of the file * Changes made to comply with OpenStack writing style * Cleanup the Quickstart Documentation * Stop adding ServiceAvailable group option * Updated from global requirements * Update heat actions in mapping.json * Updated from global requirements * Accept service catalog from client side * Using assertIsNone() instead of assertEqual(None, ...)
* Updated from global requirements * Make default executor use async messaging when returning action results * Disable Client Caching * Updated from global requirements * Revert "Update UPPER_CONSTRAINTS_FILE for stable/newton" * Remove environment data from task inbound context * Use parentheses to wrap strings over multiple lines * Updated from global requirements * Using sys.exit(main()) instead of main() * Do not include project name in the client cache key * Updated from global requirements * Add tests to check deletion of delayed calls on WF execution delete * Delete all necessary delayed calls on WF stop * Update UPPER_CONSTRAINTS_FILE for stable/newton * Fix for timing out actions on run-action * Fix a typo in access_control.py * Adding a script for fast mistralclient help generation * Make Javascript implementation configurable * Add unit test case for deletion of execution in case of (error and cancelled) * Avoid storing workflow input in task inbound context * Replace assertEqual(None, *) with assertIsNone in tests * Updated from global requirements * Add __ne__ built-in function * Update reno for stable/newton * Remove context.spawn * Correct documentation about task attributes 'action' and 'workflow' * Updating mistralclient docs * Abstract authentication function * Fix for raising exception from kombu 3.0.0.0rc1 ---------- * Remove workflow spec, input and params from workflow context * Add a smarter delay between workflow completion checks * Optimize the logic that checks if 'join' task is allowed to start * Copy cached WF spec stored by definition id into WF execution cache * Add functional tests for event engine functions * Added unit tests for Workbook and Workflow filtering * Delete unnecessary comma * Fixed task in_bound context when retrying * Enable changing of rpc driver from devstack * Take os_actions_endpoint_type into use * Fix mistral API docs: fix v2.rst to refer to new module paths, and add the cron trigger param to POST v2/cron_triggers/ documentation * Add event trigger REST API * Using count() instead of all() for getting incomplete tasks * Fix for raising exception directly to kombu * Updated from global requirements * Fix delayed calls DB migration * Standardize release note page ordering * Fixed HTTP links in CONTRIBUTING.rst * Optimize finder functions for task executions * Change execution mechanism for 'join' tasks * Fixed an incorrect migration revision number in a comment * Cast to str for allowable types * Raise NotImplementedError instead of NotImplemented * Optionally include the output when retrieving all executions * Add __ne__ built-in function * Fix getting URLs / and /v2 * Add event configuration for event trigger 3.0.0.0b3 --------- * Add 'uuid' YAQL function * Sync tools/tox_install.sh * Updated from global requirements * Fix for 'Cannot authenticate without an auth_url' * Add client caching for OpenStack actions * Add setuptools to requirements.txt * Task publish does not overwrite variable in context Edit * Updated from global requirements * Clean imports in code * TrivialFix: Remove unused logging import * Add a note to the documentation about std.fail * Some minor code optimization in post_test_hook.sh * Updated from global requirements * Fix for not working 'run-action' on kombu driver * Updated from global requirements * Fix documentation * Clean imports in code * Use more specific asserts in tests * Use upper constraints for all jobs in tox.ini * Updated from global requirements * Updated the configuration
guide * Add a DB migration for named locks * Implement named transactional lock (semaphore) * Updated from global requirements * Closes-Bug: 1607348 * Optimize task defer() method * Optimize direct workflow controller * Updated from global requirements * Updated from global requirements * Fix task post completion scheduling * Fix _possible_route() method to account for not completed tasks * Add 'wait-before' policy test with two chained tasks * Fix task 'defer' * Filtering support for actions * Increase size of 'task_executions_v2.unique_key' column * Add 'join after join' test * Slightly improve workflow trace logging * Fix workflow and join completion logic * Towards non-locking model: remove pessimistic locks * Fix specification caching mechanism * Towards non-locking model: make 'with-items' work w/o locks * Make mistral work with amqp and zmq backends * Towards non-locking model: adapt 'join' tasks to work w/o locks * Add unique keys for non locking model * Updated from global requirements * Fix GET /executions/ to init 'output' attribute explicitly * Fix past migration scripts discrepancies * Fix for get action executions failing with "has no property 'type'" * Updated Doc for SSL configuration * Use actual session for ironic-inspector action population * Added support for SSL connection in mistral-api server * Towards non-locking model: decouple WF completion check via scheduler * Towards non-locking model: use insert_or_ignore() for delayed calls * Towards non-locking model: add insert_or_ignore() on DB API * Fix the use of both adhoc actions and "with-items" in workflows * Towards non-locking model: removing env update from WF controller * Updated from global requirements * DB migration to three execution tables and increase some columns * Updated from global requirements * Add state info for synchronous actions run from CLI * Towards non-locking model: fix obvious workflow controller issues * Towards non-locking model: Add 'unique_key' for delayed calls * Add _get_fake_client to ironic-inspector actions * Add target parameters to REST API * Update docs and add release note for safe-rerun flag * Invalidate workflow spec cache on workflow definition updates * Removing unnecessary workflow specification parsing * Splitting executions into different tables * Added release note for https support * Add cancelled state to executions * Enable user to use transport_url in kombu driver * Fixed trivial issue in exception message * Updated from global requirements * Fix DSLv2 example according to Mistral Newton * Updated from global requirements * Use 'rpc_response_timeout' in kombu driver * Use paginated query even if 'limit' or 'marker' is not set * Remove task result for collection REST requests * Allow to use both name and id to update action definitions * Remove some inconsistency in DB API * Get rid of oslo_db warning about "id" not being in "sort_keys" * Add event engine service * Error handling test: error in 'publish' for a task with 'on-error' * Added 'pip install -r requirements.txt' instruction * Executor fails actions if they are redelivered * Move the remainder of REST resources to resources.py * Move REST resources action, action execution and task to resources.py * Add the new endpoint /v2/tasks//workflow_executions * Allow to use both name and id to access action definitions * Pass 'safe-rerun' param to RPC layer * Initialize RPC-related flag when starting API * Update installation package list in installation guide * Add param 'safe-rerun' to task * Create
MistralContext from rpc context in kombu engine * Add db models for event trigger * Updated from global requirements * Fix SPAG errors in Quickstart and Main Features docs * Fix some trivial SPAG errors in docs * Rename package mistral.engine.rpc to mistral.engine.rpc_backend * Fixing filtering in task controller * Add Python 3.5 classifier and venv * Updated from global requirements 3.0.0.0b2 --------- * Fix for YaqlEvaluationException in std.create_instance workflow * Updated from global requirements * Add tests for Kombu driver * Release note for KeyCloak OIDC support * Add KeyCloak OpenID Connect server-side authentication * Add authentication options for KeyCloak OIDC * Add proper handling for implicit task completion * Add proper error handling for task continuation * Add error handling tests: invalid workflow input, error in first task * Add more tests for error handling * Fix utility print_executions method * Log warnings for openstack action generation failures * Fix Magnum action _get_fake_class * Fix Murano action _get_fake_class * Stylistic cleanups to lazy loading patch * Add configuration option for endpoint type * Add filters to all collections listing functions (tags included) * Lazy load client classes * Integrating new RPC layer with Mistral * Make RPC implementation configurable * Adding OsloRPC server and client * Add support for custom YAQL functions * Remove obsolete config option "use_mistral_rpc" * Add tacker actions in mistral * Update Expiration Policy Documentation * New RPC layer implementation * Don't create actions when attempting to update one that doesn't exist * Updated from global requirements * Add zake into dependencies * Add action context to all action executions * Fix SSHActionsTestsV2 failure * Updated mapping.json file * Support recursive ad-hoc action definitions * Updated from global requirements * Updated from global requirements * Updated from global requirements * Use client credentials to retrieve service list * Remove std.mistral_http action from tests * Doc updated for oslo_policy configuration * Updated from global requirements * Remove .mailmap file * Fix mysql driver installation section in readme * Fix API inconsistencies with GET /v2/workflows * Fixed fake clients of glance and designate * Fixed get_actions_list script to get glance actions * Fixed get_actions_list script to get designate actions * Example Mistral docker container broke due to oslo.policy update * Refactored tempest tests * Release note for magnum actions support * Fix postgresql test failure * Add configuration for Mistral tempest testing * Added docstring for enforce method * Release note for murano actions support * Add magnum certificates and mservices actions * Release note for role-based access control * Added role-based authentication support * Added murano actions * Add magnum bays actions * Enable osprofiler to measure performance * Rename the to_string method to to_json to clarify its purpose * Support JSON data in JSON API type * Add Magnum actions * Updated from global requirements * Removing redundant wf_ex_id parameter for rerun across the code * Add explicit preconditions for methods of Action, Task and Workflow * Add a test that verifies an old bug with join * Refactoring workflow handler * Fix invalid type usage for join * Mistral actions for designate v1 APIs not working * Updated from global requirements * Remove AUTHORS file * Remove AUTHORS file from git tracking * Add missing argument in exception string * Updated from global
requirements * Use LOG.exception when logging exceptions 3.0.0.0b1 --------- * Release notes for fail/pause/success transition message * Updated from global requirements * Fail/Success/Pause transition message * Remove unnecessary database transaction from Scheduler * Update .mailmap * Refactor Mistral Engine * Updated from global requirements * Updated from global requirements * Fixes the Mistral Docker image * Updated from global requirements * Return 'Unknown error' when error output is empty * Fix client in TroveActions * Add Python 3.4 to the classifiers * Remove unnecessary executable permissions * Updated from global requirements * Add baremetal.wait_for_finish action to mapping * Update get_arg_list_as_str to skip func params * Updated from global requirements * Enforcing upper constraints for tox test jobs * Fix get task list on YAQL error in with-items * Add API to validate ad-hoc action * Updated from global requirements * Updated from global requirements * Replace keystone CLI with openstack CLI * Add Designate APIs as mistral actions * Remove oslo.messaging hack since it's broken with 5.0.0 version * Fix the yaql GitHub repository * Updated from global requirements * Updated from global requirements * Fix mistral installation in devstack * Refactoring exception hierarchy * Updated from global requirements * Fixing engine facade hierarchy * Fixed issue related to docker image creation * Updated from global requirements * Rename base API test class * Disable cron trigger thread for API unit tests * Disabled SSL warnings while running tempest tests * Add extra checks for the existence of executor_callback * Updated from global requirements * Updated from global requirements * Added script to create docker image * Switch to auto-generated cron trigger names in unit tests * tempest: fix dir_path * Leave more relevant comment in engine race condition test * Add utility methods to test action executions more conveniently * Fixing failing functional tests for Cinder and Heat actions * Update OpenStack actions mapping * Updated from global requirements * Unblock skipped test * Replace self._await(lambda: ..)
constructs with more readable calls * Add auth_enabled=False to a cron trigger test * Updated from global requirements * Updated from global requirements * Updated from global requirements * Unblock skipped tests in test_action_defaults.py * Updated from global requirements * Fixing issue with different versions of oslo_messaging * Getting rid of task result proxies in workflow context * Fix typos in Mistral files * Hacking log for warning * Fixing engine transaction model and error handling * Refactor workflow controller and fix a bug in _fail_workflow() * Fixing a bug in DB API method that acquires entity lock * Also package mistral_tempest_tests * Module docs are not being generated * Update reno for stable/mitaka 2.0.0.0rc1 ---------- * Ack message after processing (oslo.messaging) * Run mistral services as separate processes * Fix compatibility with WebOb 1.6.0 * Reduce spec parsing some more * Register the config generator default hook with the right name * Moved CORS middleware configuration into oslo-config-generator * Updated from global requirements * Deleting redundant trust creation in workbook uploading mechanism * Use tempest.lib instead of tempest-lib * Fix with-items task termination when sub-workflows fail * Restructure README file * Updated from global requirements * Updated from global requirements 2.0.0.0b3 --------- * Fix the problem when parsing config file * Add asynchronous actions doc * Add release notes for M-3 * Updated from global requirements * Updated from global requirements * Fixed 'workflow_name' key error * Change for synchronous Mistral actions from CLI * Updated from global requirements * Delete workflow members when deleting workflow * Add Mistral action pack * Release notes for Barbican actions * Updated from global requirements * Updated from global requirements * Add timestamp for member API response * Show shared workflows * Add actions to expose OpenStack Barbican APIs * Updated from global requirements * tempest-lib has been added to requirements.txt * Fix occasional test failure by SSHActions * Reduce spec parsing in workflow lifecycle * Support workflow id in execution operations * Add workflow id column to executions_v2 table * Fix occasional test failure by assertListEqual * Added CORS support to Mistral * Fix spellings for two words * BaremetalIntrospectionAction get endpoint by service_type * Implement basic Zaqar queue operations * Fix with-items concurrency for sub-workflows * Mistral tests will run from tempest plugin * Use proper way to initialize nova client * Updated from global requirements * Fix for not running 'on-success' task after task with 'with-items' * Fix quickstart doc error * Added link for pre-built docker image * Fix rerun of task in subworkflow * Fixed engine tests * Removed mistral/tests/functional * Updated from global requirements * Fix multiple reruns of with-items task * Remove argparse from requirements * Updated from global requirements * Add release note for tempest plugin 2.0.0.0b2 --------- * Add release note for swift action support * Add task_execution_id to workflow execution in API * Support workflow sharing API * Change LOG.warn to LOG.warning * Add db operations for resource members * Add db model for resource sharing * Remove unused logging import * Update REST API to support env update * Allow env update on resume and rerun workflows * Add support for OpenStack Swift actions * Disallow user to change workflow scope * Replace assertTrue(isinstance()) with assertIsInstance() * Updated from global
requirements * Support workflow UUID when creating cron trigger * "test_ssh_actions" failed test has been fixed * Fix db error when running python34 unit tests * Updated dynamic credential support for functional test * Trivial: Remove unused logging import * Drop py33 support * Release note for mistral-docker-image * Added README.rst file for tempest plugin * Added base.py to tempest plugin * Added engine to tempest plugin * Added test_mistral_basic_v2.py to tempest plugin * Initial layout for mistral tempest plugin * Added mistral default actions * If task fails on timeout - there is no clear message of failure * devstack/plugin.sh: stop using deprecated option group for rabbit * Fix client name in setUpClass's method in 'test_ssh_actions' * Documentation for Mistral and Docker * Added Dockerfile to create docker image * Fix example for workbook in doc * Support UUID when deleting a workflow definition * Support UUID when updating a workflow definition * Support UUID when getting a workflow definition * Fix DB migration 009 constraint dropping * Add release note for fixing execution saved in wrong tenant * Updated from global requirements * Workflow name cannot be in the format of UUID * Fix join on branch error * Updated from global requirements * Get "cron trigger" list using model query * Add support for OpenStack Ironic Inspector actions * Updated from global requirements * Refactor action generator * Fix concurrency issues by using READ_COMMITTED * Ignored PEP257 errors * Fix example for ad-hoc action in doc * Numerous debug messages due to iso8601 log level * Fixing execution saved in wrong tenant * Updated from global requirements * Pass environment variables of proxy to tox * Make test_expiration_policy_for_executions stable * Delete python bytecode before every test run * Fix state_info details for with-items task error * Reset task state_info on task re-run * Run pep8 on some tools python files * Remove version from setup.cfg 2.0.0.0b1 --------- * Add support for OpenStack Ironic actions * Fix tools/get_action_list.py * Update install_venv.py so it says 'Mistral' * Add etc/mistral.conf.sample to .gitignore * Add database indices to improve query performance * Result will be [] if list for with-items is empty * Added unit test when policy input is variable * Improve error message for YAQL task function * Add release notes for trove support * Add release notes for Cinder v2 support * Updated from global requirements * Force releasenotes warnings to be treated as errors * Send mail to multiple to_addrs failed * Correct heatclient comment in mapping.json * Remove running of CLI tests on commit to mistral repo * Change installation of python-mistralclient in the gates * Fix database upgrade from a new database * Updated from global requirements * Fix task state for YAQL error in subflow output * Moved to cinderv2 client support * Show project id when retrieving workflow(s) * Updated from global requirements * Add the CONTRIBUTING.rst file * Fix with-items concurrency greater than the number of items * Adding releasenotes management to Mistral * Use setup_develop instead of setup_package in plugin.sh * Add Trove to mistral actions * Fix cron-trigger's execution with pattern and first time * Pass creds into the clients.Manager() in functional tests * Move base.py and config.py under unit/ folder * Add ceilometer action support * Increased size of "state_info" column to 64KB * Skipped some tests in py3 environment * Fixing reference of floating_ips_client in tests *
OpenStack typo * Updated from global requirements * Ensure only one WF execution for every CT cycle * Wrap sync_db operations in transactions * Remove iso8601 dependency * Fix all H405 pep8 errors * Adding callback URL to action context * Updated from global requirements * Remove kombu as a dependency for Mistral * Move the default directories into settings file * Removing wait() when initializing notification listener * Updated from global requirements * Do not use len() in log_exec decorator * Fixing wf execution creation at initial stage * Remove default=None for config options * Fixing workflow execution state calculation * Resolved encoding/decoding problem * Wrapper is used instead of direct json library * Comparison operator has been changed * Fixed some unit test issues * Filter is converted to list * Fix state change on exception during task state change * Updated from global requirements * Change in sort direction at execution controller * Avoid comparison between "None" type and "int" type * Division result is wrapped to int() * Updated from global requirements * devstack: add support for mistraldashboard * Fixing SSH actions to use names of private keys * [Docs] Add 'Cookbooks' page * Use oslo_config new type PortOpt for port options * Add decode() function for string comparison * Refactored filter implementation * mistral-documentation: dashboard documentation regarding debug known issue * Fix mistral dsvm gate * Updated from global requirements * Adding 'json_pp' function in YAQL * Added home-page value with mistral docs * filter() is wrapped around list() * Updated from global requirements * Updated from global requirements * Extracting generator objects returned by openstack actions * Set version for Mitaka * Updated from global requirements * Adding functional tests for SSH actions * Fixing "Task result / Data Flow" section of "Main Features" in docs * Fixing terminology/actions section in documentation * Fixing description of "mistral_http" action in DSL spec * Adding section about validation into API v2 spec * Adding "Cron triggers" section into API v2 specification * Action definition updated when workbook is created * Adding "Services" section into API v2 specification * Fixing small issues in documentation * Updated from global requirements * Creating new SSH action which uses gateway * Fixing ssh action to use private keys * Use MutableDict from sqlalchemy directly * Updated from global requirements * Delivering error message via header in pecan.abort * Replace copy.copy with copy.deepcopy * Updated from global requirements * Remove the transaction scope from task executions API * Colorise mistral log output * Updated from global requirements * Fix argparse error in wsgi script * Update AUTHORS file * mistral-documentation: dashboard documentation regarding debug * Fix more unit tests in py34 job * Fixing scheduler tests * Remove usage of expandtabs() in get_workflow_definition * Renaming state DELAYED to RUNNING_DELAYED in doc 1.0.0.0rc1 ---------- * Renaming state DELAYED to RUNNING_DELAYED * Support JSON and arrays in JavaScript action in Mistral * Fix some spelling typos in manual and program output * Fix order of arguments in assertEqual * Fix more tests in python34 gate * Using six.iteritems() to avoid some python3 tests failure * Fixing run action when error occurs * Fixing std.create_instance workflow * Adding devstack installation doc * Fixing searching errors in mistral.exceptions * Check for trigger before delete wf * Change ignore-errors to
ignore_errors * Removing "skip" decorators for some OpenStack actions tests * Workflow definition updated when workbook is created * Fail task on publish error * Raise correct exception when a service doesn't exist * Add semantics validation of direct workflow 'join' tasks * .mailmap for pbr AUTHORS update * Fix two typos * Updated from global requirements * Adding validation of workflow graph * Mistral documentation: CLI operations * Adding 'is_system' to definition model * Fixing uploading public workflow or action * Fixing DSL documentation * Initial commit that fixes py34 tests run * Refactor get_task_spec using mechanism of polymorphic DSL entities * get_action_list: improve generated JSON output * get_action_list: use novaclient.client.Client * Adding test where with-items evaluates 'env' * Fixing indentation in 'create action' tutorial * Minor changes to Mistral docs * Customized response sent in case of fault condition * Fix docstring for the test of the std.email action * Fix order of arguments in assertEqual * Switch to devstack plugin * Updated from global requirements * Fix usage of python-novaclient in Mistral 1.0.0.0b3 --------- * Mistral docs: Upgrade database guide * Mistral terminology: cron-triggers and actions * Add YAQL function 'task' to extract info about task in DSL * Raising exception if there aren't start tasks in direct workflow * Mistral docs terminology: executions * The link for plugin samples is added * Mistral documentation: mistralclient * Support action_execution deletion * Use default devstack functions for Mistral user/service/endpoint creation * Fix timing in expired execution unit test * Fix execution update where state_info is unset * Fix creation of Mistral service and endpoints * Removes unused posix-ipc requirement * Mistral documentation: architecture * Mistral documentation: Quickstart * Updated from global requirements * Small adjustments and fixes for execution expiration policy * Mistral docs terminology: workbooks and workflows * Fixing occasional failure of test_create_action_execution * Adding project_id to expiration-policy for executions ctx * Fixing 2 typos in comments * Mistral documentation: adding configuration guide * Refactor task controller with new json type * Refactor execution controller with new json type * Refactor environment controller with new json type * Refactor cron trigger controller with new json type * Refactor action execution controller and tests * Fixing working concurrency when value is YAQL * Add fields filter for workflow query * Mistral documentation: adding installation guide * Fix failure in execution pagination functional tests * Enabling direct workflow cycles: adding a parallel cycles test * Switching to six module where it's not used yet * Mistral documentation: dashboard installation guide * Mistral documentation: main features * Add resource params to reflect WSME 0.8 fixes * Add schema for additional properties of BaseListSpec * Enabling direct workflow cycles: adding another test * Enabling direct workflow cycles: fixing evaluation of final context * Add config example for rotating logs * Add pagination support for executions query API * Purge executions created during functional testing * Moving to YAQL 1.0 * Fixing cron trigger test * Update the gitignore file and tox.ini * Enabling direct workflow cycles: fixing find_task_execution() function * Enabling direct workflow cycles: adding a test that now doesn't pass * Add pagination support for actions query API * Add functional tests for
workflow query * Fixed lack of context for triggers * Fixing working with-items and retry together * Implementing with-items concurrency * Add pagination support for workflows query API * Update AUTHORS * Raise user-friendly exception in case of connection failure * Scheduler in HA 1.0.0.0b2 --------- * Fix postgresql unit tests running * Add API for rerunning failed task execution * Remove mistral.conf.sample * Expiration policy for expired Executions * Add Service API * Add coordination feature to mistral service * Mistral documentation: Overview article * Mistral documentation: Initial commit * Complete action on async action invocation failure * Add processed field in task query response * Add one more tox env for running unit tests with postgresql * Add feature to rerun failed WF to the engine interface * Enable workflow to be resumable from errors * Fixing error result in run-action command * Fixing std.http action * Add coordination util for service management * Support large datasets for execution objects * Fixing execution state_info * Fixing import error in sync_db.py * Error result: fix std.http action * Error result: doc explaining error result in base action class * Error result: adding more tests * Making / and /v2 URLs allowed without auth * Error result: allow actions to return instance of wf_utils.Result * Error result: adding a test for error result * Remove explicit requirements.txt occurrence from tox.ini * Remove H803, H305 * Fixing workflow behavior with non-existent task spec * Make update behavior consistent * Drop use of 'oslo' namespace package * Add guidance for updating openstack action mappings * New mock release (1.1.0) broke unit/functional tests * Fixing get task list API * Updating yaql version * Fix cron triggers * Fix mistralclient errors when reinstalling devstack * Use task.spec so the result is always a list for with-items and remove redundant 'if' (Change-Id: Id656685c45856e628ded2686d1f44dac8aa491de, Closes-Bug: #1468419) * Modify run_tests.sh to support PostgreSQL * Add Mistral service and endpoint registration to README.rst * Fix inappropriate condition for retry policy * Fix invalid workflow completion in case of "join" * No input validation for action with kwargs argument * Delete one more tag 1.0.0.0b1 in devstack script pushed by mistake 1.0.0.0b1 --------- * Removing redundant header from setup.py * Simplifying a few data_flow methods * Workflow variables: modifying engine so that variables work * Workflow variables: adding "vars" property into workflow specification * Fixing devstack gate failure * Bug fix: with-items tasks should always have result of list type * Set default log level of loopingcall module to 'INFO' * Implementing action_execution POST API * Implementing 'start_action' on engine side * Fix wrong zuul_project name in mistral gate script * Creating action_handler to separate action functionality * Get rid of openstack/common package * Improving devstack docs * Drop use of 'oslo' namespace package * Fix execution update description error * Fix the inappropriate database initialization in README.rst * Fix stackforge repo refs in devstack/lib/mistral * Fix wrong db connection string in README.rst file * Add description param to execution creation API * Update .gitreview file for project rename * Add description field to executions_v2 table * Make use of graduated oslo.log module * Implementing 'continue-on' retry policy property * Adding some more constraints to cron trigger * Adding 'continue-on' to retry policy spec * Adding input
validation to cron-trigger creation * Fixing execution-update API * Fixing sending the result of subworkflow * Fix command line arguments having lower priority than config file * Make mistral use oslo-config-generator * Fixing mistral resources path * Fix devstack back to rabbit * Fixing devstack-gate failure * fix: Extra fields in the env definition are allowed * Allow pause-before to override wait-before policy * Adjust API docs to latest changes * Fixing YAQL related errors * Skip test on heat action * Removing incorrect 2015.* tags for client in devstack script * Adding migrations README * Fix dsvm gate failure * Fixing YAQL len() function in Mistral * Adding 'workflow_params' to cron triggers * Allowing a single string value for "requires" clause * Adding "requires" to "task-defaults" clause * Updating requirements to master * Updating mapping.json to master * Fix bug with action class attributes * Setting base version in setup.cfg for liberty cycle * Fix error when getting workflow with default input value * Fix wrong log content * Retry policy one line syntax * Fixing yaql version * Fix yaql error caused by the ply dependency * Fixing action_executions API * Adding script for retrieving OpenStack action list * Adding tests on 'break-on' of retry policy * Update mapping.json for OpenStack actions * Allowing strings in on-success/on-error/on-complete clauses * Consider input default values in ad-hoc action * Change novaclient import to v2 2015.1.0rc1 ----------- * Add action execution ID to action context * Consider input default values in workflow execution * Removing "policies" keyword from resources * Getting rid of "policies" keyword * Rolling back YAQL to v0.2.4 * Fixing result ordering in 'with-items' * Fixing tags of wf as part of wb * Fixing variable names in db/v2/sqlalchemy/api.py * Fix a logging issue in ssh_utils * Pin oslo pip requirements * Add YAQL parsing to DSL validation * Fixing engine concurrency issues * Apply input schema to workflow/action input * Add schema for workflow input with default value support * Remove transport from WSGI script * Fixing API 500 errors on Engine side * Fix typo in wf_v2.yaml * Moving to YAQL 1.0 * Get rid of v1 in installation scripts * Fixing exception type that workbook negative tests expect * Removing v1 related entries from setup.cfg * Renaming "engine1" to "engine" * Fixing DB errors in transport * Removing leftover v1 related stuff (resources, DSL specs) * Add workbook and workflow validation endpoints * Deleting all v1 related stuff * Fixing docs on target task property in README * Rename 'wf_db' to 'wf_def' to keep consistency * Provide 'output' in action_execution API correctly * Small data_flow refactoring, added TODOs to think about design * Fixing version info in server title * Fixing 'with-items' with plain input * Add 'keep-result' property to task-spec * Add implicit task access in workflow * Fixing 'with-items' work on empty list * Expanding generators when evaluating yaql expressions * Add mistral-db-manage script * Small refactoring in engine, task handler and workflow utils * Fixing big type column for output and in_context * Harden v2 DSL schema for validation * Fix bug with redundant task_id in part of logs * Fixing 'with-items' functionality * Fixing task API (published vars) * Support subclass iteration for Workflow controller 2015.1.0b3 ---------- * Fixing tasks API endpoint * Add action_execution API * Fixing pause-before policy * Fixing timeout policy * Implementing 'acquire_lock' method and fixing
workflow completion * Fix retry policy * Fixing wait-after policy * Fixing wait-before policy * Trigger remaining-executions and first-exec-date * Refactor task output: full engine redesign * Fix DSL schema in test workbook * Fixing scheduler work * Small refactoring in test_javascript * Add WSGI script for API server * Fix list of upstream tasks for task with no join * Fixing finishing workflow in case of DELAYED task state * Adding validation in policies * Refactor task output: DB API methods for action executions * Refactor task output: 'db_tasks'->'task_execs', 'db_execs'->'wf_execs' * Refactoring task output: 'task_db' -> 'task_ex', 'exec_db' -> 'wf_ex' * Refactoring task output: full redesign of DB models * Adding string() YAQL function registered at Mistral level * Fixing published vars for parallel tasks (and join) * Limit WorkflowExecution.state_info size * Fixing YAQL in policies * Default workflow type to 'direct' * Fix wrong log when task changes state * Fix mismatch to new YAQL syntax * Adjust standard actions and workflows * Changing YAQL syntax delimiters * Remove eventlet monkey patch in mistral __init__ * Refactoring task output: renaming DB models for better consistency * Fix OS action client initialization * Expose stop_workflow in API * Add simple integration tests for OpenStack actions * Fix formatting endpoint URLs in OS actions * Fixing a bug in logging logic and small refactoring * Refactoring task output: renaming 'output' to 'result' for task * Refactoring task output: adding ActionInvocation model * Task specification improvement (Part 2) * Add support for auth against keystone on https * Support SSL cert verification on outgoing https * Make spec object more readable in logging * Fix test_nova_actions after changes in tempest * Task specification improvement * Renaming _find_completed_tasks to _find_successful_tasks * Adding more tests for parallel tasks publishing * Fixing bug with context publishing of parallel tasks * Fix keystone actions * Fix tempest gate, add tempest import to our script * Fix the wrong project name in run_tests.sh usage * Track execution and task IDs in WF trace log * Changing InlineYAQLEvaluator: treat only {yaql} as YAQL * Fix H904 pep8 error * Refactoring inline parameters syntax * Add Rally jobs related files to Mistral 2015.1.0b2 ---------- * JavaScript action: part 2 * Allowing multiple hosts for ssh action * Catch workflow errors * Rename environment to env in start_workflow * Fix action_context in with_items * Fix sequential tasks publishing the same variable * Fix doc for DSL v2 * JavaScript action: part 1 * Apply default to action inputs from environment * Add full support for YAQL expressions * Fixing a data flow bug with parallel tasks * Changing publishing mechanism to allow referencing context variables * Fix 500 error on wrong definition * Pass action error to results * Fixing problem with trust creation * Working on secure DB access (part 4) * Working on secure DB access (part 3) * Working on secure DB access (part 2) * Working on secure DB access (part 1) * Concurrency: part 2 * Adding assertions for "updated_at" field in DB tests * Fix imports due to changes in tempest * Fixing environment tests * Concurrency: part 1 * Change 'with-items' syntax * Add validation on 'with-items' input * Adding test on calculating multi-array input * Adding more tests for YAQL length() function * Implement workflow execution environment - part 3 * Implement workflow execution environment - part 2 * Implement workflow
execution environment - part 1 * Small: remove polluting debug log * Updating YAQL dependency to version 0.2.4 * Update README file with devstack installation instruction * Small: refactor commands * Fix mistralclient initialization * Small fixes in default config * Small tox fixes * Using 'with-items' instead of 'for-each' * Fixing README * Implementing "no-op" task * Updating SQLAlchemy dependency 2015.1.0b1 ---------- * Refactor resume algorithm * Implement pause-before * Fixing parsing inline syntax parameters * Fix retry policy unit test * Fixing a bug in retry policy * Updates logging configuration samples * Changing target task property to singular form * Add region name to OpenStack client initialization * Fixing for-each * API controllers should log requests at INFO level * Add test case for dataflow to test action input * Refactor for-each * Style changes in launch.py * Testing wait policies defined in "task-defaults" for reverse workflow * Testing timeout policy defined in "task-defaults" for reverse workflow * Testing retry policy defined in "task-defaults" for reverse workflow * Redesigning engine to move all remote calls from transactions * Working on "join": making "one" join value work (discriminator) * Working on "join": allowed value "one" for "join" property * Add docs on task-affinity and configuring MySQL * Working on "join": removing array type from "join" JSON schema * Working on "join": making "join" trigger only once * Working on "join": adding a test to verify that join triggers once * Working on "join": fixing "partial join" test with new "noop" engine command * Working on "join": implementing partial join with numeric cardinality * Modified install docs * Fix creating triggers with the same pattern, wf and wf-input * Working on "join": added a test for numbered partial join * Refactor policies tests * Working on "join": making "full join" work with conditional transitions * Working on "join": making "full join" work with incoming errors * Adding "std.fail" action that always throws ActionException * Adding "std.noop" action (can be useful for testing) * Raise human-readable exception if workflow_name is not a dict * Working on "join": first basic implementation of full join * Working on "join": add "join" property into task specification * Working on "join": implement basic test for full join * Fix trace with wrong input for action * Fix Application context not found in tests * Add advanced tests on workflow-resume * Make it possible to resume workflow * Refactor API tests for v2 * Fix creating std actions * Renaming trusts.py to security.py and adding method add_security_info * Refactoring workbooks service to be symmetric with other services * Use YAML text instead of JSON in HTTP body * Renaming "commands" to "cmds" in engine to avoid name conflicts * Refactor std.email action * Update README files * Sort executions and tasks by time * Add 'project_id' to Execution and Task * Fill 'wf_name' task_db field * Add cinder actions * Add possibility to pass variables from context to for-each * Implement for-each task property * Updating AUTHORS file * Refactoring getting one object from DB * Fix creating objects with the same names * Add API integration tests for actions 0.1.1 ----- * Construct and pass action_context to action * Add passing auth info to std.http * Adding print out of server information into launch script * Adding method for authentication based on config keystone properties * Add functional API tests for cron-triggers * Docs fix - small structure fix * Add
documentation - part 3 * Add validation of 'for-each' task property DSL * Cut too long task result in log * Cleanup, refactoring and logging * Fixing condition in workflow service * Adding endpoint for cron triggers * Refactoring workflow and action services * Implementing cron trigger v2 * Adding DB model and DB api methods for cron triggers * Provide workflow input via API * Add for-each to task spec * Now collections in the DB are sorted by name * Create standard workflows and actions * Fixing order of commands and tasks in direct workflow * Fix task-defaults to work correctly * Removing saving raw action/workflow result under 'task.taskName' * Making YAQL function length() work for generators * Fixing a bug in inline expressions * Adding length() YAQL function * Whitelist binary 'rm' in tox.ini * Add adding target via YAQL * Add simple task affinity feature * Fixing dsl v2 unit test * Refactoring action service * Use keystonemiddleware in place of keystoneclient * Add generating parameters for openstack-actions * Provide action-input via API * Fix dataflow work * Add documentation - part 2 * Add documentation - part 1 * Update tearDown methods in API integration tests * Use $(COMMAND) instead of `COMMAND` * Making execution context immutable * Add workflow trace logging in engine v2 * Fix scheduler test * Fix providing 'is_system' property in /actions * Fix tasks in order of execution * Stop using intersphinx * Style changes in Scheduler and its tests * Add script to run functional tests locally * Adding 'tags' to action rest resource * Modifying workflow and action services to save 'tags' * Adding 'tags' to workflow and action specs * Cleaning up obsolete TODOs and minor style changes * Update requirements due to global requirements (master) * Fix API tests for v2 version 0.1 --- * Style changes in policies and commands * Fix race conditions in policies * Fix workbook and workflow models * Implementing policies in task-defaults property * Add timeout policy * Implementing 'task-defaults' workflow property * Cosmetic changes in actions service * Making action controller able to handle multiple actions * Making workflow endpoint able to upload multiple workflows * Fixing v2 workbooks controller not to deal with 'name' * Modifying workbook service to infer name and tags from definition * Adding 'name' to reverse_workflow.yaml workbook * Add workflow service module * Fix providing result (task-update API) * Add param 'name' to the test definition * Adding 'name' and 'tags' into workbook spec * Cosmetic changes in executions v2 controller and tests * Removing obsolete code related to ad-hoc actions * Renaming 'parameters' to 'input' everywhere * Cosmetic changes in Data Flow and commands * Fix passing params to execution * Fix dataflow work * Adding workflow parameters validation * Removing engine redundant parameter * Adding tests for order of engine instructions * Fixing db properties for testing purposes * Add API integration tests for v2 * Trivial: improve ad-hoc action test * Fix input on execution create * Fixing task/workflow specs to do transformations with 'on-XXX' once * Fixing v2 specs so 'on-XXX' clauses return lists instead of dicts * Improving exceptions for OpenStack actions * Getting rid of explicit 'start-task' property in workflow DSL * Implementing workflow 'on-task-XXX' clauses * Fix wrong passing of parameter 'workflow_input' * Fixing workflow specification to support 'on-task-XXX' clauses * Fixing workflow handlers to return all possible commands * Refactoring engine
using abstraction of command * Delete explicit raising of DBError from transaction * Fixing passing raw_result in v1 * Register v2 API on keystone by default * Renaming 'stop_workflow' to 'pause_workflow' * Adding unit tests for engine instructions * Fixing task v2 specification * Fix run workflow in case task state == ERROR * Fixed retry-policy optional 'break-on' property * Fix workflow update API * Add mechanism for generating action parameters * Implement short syntax for passing base-parameters into adhoc-action * Changing all DSL keywords to lower case * Additional testing of reverse workflow * Pass output from task API to convey_task_result * Moving all API tests under 'mistral.tests.unit' package * Fixing workbook definition upload for v1 * Add check on config file in sync_db script * Fixed Execution WSME model and to_dict() * Saving description from definition in actions endpoint * Fixing workflows controller to fill 'spec' property based on definition * Adding actions endpoint * Cosmetic changes * Fixing engine to support adhoc actions * Fixing workbook service to create actions * Implement wait-after policy and retry * Add test on passing expressions to parameters * Fixed Engine v2 work on fake RPC backend * Add possibility to use different types in task parameters * Adding necessary DB API methods for actions * Creating ad-hoc actions engine test * Removing obsolete namespace related methods from task v2 spec * Fixing subworkflow resolution algorithm * Removing 'workflow_parameters' from workflow spec * Switching to using 'with db_api.transaction()' * Removing redundant parameters from methods of policies * Add 'description' field to specifications * Add serializers to scheduler call * Implement Wait-before policy * Refactoring engine to build and call task policies * Provide executor with info about action * Create action_factory without access to DB * Delete code related to Namespaces * Change instructions on how to start Mistral * Dividing get_action_class into two separate methods * Rename action_factory to action_manager * Modify action_factory to store actions in DB * Work toward Python 3.4 support and testing * Renaming 'on-finish' to 'on-complete' in task spec * Adding "wait-before" and "wait-after" to task policies * Fixing workflow spec to return start task spec instead of its name * Including "policies" into task spec * Adjusting policy interfaces * Renaming 'workflow_parameters' to 'workflow-parameters' * Small optimizations and fixes * Fixing processing subworkflow result * Renaming 'class' to 'base' in action spec * Renaming 'start_task' to 'start-task' in workflow spec * Fix execution state ERROR if task_spec has on-finish * Additional changes in Delayed calls * Fixing services/workbooks.py to use create_or_update_workflow() * Implement REST API v2.0 * Adding new methods to DB API v2 (load_xxx and create_or_update_xxx) * Adding unit tests for workflow DB model * Add service for delayed calls * Improving services/workbooks * Removing obsolete db.api.py module in favor of db.v1.api.py * Introducing 'workflow' as an individual entity * Removing 'Namespaces' section from DSL * Renaming 'linear' workflow to 'direct' * Implementing task execution infrastructure * Add two more tests which check workflow execution * Small updates to devstack integration * Adding transaction context manager function for db transactions * Fail workflow if any task fails * Fixing validation for action specifications ('output' property) * Working on linear workflow:
on_task_result() * Working on linear workflow: start_workflow() * Working on engine implementation: on_task_result() * Renaming base class for Mistral DB models * Working on engine implementation: start_workflow() * Fix small issues in tests * Cosmetic changes in integration tests * Rename resource directory * Add integration test on Glance Action * Add test on Keystone Action * Add integration tests on nova actions * Add tests which check task dependencies * Move gate tests under mistral/tests * Add neutron actions * Small fixes in openstack-actions * Moving TaskResult and states to 'workflow' package * Adding implementation of method __repr__ for DB models * Working on reverse workflow: on_task_result() * Working on reverse workflow: implementing method start_workflow() * Replacing NotImplemented with NotImplementedError * Working on reverse workflow: fixing specification version injection * Unit tests for v2 DB model * Refactoring DB access layer * Implementing DSL specification v2 (partially) * Add heat actions * Add openstack actions * Switching from dicts to regular objects in DB API * Initial commit for the new engine * Fix mistral gate job * Replace oslo-incubator's db with standalone oslo.db * Move oslotest into test-requirements.txt * Calculate context for tasks with dependencies * Cleaning up index.rst file * The schedule triggers need to set up admin context before running * Add running mistralclient integration tests * Make executor able to work in isolated environment * Add installation of mistralclient in devstack script * Make plugins easier to use * Update requirements due to global-requirements * Fixing Mistral HTTP action to take care of empty headers * Log action failures and exceptions 0.0.4 ----- * Fixing wrong access to Mistral security context in engine * Make OpenStack related data available in actions * Add project_id to the workbook and filter by it * Make sure the context is correctly passed through the rpc * Add Executions and Tasks root API endpoints * Removing obsolete folder "scripts" * Remove redundant convey_task_results arguments * Remove redundant DB API arguments * 'requires' should take a string or list * Fix get task list of nonexistent execution * Favor addCleanup() over tearDown() * Make sure the api tests get a valid context * Fix Hacking rule H306 and H307 * Fix hacking rule H236 * Fix Hacking rule H302 (import only modules) * Expose Task's output and parameters through API * Make the service_type more consistent * Switch from unittest2 to oslotest (testtools) * Fix hacking rules H101 and E265 * Temporarily disable the new hacking rules * Renaming all example config files from *.conf.example to *.conf.sample * Fixing obsolete file name in README.rst * Fix devstack gate * Add upload definition action in test * Do a better job of quietening the logs * All tests should call the base class setUp() * Move all tests to use base.BaseTest * Add OS_LOG_CAPTURE to testr.conf * Fix create execution when workbook does not exist * Fix getting action_spec in create tasks * Added information about automated tests * Refactor test_task_retry to not rely on start_task * Clean up configuration settings * Refactor test_engine to not rely on start_task * Fix updating nonexistent task * Fix get execution list when workbook does not exist * Fix keystone config group for trust creation * Fix mistral devstack scripts * Fix bug with getting nonexistent task * Fix duplicate keystone auth_token config options * Move tests to testr * Add negative
functional tests * Add new tests for executions and tasks * Add lockutils to openstack/common * Implement new mistral tests * Remove unnecessary oslo modules * Making "Namespaces" section truly optional * Restore script update_env_deps in tools * Fix devstack integration scripts * Remove unused function get_state_by_http_status_code * Sync code with oslo-incubator * Small engine bugfixing/refactoring * Make field 'Namespaces' optional * Add support for plugin actions * Add autogenerated API documentation * Adding docstring for HTTPAction class * Renaming 'events' to 'triggers' * Adding more http standard action parameters * Fix H404 multi line docstring should start without a leading new line * Fix H233 Python 3.x incompatible use of print operator * Fix pep H301 one import per line * Fix pep H231 Python 3.x incompatible 'except x,y:' construct * Fix pep H402 one line docstring needs punctuation * Fix pep H201 no 'except:' at least use 'except Exception:' * Fix pep E226 missing whitespace around arithmetic operator * Add hacking to the flake8 tests * Add/fix all error handling mechanisms on REST API * Fix URL in "get workbook definition" test * Cleanup exceptions and add http code * Throwing an error when workbook validation fails * Throw NotFoundException when object not found * Fix creating trust on workbook creation * Allow launch script to start any combination of servers * Fix 500 status code on DELETE request * Fix issue with tempest tests * Task.publish is now processed as arbitrary structure * Fix demo.yaml example in tempest tests * Add test on arbitrary output dict in action * Fix mistral tests * Update mistral tests * Context contains results of all previous tasks now 0.0.2 ----- * Fixed issue with tarballs 0.0.1 ----- * Refactor engine to use plugins * Fixes list of requirements * Fixes README.rst formatting * Making workflow trace logging more consistent * Added Devstack integration * Fixing setup.cfg * Fix work on MySQL backend * Replace rabbit config to 'default' section * Additional workflow trace logging in abstract_engine.py * Fixing wrong comparison in retry.py * Engine as a standalone process * Improved README file * Fix evaluating task parameters * Adding all conf files in etc/ to .gitignore * Fix broken retry tests * Remove etc/logging.conf * Add workflow logging * Fixing inline expressions evaluation * Making execution data available in data flow context * Fixing initialization of variable 'action_spec' in abstract_engine.py * Remove redundant update task operation * Fix converting params and result in AdHocAction * Adding parameters to adhoc action namespaces * Removing 'base_output' from ad-hoc actions specification * Temporarily commenting assertions in task retry tests * Temporarily commenting assertions in task retry tests * Fix result of HTTP action * Fix returning ERROR task state * Fixing http action and abstract engine * Moving expressions.py out of package 'engine' * Change repeater to retry on error * BP mistral-actions-design (raw action spec -> ActionSpec) * BP mistral-actions-design (removing old actions, addressing previous comments) * BP mistral-actions-design (add SSH action) * BP mistral-actions-design (switch to new design) * BP mistral-actions-design (ad-hoc actions in factory) * BP mistral-actions-design (ad-hoc action) * BP mistral-actions-design (Mistral HTTP action) * BP mistral-actions-design (action creation) * BP mistral-actions-design (add new actions package) * BP mistral-actions-design * Add SSH Action * Remove local engine * Fix
repeatable task scheduling * Add resolving inline expressions * Cosmetic change * Fixed issue with deprecated exception * Fix minor issues * Fix keystone trust client * Add extracting action output * Refactor the local engine to use an in process executor * Implements: blueprint mistral-std-repeat-action * Correct fake action test name * Remove unneeded declarations in unit tests * Add keystone auth\_token in context * Fix keystone config group name * Add script to allow update dependencies in all envs * Fixing ordering bugs in local engine tests * Fixing ordering bugs in workbook model and tests * Fixing executor launch script * Fix getting task on-\* properties * Rename 'events' to 'triggers' * Implement new object-model specification * Use oslo.messaging for AMQP communications * Working on Data Flow (step 5) * Working on Data Flow (step 4) * Working on Data Flow (step 3) * Make engine configurable, make debugger show local variables * Partially fixed the pylint errors * Fix throwing exception when 'output' block is not defined * Fixed critical pylint warnings * Working on Data Flow (step 2) * Working on Data Flow (step 1) * Add scheduling specific task on sucess/error * Send email action, part 2 * Rename "target\_task" to "task" * Send email action, step 1 * Add negative tests to api * Fixing access to task "parameters" property in DSL * Fix getting task on-\* properties in DSL * Fix task keys properties in DSL parser * Add YAQL expression evaluation * Modified Rest action for process 'input' property * Add sync task execution * Fixing and refactoring authentication * Deleting client and demo app from main Mistral repo * Fixed issue with tarballs * Add integration tests * Divide RestAction on two separated actions in DSL * Add new access methods in DSL parser * Refactoring local and scalable engines * Adding Data Flow related code to REST API * Fixing typo in an exception message in Mistral client * Step 2 refactoring Mistral engines * Step 1 refactoring Mistral engines * Fix exceptions output when creating object in DB * Add SQLAlchemy in requirements.txt * Fix DB API import in scheduler.py * Implement single (non-scalable) engine * Fixing scheduler transactions * Fixing workbook events creation * Fixing flak8 excludes in tox.ini * Adjusting all license headers in python files so they look the same * Adding license and authors file * context creation in periodic task to execute workbook * Fix workbook POST duplicate exception * Add demo app for Mistral * Fix client for further patching * Added trust for workbook runs * Updating README.md file * Fixing scalable engine algorithm * Fixing scripts' headers to make them executable * Various fixes related to end-to-end testing * Fix resolving dependencies in workflow * Add explicit DB transaction management * Added context for application * Added keystone token authorization * Fix periodic tasks running over engine * Add engine related features * Implementing scalable Mistral Engine * Add DSL parser * Implementing Mistral Rest API Client * Add SQLAlchemy models and access methods * Connect DB implementation with DB interface * Added periodic events * Working on REST API * Added initial database setup * Adding REST API application skeleton based on pecan/wsme * Adding pecan, wsme, oslo and adjusting packages * Modify use case example * Fixing licence in setyp.py * Add example of using taskflow * Add .gitreview, setup.py and other infrastructure * Adding .gitignore * Adding virtual environment tools * Adjusting project name in readme file * 
Initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/HACKING.rst0000644000175000017500000000110600000000000015707 0ustar00coreycorey00000000000000Style Commandments ================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ Mistral Specific Commandments ----------------------------- - [M001] Use LOG.warning(). LOG.warn() is deprecated. - [M319] Enforce use of assertTrue/assertFalse - [M320] Enforce use of assertIs/assertIsNot - [M327] Do not use xrange(). xrange() is not compatible with Python 3. Use range() or six.moves.range() instead. - [M328] Python 3: do not use dict.iteritems. - [M329] Python 3: do not use dict.iterkeys. - [M330] Python 3: do not use dict.itervalues. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/LICENSE0000644000175000017500000002363600000000000015132 0ustar00coreycorey00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1815686 mistral-10.0.0.0b3/PKG-INFO0000644000175000017500000000442600000000000015216 0ustar00coreycorey00000000000000Metadata-Version: 1.1 Name: mistral Version: 10.0.0.0b3 Summary: Mistral Project Home-page: https://docs.openstack.org/mistral/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: Apache License, Version 2.0 Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/mistral.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Mistral ======= Workflow Service integrated with OpenStack. This project aims to provide a mechanism to define tasks and workflows in a simple YAML-based language, and to manage and execute them in a distributed environment. Project Resources ----------------- * `Mistral Official Documentation <https://docs.openstack.org/mistral/latest/>`_ * `User Documentation <https://docs.openstack.org/mistral/latest/user/index.html>`_ * `Administrator Documentation <https://docs.openstack.org/mistral/latest/admin/index.html>`_ * `Developer Documentation <https://docs.openstack.org/mistral/latest/developer/index.html>`_ * Project status, bugs, and blueprints are tracked on `Launchpad <https://launchpad.net/mistral>`_ * CloudFlow: a visualization tool for workflow executions, at https://github.com/nokia/CloudFlow * Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/mistral/ * Source for the project can be found at: https://opendev.org/openstack/mistral Platform: UNKNOWN Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/README.rst0000644000175000017500000000242200000000000015602 0ustar00coreycorey00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/mistral.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Mistral ======= Workflow Service integrated with OpenStack. This project aims to provide a mechanism to define tasks and workflows in a simple YAML-based language, and to manage and execute them in a distributed environment.
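For illustration only, here is a minimal sketch of what a workflow definition in that language looks like. The workflow, task, and input names below are made up for this example; ``std.echo`` is one of the standard actions shipped with Mistral:

.. code-block:: yaml

    ---
    version: '2.0'

    hello_workflow:
      type: direct
      input:
        - name
      tasks:
        say_hello:
          # std.echo simply returns its "output" parameter as the task result.
          action: std.echo output="Hello, <% $.name %>!"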
Project Resources ----------------- * `Mistral Official Documentation <https://docs.openstack.org/mistral/latest/>`_ * `User Documentation <https://docs.openstack.org/mistral/latest/user/index.html>`_ * `Administrator Documentation <https://docs.openstack.org/mistral/latest/admin/index.html>`_ * `Developer Documentation <https://docs.openstack.org/mistral/latest/developer/index.html>`_ * Project status, bugs, and blueprints are tracked on `Launchpad <https://launchpad.net/mistral>`_ * CloudFlow: a visualization tool for workflow executions, at https://github.com/nokia/CloudFlow * Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/mistral/ * Source for the project can be found at: https://opendev.org/openstack/mistral ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0695665 mistral-10.0.0.0b3/api-ref/0000755000175000017500000000000000000000000015436 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0815666 mistral-10.0.0.0b3/api-ref/source/0000755000175000017500000000000000000000000016736 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/api-ref/source/conf.py0000644000175000017500000001032100000000000020232 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import sys on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinxcontrib.autohttp.flask', 'sphinxcontrib.pecanwsme.rest', ] if not on_rtd: extensions.append('oslosphinx') wsme_protocols = ['restjson'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Workflow Service API Reference' copyright = u'2017, Mistral Contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. from mistral.version import version_info release = version_info.release_string() version = version_info.version_string() # If true, sectionauthor and moduleauthor directives will be shown in the # output.
They are ignored by default. show_authors = False # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_static_path = ['_static'] if on_rtd: html_theme_path = ['.'] html_theme = 'sphinx_rtd_theme' # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['mistral.'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.check_output( git_cmd).decode('utf-8') # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'Mistral API Reference' # Custom sidebar templates, maps document names to template names. html_sidebars = { 'index': [ 'sidebarlinks.html', 'localtoc.html', 'searchbox.html', 'sourcelink.html' ], '**': [ 'localtoc.html', 'relations.html', 'searchbox.html', 'sourcelink.html' ] } # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'mistral', u'Mistral', [u'OpenStack Foundation'], 1) ] # If true, show URL addresses after external links. 
man_show_urls = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/api-ref/source/index.rst0000644000175000017500000000014100000000000020573 0ustar00coreycorey00000000000000=============================== OpenStack Workflow Service APIs =============================== ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0815666 mistral-10.0.0.0b3/api-ref/source/v2/0000755000175000017500000000000000000000000017265 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/api-ref/source/v2/action.inc0000644000175000017500000000000000000000000021223 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/api-ref/source/v2/cron-trigger.inc0000644000175000017500000000000000000000000022350 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/api-ref/source/v2/execution.inc0000644000175000017500000000000000000000000021751 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/api-ref/source/v2/task.inc0000644000175000017500000000000000000000000020710 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/api-ref/source/v2/workbook.inc0000644000175000017500000000000000000000000021603 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/api-ref/source/v2/workflow.inc0000644000175000017500000000000000000000000021620 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/bindep.txt0000644000175000017500000000044300000000000016116 0ustar00coreycorey00000000000000# This is a cross-platform list tracking distribution packages needed by tests; # see http://docs.openstack.org/infra/bindep/ for additional information. mysql-client [platform:dpkg] mysql-server [platform:dpkg] postgresql postgresql-client [platform:dpkg] postgresql-server [platform:rpm] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0815666 mistral-10.0.0.0b3/devstack/0000755000175000017500000000000000000000000015717 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/devstack/README.rst0000644000175000017500000000112500000000000017405 0ustar00coreycorey00000000000000============================ Enabling Mistral in Devstack ============================ 1. Download DevStack:: git clone https://github.com/openstack-dev/devstack.git cd devstack 2. Add this repo as an external repository in ``local.conf`` file:: > cat local.conf [[local|localrc]] enable_plugin mistral https://github.com/openstack/mistral To use stable branches, make sure devstack is on that branch, and specify the branch name to enable_plugin, for example:: enable_plugin mistral https://github.com/openstack/mistral stable/pike 3. 
Run ``stack.sh`` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0855668 mistral-10.0.0.0b3/devstack/files/0000755000175000017500000000000000000000000017021 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/devstack/files/apache-mistral-api.template0000644000175000017500000000151100000000000024215 0ustar00coreycorey00000000000000Listen %PUBLICPORT% <VirtualHost *:%PUBLICPORT%> WSGIDaemonProcess mistral-api processes=%API_WORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup mistral-api WSGIScriptAlias / %MISTRAL_BIN_DIR%/mistral-wsgi-api WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On AllowEncodedSlashes On <IfVersion >= 2.4> ErrorLogFormat "%{cu}t %M" </IfVersion> ErrorLog /var/log/%APACHE_NAME%/mistral_api.log CustomLog /var/log/%APACHE_NAME%/mistral_api_access.log combined %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% <Directory %MISTRAL_BIN_DIR%> <IfVersion >= 2.4> Require all granted </IfVersion> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> </Directory> </VirtualHost> ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/devstack/plugin.sh0000755000175000017500000002243500000000000017562 0ustar00coreycorey00000000000000# ``stack.sh`` calls the entry points in this order: # # install_mistral # install_python_mistralclient # configure_mistral # start_mistral # stop_mistral # cleanup_mistral # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace # Defaults # -------- # Support entry points installation of console scripts if [[ -d $MISTRAL_DIR/bin ]]; then MISTRAL_BIN_DIR=$MISTRAL_DIR/bin else MISTRAL_BIN_DIR=$(get_python_exec_prefix) fi # Toggle for deploying Mistral API under HTTPD + mod_wsgi MISTRAL_USE_MOD_WSGI=${MISTRAL_USE_MOD_WSGI:-True} MISTRAL_FILES_DIR=$MISTRAL_DIR/devstack/files # create_mistral_accounts - Set up common required mistral accounts # # Tenant User Roles # ------------------------------ # service mistral admin function create_mistral_accounts { if ! is_service_enabled key; then return fi create_service_user "mistral" "admin" get_or_create_service "mistral" "workflowv2" "Workflow Service v2" get_or_create_endpoint "workflowv2" \ "$REGION_NAME" \ "$MISTRAL_SERVICE_PROTOCOL://$MISTRAL_SERVICE_HOST:$MISTRAL_SERVICE_PORT/v2" \ "$MISTRAL_SERVICE_PROTOCOL://$MISTRAL_SERVICE_HOST:$MISTRAL_SERVICE_PORT/v2" \ "$MISTRAL_SERVICE_PROTOCOL://$MISTRAL_SERVICE_HOST:$MISTRAL_SERVICE_PORT/v2" } function mkdir_chown_stack { if [[ ! -d "$1" ]]; then sudo mkdir -p "$1" fi sudo chown $STACK_USER "$1" } # Entry points # ------------ # configure_mistral - Set config files, create data dirs, etc function configure_mistral { # create and clean up auth cache dir mkdir_chown_stack "$MISTRAL_AUTH_CACHE_DIR" rm -f "$MISTRAL_AUTH_CACHE_DIR"/* mkdir_chown_stack "$MISTRAL_CONF_DIR" # Generate Mistral configuration file and configure common parameters.
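    # oslo-config-generator reads the option namespaces listed in
    # tools/config/config-generator.mistral.conf and writes a fully
    # commented sample of every registered option to the output file.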
oslo-config-generator --config-file $MISTRAL_DIR/tools/config/config-generator.mistral.conf --output-file $MISTRAL_CONF_FILE iniset $MISTRAL_CONF_FILE DEFAULT debug $MISTRAL_DEBUG # Run all Mistral processes as a single process iniset $MISTRAL_CONF_FILE DEFAULT server all # Mistral Configuration #------------------------- # Setup keystone_authtoken section configure_auth_token_middleware $MISTRAL_CONF_FILE mistral $MISTRAL_AUTH_CACHE_DIR iniset $MISTRAL_CONF_FILE keystone_authtoken www_authenticate_uri $KEYSTONE_AUTH_URI_V3 # Setup RabbitMQ credentials iniset_rpc_backend mistral $MISTRAL_CONF_FILE # Configure the database. iniset $MISTRAL_CONF_FILE database connection `database_connection_url mistral` iniset $MISTRAL_CONF_FILE database max_overflow -1 iniset $MISTRAL_CONF_FILE database max_pool_size 1000 # Configure action execution deletion policy iniset $MISTRAL_CONF_FILE api allow_action_execution_deletion True # Don't use the default 0.0.0.0 it's good only for ipv4 iniset $MISTRAL_CONF_FILE api host $(ipv6_unquote $MISTRAL_SERVICE_HOST) if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then setup_colorized_logging $MISTRAL_CONF_FILE DEFAULT tenant user fi if [ "$MISTRAL_RPC_IMPLEMENTATION" ]; then iniset $MISTRAL_CONF_FILE DEFAULT rpc_implementation $MISTRAL_RPC_IMPLEMENTATION fi if [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then _config_mistral_apache_wsgi fi if [[ ! -z "$MISTRAL_COORDINATION_URL" ]]; then iniset $MISTRAL_CONF_FILE coordination backend_url "$MISTRAL_COORDINATION_URL" elif is_service_enabled etcd3; then iniset $MISTRAL_CONF_FILE coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT" fi } # init_mistral - Initialize the database function init_mistral { # (re)create Mistral database recreate_database mistral utf8 $PYTHON $MISTRAL_DIR/tools/sync_db.py --config-file $MISTRAL_CONF_FILE } # install_mistral - Collect source and prepare function install_mistral { setup_develop $MISTRAL_DIR # installing python-nose. 
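    # real_install_package is a devstack helper; it installs the distro's
    # python-nose package through the system package manager rather than pip.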
real_install_package python-nose if is_service_enabled horizon; then _install_mistraldashboard fi if [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then install_apache_wsgi fi } function _install_mistraldashboard { git_clone $MISTRAL_DASHBOARD_REPO $MISTRAL_DASHBOARD_DIR $MISTRAL_DASHBOARD_BRANCH setup_develop $MISTRAL_DASHBOARD_DIR ln -fs $MISTRAL_DASHBOARD_DIR/mistraldashboard/enabled/_50_mistral.py $HORIZON_DIR/openstack_dashboard/local/enabled/_50_mistral.py } function install_mistral_pythonclient { if use_library_from_git "python-mistralclient"; then git_clone_by_name "python-mistralclient" setup_dev_lib "python-mistralclient" sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-mistralclient"]}/tools/,/etc/bash_completion.d/}mistral.bash_completion fi } function install_mistral_lib { if use_library_from_git "mistral-lib"; then git_clone $MISTRAL_LIB_REPO $MISTRAL_LIB_DIR $MISTRAL_LIB_BRANCH setup_develop $MISTRAL_LIB_DIR fi } function install_mistral_extra { if use_library_from_git "mistral-extra"; then git_clone $MISTRAL_EXTRA_REPO $MISTRAL_EXTRA_DIR $MISTRAL_EXTRA_BRANCH setup_develop $MISTRAL_EXTRA_DIR fi } # start_mistral - Start running processes function start_mistral { # If the site is not enabled then we are in a grenade scenario local enabled_site_file enabled_site_file=$(apache_site_config_for mistral-api) if is_service_enabled mistral-api && is_service_enabled mistral-engine && is_service_enabled mistral-executor && is_service_enabled mistral-event-engine ; then echo_summary "Installing all mistral services in separate processes" if [ -f ${enabled_site_file} ] && [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then enable_apache_site mistral-api restart_apache_server else run_process mistral-api "$MISTRAL_BIN_DIR/mistral-server --server api --config-file $MISTRAL_CONF_DIR/mistral.conf" fi run_process mistral-engine "$MISTRAL_BIN_DIR/mistral-server --server engine --config-file $MISTRAL_CONF_DIR/mistral.conf" run_process mistral-executor "$MISTRAL_BIN_DIR/mistral-server --server executor --config-file $MISTRAL_CONF_DIR/mistral.conf" run_process mistral-event-engine "$MISTRAL_BIN_DIR/mistral-server --server event-engine --config-file $MISTRAL_CONF_DIR/mistral.conf" else echo_summary "Installing all mistral services in one process" run_process mistral "$MISTRAL_BIN_DIR/mistral-server --server all --config-file $MISTRAL_CONF_DIR/mistral.conf" fi } # stop_mistral - Stop running processes function stop_mistral { local serv for serv in mistral mistral-engine mistral-executor mistral-event-engine; do stop_process $serv done if [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then disable_apache_site mistral-api restart_apache_server else stop_process mistral-api fi } function configure_tempest_for_mistral { if is_service_enabled tempest; then iniset $TEMPEST_CONFIG mistral_api service_api_supported True fi } function cleanup_mistral { if is_service_enabled horizon; then _mistral_cleanup_mistraldashboard fi if [ "$MISTRAL_USE_MOD_WSGI" == "True" ]; then _mistral_cleanup_apache_wsgi fi sudo rm -rf $MISTRAL_CONF_DIR } function _mistral_cleanup_mistraldashboard { rm -f $HORIZON_DIR/openstack_dashboard/local/enabled/_50_mistral.py } function _mistral_cleanup_apache_wsgi { sudo rm -f $(apache_site_config_for mistral-api) } # _config_mistral_apache_wsgi() - Set WSGI config files for Mistral function _config_mistral_apache_wsgi { local mistral_apache_conf mistral_apache_conf=$(apache_site_config_for mistral-api) local mistral_ssl="" local mistral_certfile="" local mistral_keyfile="" local 
mistral_api_port=$MISTRAL_SERVICE_PORT local venv_path="" sudo cp $MISTRAL_FILES_DIR/apache-mistral-api.template $mistral_apache_conf sudo sed -e " s|%PUBLICPORT%|$mistral_api_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%MISTRAL_BIN_DIR%|$MISTRAL_BIN_DIR|g; s|%API_WORKERS%|$API_WORKERS|g; s|%SSLENGINE%|$mistral_ssl|g; s|%SSLCERTFILE%|$mistral_certfile|g; s|%SSLKEYFILE%|$mistral_keyfile|g; s|%USER%|$STACK_USER|g; s|%VIRTUALENV%|$venv_path|g " -i $mistral_apache_conf } if is_service_enabled mistral; then if [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing mistral" install_mistral install_mistral_lib install_mistral_extra install_mistral_pythonclient elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring mistral" create_mistral_accounts configure_mistral elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing mistral" init_mistral start_mistral elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then echo_summary "Configuring Tempest for Mistral" configure_tempest_for_mistral fi if [[ "$1" == "unstack" ]]; then echo_summary "Shutting down mistral" stop_mistral fi if [[ "$1" == "clean" ]]; then echo_summary "Cleaning mistral" cleanup_mistral fi fi # Restore xtrace $XTRACE # Local variables: # mode: shell-script # End: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/devstack/settings0000644000175000017500000000377100000000000017512 0ustar00coreycorey00000000000000# Devstack settings # We have to add Mistral to enabled services for run_process to work # "mistral" should be always enabled # To run services in separate processes and screens need to write: # enable_service mistral mistral-api mistral-engine mistral-executor # To run all services in one screen as a one process need to write: # enable_service mistral # All other combinations of services like 'mistral mistral-api' or 'mistral mistral-api mistral-engine' # is an incorrect way to run services and all services by default will run in one screen enable_service mistral mistral-api mistral-engine mistral-executor mistral-event-engine # Set up default repos MISTRAL_REPO=${MISTRAL_REPO:-${GIT_BASE}/openstack/mistral.git} MISTRAL_BRANCH=${MISTRAL_BRANCH:-master} MISTRAL_DASHBOARD_REPO=${MISTRAL_DASHBOARD_REPO:-${GIT_BASE}/openstack/mistral-dashboard.git} MISTRAL_DASHBOARD_BRANCH=${MISTRAL_DASHBOARD_BRANCH:-master} MISTRAL_LIB_REPO=${MISTRAL_LIB_REPO:-${GIT_BASE}/openstack/mistral-lib.git} MISTRAL_LIB_BRANCH=${MISTRAL_LIB_BRANCH:-master} MISTRAL_LIB_DIR=${DEST}/mistral-lib MISTRAL_EXTRA_REPO=${MISTRAL_EXTRA_REPO:-${GIT_BASE}/openstack/mistral-extra.git} MISTRAL_EXTRA_BRANCH=${MISTRAL_EXTRA_BRANCH:-master} MISTRAL_EXTRA_DIR=${DEST}/mistral-extra GITDIR["python-mistralclient"]=${DEST}/python-mistralclient GITREPO["python-mistralclient"]=${MISTRALCLIENT_REPO:-${GIT_BASE}/openstack/python-mistralclient.git} GITBRANCH["python-mistralclient"]=${MISTRALCLIENT_BRANCH:-master} # Set up default directories MISTRAL_DIR=${DEST}/mistral MISTRAL_DASHBOARD_DIR=${DEST}/mistral-dashboard MISTRAL_CONF_DIR=${MISTRAL_CONF_DIR:-/etc/mistral} MISTRAL_CONF_FILE=${MISTRAL_CONF_DIR}/mistral.conf MISTRAL_DEBUG=${MISTRAL_DEBUG:-True} MISTRAL_AUTH_CACHE_DIR=${MISTRAL_AUTH_CACHE_DIR:-/var/cache/mistral} MISTRAL_SERVICE_HOST=${MISTRAL_SERVICE_HOST:-$SERVICE_HOST} MISTRAL_SERVICE_PORT=${MISTRAL_SERVICE_PORT:-8989} MISTRAL_SERVICE_PROTOCOL=${MISTRAL_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} 
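# Default name of the Mistral service account in Keystone.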
MISTRAL_ADMIN_USER=${MISTRAL_ADMIN_USER:-mistral} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0855668 mistral-10.0.0.0b3/doc/0000755000175000017500000000000000000000000014660 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/requirements.txt0000644000175000017500000000035400000000000020146 0ustar00coreycorey00000000000000sphinx>=1.8.0,!=2.1.0,!=3.0.0;python_version>='3.4' # BSD sphinxcontrib-httpdomain>=1.3.0 # BSD sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0 openstackdocstheme>=1.30.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0855668 mistral-10.0.0.0b3/doc/source/0000755000175000017500000000000000000000000016160 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0855668 mistral-10.0.0.0b3/doc/source/admin/0000755000175000017500000000000000000000000017250 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/architecture.rst0000644000175000017500000000431100000000000022463 0ustar00coreycorey00000000000000==================== Mistral Architecture ==================== Basic concepts ~~~~~~~~~~~~~~ A few basic concepts that one has to understand before going through the Mistral architecture are given below: * Workflow - consists of tasks (at least one) describing the exact steps to be taken during workflow execution. * Task - an activity executed within the workflow definition. * Action - the work done when a specific task is triggered. Mistral components ~~~~~~~~~~~~~~~~~~ Mistral is composed of the following major components: * API Server * Engine * Task Executors * Scheduler * Notifier * Persistence The following diagram illustrates the architecture of Mistral: .. image:: img/mistral_architecture.png API server ---------- The API server exposes a REST API to operate and monitor workflow executions. Engine ------ The Engine picks up the workflows from the workflow queue. It handles the control and data flow of workflow executions. It also computes which tasks are ready and places them in a task queue. It passes data from task to task, deals with condition transitions, etc. Task Executors -------------- The Task Executor executes task Actions. It picks up tasks from the queue, runs actions, and sends results back to the engine. Scheduler --------- The scheduler stores and executes delayed calls. It is an important Mistral component since it interacts with the engine and executors. It also triggers workflows on events (e.g., a periodic cron event). Notifier -------- On workflow and task execution, events are emitted at certain checkpoints, such as when a workflow execution is launched or when it is completed. The notifier routes the events to configured publishers. The notifier can either be configured to execute locally on the workflow engine, or it can be run as a server, much like the remote executor server, that listens for events. Running the notifier as a remote server ensures the workflow engine quickly unblocks and resumes work. The event publishers are custom plugins which can write the event to a webhook over HTTP, an entry in a log file, a message to Zaqar, and so on.
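To make the workflow, task, and action concepts above concrete, and to show how the engine's condition transitions are declared, here is a small, purely illustrative sketch in the v2 workflow language. The workflow and task names are made up; ``std.noop`` is one of Mistral's standard actions:

.. code-block:: yaml

    ---
    version: '2.0'

    deploy:
      tasks:
        create_server:
          # In a real workflow this would be a meaningful action.
          action: std.noop
          # The engine follows these transitions depending on the task result.
          on-success:
            - notify_done
          on-error:
            - cleanup
        notify_done:
          action: std.noop
        cleanup:
          action: std.noop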
Persistence ----------- The persistence layer stores workflow definitions, current execution states, and past execution results. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0855668 mistral-10.0.0.0b3/doc/source/admin/configuration/0000755000175000017500000000000000000000000022117 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/configuration/config-guide.rst0000644000175000017500000001502700000000000025216 0ustar00coreycorey00000000000000Mistral Configuration Guide =========================== Mistral has to be configured correctly whether it works with a real OpenStack environment or without one. **NOTE:** Most of the following operations should be performed in the mistral directory. #. Generate *mistral.conf* (if it does not already exist):: $ oslo-config-generator \ --config-file tools/config/config-generator.mistral.conf \ --output-file /etc/mistral/mistral.conf #. Edit the file **/etc/mistral/mistral.conf**. #. **If you are not using OpenStack, skip this item.** Provide valid keystone auth properties:: [keystone_authtoken] www_authenticate_uri = http://keystone1.example.com:5000/v3 identity_uri = http://<keystone-host>:<keystone-admin-port> admin_password = <password> admin_tenant_name = <tenant-name> #. Mistral can also be configured to authenticate with a Keycloak server via the OpenID Connect protocol. In order to enable Keycloak authentication, the following section should be in the config file:: auth_type = keycloak-oidc [keycloak_oidc] auth_url = https://<keycloak-server-host>:<keycloak-server-port>/auth Property ``auth_type`` is assigned to ``keystone`` by default. If SSL/TLS verification needs to be disabled then ``insecure = True`` should also be added under the ``[keycloak_oidc]`` group. #. If you want to configure SSL for the Mistral API server, provide the following options in the config file:: [api] enable_ssl_api = True [ssl] ca_file = <path-to-CA-certificate> cert_file = <path-to-server-certificate> key_file = <path-to-private-key> #. **If you don't use OpenStack or you want to disable authentication for the Mistral service**, provide ``auth_enable = False`` in the config file:: [pecan] auth_enable = False #. **If you are not using OpenStack, skip this item**. Register the Mistral service and Mistral endpoints on Keystone:: $ MISTRAL_URL="http://[host]:[port]/v2" $ openstack service create workflowv2 --name mistral \ --description 'OpenStack Workflow service' $ openstack endpoint create workflowv2 public $MISTRAL_URL $ openstack endpoint create workflowv2 internal $MISTRAL_URL $ openstack endpoint create workflowv2 admin $MISTRAL_URL #. Configure transport properties in the ``[DEFAULT]`` section:: [DEFAULT] transport_url = rabbit://<user>:<password>@<rabbit-host>:5672/ #. Configure the database. **SQLite can't be used in production; use MySQL or PostgreSQL instead.** Here are the steps to connect a *MySQL* database to Mistral: Make sure you have installed the ``mysql-server`` package on your database machine (it can be your Mistral machine as well). Install the MySQL driver for Python:: $ pip install PyMySQL Create the database and grant privileges:: # mysql CREATE DATABASE mistral; USE mistral GRANT ALL ON mistral.* TO 'root'@<host> IDENTIFIED BY <password>; Configure the connection in the Mistral config:: [database] connection = mysql+pymysql://<user>:<password>@<host>:3306/mistral **NOTE**: If PostgreSQL is used, configure the connection item as below:: connection = postgresql://<user>:<password>@<host>:5432/mistral
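After the connection is configured, the database schema can be populated with the ``mistral-db-manage`` tool that ships with Mistral; a minimal sketch, assuming the config file path generated earlier::

    $ mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head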
#. **If you are not using OpenStack, skip this item.** Update the ``mistral/actions/openstack/mapping.json`` file, which contains all allowed OpenStack actions, according to the specific client versions of the OpenStack projects in your deployment. Please find more detailed information in the ``tools/get_action_list.py`` script. #. Configure the Task affinity feature if needed. It is used to distinguish a single task executor, or one task executor from a group of task executors:: [executor] host = my_favorite_executor Then this executor can be referred to in the Workflow Language by .. code-block:: yaml ...Workflow YAML... my_task: ... target: my_favorite_executor ...Workflow YAML... #. Configure role-based access policies for Mistral endpoints (policy.json):: [oslo_policy] policy_file = <path-to-policy.json> The default policy.json file is in ``mistral/etc/``. For more details see the `policy.json file `_. #. Modify the action execution reporting configuration if needed. It is possible for actions to get stuck in the *"RUNNING"* state, for example if the assigned executor dies or the message that signals the completion of the action is lost. This section describes a heartbeat-based solution to close these forgotten action executions. The related configuration options are ``max_missed_heartbeats`` and ``check_interval``. Note that if either of these options is *"0"* then the feature won't be enabled. The default configuration is the following:: [action_heartbeat] max_missed_heartbeats = 15 check_interval = 20 first_heartbeat_timeout = 3600 *"check_interval = 20"*, so action executions are checked every 20 seconds. When the checker runs, it will transition all running action executions to error if the last heartbeat received is older than *"20 \* 15"* seconds. Note that *"first_heartbeat_timeout = 3600"*, so the action execution won't be closed for 3600 seconds if no heartbeat was received for it. - **max_missed_heartbeats** Defines the maximum number of missed heartbeats to be allowed. If the number of missed heartbeats exceeds this number, then the related action execution will be transitioned to the *"ERROR"* state with the cause *"Heartbeat wasn't received."*. - **check_interval** The interval between checks (in seconds). - **first_heartbeat_timeout** The grace period for the first heartbeat (in seconds). #. Configure event publishers. Event publishers are plugins that are optionally installed in the same virtual environment as Mistral. Event notification can be configured for all workflow executions for one or more event publishers. The configuration is under the ``notify`` param in the ``notifier`` section. The ``notify`` param is a list of dictionaries, one for each publisher, identifying the type (i.e. the registered plugin name) and additional key-value pairs to be passed as kwargs into the publisher:: [notifier] notify = [ {"type": "webhook", "url": "http://example.com", "headers": {"X-Auth-Token": "XXXX"}}, {"type": "custom_publisher"} ] #. Finally, try to run the mistral engine and verify that it is running without any error:: $ mistral-server --config-file <path-to-config> --server engine ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/configuration/index.rst0000644000175000017500000000040400000000000023756 0ustar00coreycorey00000000000000====================================== Mistral Configuration and Policy Guide ====================================== ..
toctree:: :maxdepth: 2 ../configuration/config-guide.rst ../configuration/policy-guide.rst ../configuration/samples/index.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/configuration/policy-guide.rst0000644000175000017500000000052200000000000025242 0ustar00coreycorey00000000000000============================ Mistral Policy Configuration ============================ Configuration ~~~~~~~~~~~~~ The following is an overview of all available policies in Mistral. For a sample configuration file, refer to :doc:`samples/policy-yaml`. .. show-policy:: :config-file: ../../tools/config/policy-generator.mistral.conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0855668 mistral-10.0.0.0b3/doc/source/admin/configuration/samples/0000755000175000017500000000000000000000000023563 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/configuration/samples/index.rst0000644000175000017500000000043200000000000025423 0ustar00coreycorey00000000000000========================== Sample configuration files ========================== Configuration files can alter how Mistral behaves at runtime and by default are located in ``/etc/mistral/``. Links to sample configuration files can be found below: .. toctree:: policy-yaml.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/configuration/samples/policy-yaml.rst0000644000175000017500000000031500000000000026553 0ustar00coreycorey00000000000000=========== policy.yaml =========== Use the ``policy.yaml`` file to define additional access controls that apply to the Mistral services: .. literalinclude:: ../../../_static/mistral.policy.yaml.sample ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0855668 mistral-10.0.0.0b3/doc/source/admin/img/0000755000175000017500000000000000000000000020024 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/img/mistral_architecture.png0000644000175000017500000006502000000000000024752 0ustar00coreycorey00000000000000[binary PNG data omitted: mistral_architecture.png, the architecture diagram included by architecture.rst]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/index.rst0000644000175000017500000000046400000000000021115 0ustar00coreycorey00000000000000
===========================
Administrator Documentation
===========================

This chapter contains all needed information about how to install and
configure a Mistral cluster.

.. toctree::
   :maxdepth: 1

   architecture
   quickstart
   install/index
   configuration/index
   upgrade_guide

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0895667 mistral-10.0.0.0b3/doc/source/admin/install/0000755000175000017500000000000000000000000020716 5ustar00coreycorey00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/dashboard_guide.rst0000644000175000017500000000411000000000000024550 0ustar00coreycorey00000000000000
====================================
Mistral Dashboard Installation Guide
====================================

Mistral Dashboard is a Horizon plugin that makes it possible to manage
Mistral objects through a web user interface.

Setup Instructions
------------------

These instructions assume that Horizon is already installed and that its
installation folder is <HORIZON_DIR>. Detailed information on how to install
Horizon can be found at `Horizon Installation `_

The installation folder of Mistral Dashboard will be referred to as
<MISTRAL_DASHBOARD_DIR>.

The following should get you started:

1. Clone the repository into your local OpenStack directory::

    $ git clone https://github.com/openstack/mistral-dashboard.git

2. Install mistral-dashboard::

    $ sudo pip install -e <MISTRAL_DASHBOARD_DIR>

   Or, if you're planning to run the Horizon server in a virtual environment
   (see below)::

    $ tox -evenv -- pip install -e ../mistral-dashboard/

   and then::

    $ cp -b <MISTRAL_DASHBOARD_DIR>/mistraldashboard/enabled/_50_mistral.py \
      <HORIZON_DIR>/openstack_dashboard/local/enabled/_50_mistral.py

3. Since Mistral only supports Identity v3, you must ensure that the
   dashboard points to the proper OPENSTACK_KEYSTONE_URL in the
   <HORIZON_DIR>/openstack_dashboard/local/local_settings.py file::

    OPENSTACK_API_VERSIONS = {
        "identity": 3,
    }

    OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

4. Also, make sure you have changed OPENSTACK_HOST to point to your Keystone
   server and check that all endpoints are accessible. You may want to
   change OPENSTACK_ENDPOINT_TYPE to "publicURL" if some of them are not.
5. When you're ready, you will need to either restart Apache::

    $ sudo service apache2 restart

   or run the development server (in case you have decided to use a local
   Horizon)::

    $ cd ../horizon/
    $ tox -evenv -- python manage.py runserver

Debug instructions
------------------

Please refer to :doc:`Mistral Troubleshooting
<../../developer/contributor/troubleshooting>`

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/get_started.rst0000644000175000017500000000150700000000000023760 0ustar00coreycorey00000000000000
=========================
Workflow Service Overview
=========================

The Workflow service consists of the following components:

``Mistral API`` service
  Provides a REST API for operating and monitoring workflow executions.

``mistral-dashboard`` service
  Mistral Dashboard is a Horizon plugin.

``Mistral Engine`` service
  Controls workflow executions and handles their data flow, places finished
  tasks in a queue, transfers data from task to task, deals with condition
  transitions, and so on.

``Mistral Executor`` service
  Executes task actions, picks up the tasks from the queue, runs actions,
  and sends results back to the engine.

``Mistral Notifier`` service

``python-mistralclient``
  Python client API and Command Line Interface.

``mistral-lib``
  A library to support writing custom Mistral actions.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/index.rst0000644000175000017500000000076700000000000022565 0ustar00coreycorey00000000000000
==========================
Mistral Installation Guide
==========================

.. toctree::
   :maxdepth: 1

   get_started
   install
   verify
   next-steps
   dashboard_guide
   installation_guide
   mistralclient_guide

The Workflow service (mistral) enables setting up task relations that have
to be executed in a particular order, called workflows.

This chapter assumes a working setup of OpenStack following the
`OpenStack Installation Tutorial `_.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/install-obs.rst0000644000175000017500000000067000000000000023702 0ustar00coreycorey00000000000000
.. _install-obs:

Install and configure for openSUSE and SUSE Linux Enterprise
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For information on how to install and configure the Workflow service for
openSUSE and SUSE Linux Enterprise, refer to the
:doc:`Installation guide for Ubuntu `. Note that some commands vary by
distribution and might differ from the ones described, for instance,
package management.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/install-rdo.rst0000644000175000017500000000067700000000000023706 0ustar00coreycorey00000000000000
.. _install-rdo:

Install and configure for Red Hat Enterprise Linux and CentOS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For information on how to install and configure the Workflow service for
Red Hat Enterprise Linux 7 and CentOS 7, refer to the
:doc:`Installation guide for Ubuntu `. Note that some commands vary by
distribution and might differ from the ones described, for instance,
package management.
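For example, on CentOS or RHEL the prerequisite packages from the Ubuntu
guide would typically be installed with ``yum`` instead of ``apt``; a rough
sketch (the exact package names are illustrative and may vary between
releases):

.. code-block:: console

   # yum install python-devel python-setuptools python-pip libffi-devel \
       libxslt-devel libxml2-devel libyaml-devel openssl-devel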
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/install-ubuntu.rst0000644000175000017500000001324100000000000024437 0ustar00coreycorey00000000000000
.. _install-ubuntu:

================================
Install and Configure for Ubuntu
================================

This section describes how to install and configure the Workflow service
for Ubuntu.

Prerequisites
-------------

#. Install the packages:

   .. code-block:: console

      # apt-get update
      # apt-get install python-setuptools python-pip libffi-dev libxslt1-dev \
          libxml2-dev libyaml-dev libssl-dev python3-dev tox mistral-common

Installation
------------

**NOTE**: For instructions on how to install Mistral using devstack, refer
to :doc:`Mistral Devstack Installation `

Clone the repo and go to the repo directory:

.. code-block:: console

    $ git clone https://opendev.org/openstack/mistral
    $ cd mistral

Generate the configuration file:

.. code-block:: console

    $ tox -egenconfig

Create the mistral directory and copy the example configuration file:

.. code-block:: console

    $ mkdir /etc/mistral
    $ cp etc/mistral.conf.sample /etc/mistral/mistral.conf

Edit the configuration file:

.. code-block:: console

    $ vi /etc/mistral/mistral.conf

**Virtualenv installation**:

.. code-block:: console

    $ tox

This installs the necessary virtual environments and runs all the project
tests. Installing the virtual environments may take significant time
(~10-15 mins).

**Local installation**:

.. code-block:: console

    $ pip install -e .

or:

.. code-block:: console

    $ pip install -r requirements.txt
    $ python setup.py install

**NOTE**: There are some differences between *pip install -e* and *setup.py
install*. **pip install -e** works similarly to **setup.py install** or the
EasyInstall tool; however, it does not actually install anything. Instead,
it creates a special .egg-link file in the deployment directory that links
to your project's source code.

Configuring Mistral
-------------------

Refer to :doc:`../configuration/index` to find general information on how
to configure the Mistral server.

Before The First Run
--------------------

After the installation, you will see the **mistral-server** and
**mistral-db-manage** commands in your environment, either in the system or
in a virtual environment.

**NOTE**: If you use **virtualenv**, all Mistral-related commands can be
accessed with **tox -evenv --**. For example, *mistral-server* is available
via *tox -evenv -- mistral-server*.

The **mistral-db-manage** command can be used for migrations. To update the
database to the latest revision, type:

.. code-block:: console

    $ mistral-db-manage --config-file <path-to-config> upgrade head

Before starting the Mistral server, run the *mistral-db-manage populate*
command. It creates the DB with all the standard actions and standard
workflows that Mistral provides to all Mistral users:

.. code-block:: console

    $ mistral-db-manage --config-file <path-to-config> populate

For more detailed information on the *mistral-db-manage* script, see the
:doc:`Mistral Upgrade Guide `.

**NOTE**: For users who want a dry run with an **SQLite** database backend
(not used in production), the *mistral-db-manage* script is not recommended
for database initialization because of `SQLite limitations `_. Use the
sync_db script described below for database initialization instead.

**If you use virtualenv**:

.. code-block:: console

    $ tools/sync_db.sh --config-file <path-to-config>

**Or run sync_db directly**:
.. code-block:: console

    $ python tools/sync_db.py --config-file <path-to-config>

Running Mistral API server
--------------------------

To run the Mistral API server, execute the following command in a shell:

.. code-block:: console

    $ mistral-server --server api --config-file <path-to-config>

Running Mistral Engines
-----------------------

To run the Mistral Engine, execute the following command in a shell:

.. code-block:: console

    $ mistral-server --server engine --config-file <path-to-config>

Running Mistral Executors
-------------------------

To run the Mistral Executor instance, execute the following command in a
shell:

.. code-block:: console

    $ mistral-server --server executor --config-file <path-to-config>

Note that at least one Engine instance and one Executor instance should be
running so that workflow tasks are processed by Mistral.

Mistral Notifier
----------------

To run the Mistral Notifier, execute the following command in a shell:

.. code-block:: console

    $ mistral-server --server notifier --config-file <path-to-config>

Running Multiple Mistral Servers Under the Same Process
-------------------------------------------------------

To run more than one server (API, Engine, or Task Executor) in the same
process, execute the following command in a shell:

.. code-block:: console

    $ mistral-server --server api,engine --config-file <path-to-config>

The --server command line option can be a comma delimited list. The valid
options are "all" (the default if not specified) or any combination of
"api", "engine", and "executor". It is important to note that the "fake"
transport for the rpc_backend defined in the config file should only be
used if "all" the Mistral servers are launched in the same process.
Otherwise, messages do not get delivered if the Mistral servers are
launched in different processes, because the "fake" transport is using an
in-process queue.

Mistral Client Installation
---------------------------

Refer to :doc:`/admin/install/mistralclient_guide` to find out how to
install the Mistral Client.

Finalize installation
---------------------

Restart the Workflow services:

.. code-block:: console

    # service openstack-mistral-api restart

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/install.rst0000644000175000017500000000112700000000000023117 0ustar00coreycorey00000000000000
.. _install:

=====================
Install and configure
=====================

This section describes how to install and configure the Workflow Service,
code-named mistral, on the controller node.

**NOTE:** **Mistral can be used in standalone mode or it can work with
OpenStack.**

If Mistral is used with OpenStack, you must already have a working
OpenStack environment with at least the following components installed:

- Keystone with API v3 support

Note that installation and configuration vary by distribution.

.. toctree::
   :maxdepth: 2

   install-ubuntu
   install-rdo
   install-obs

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/installation_guide.rst0000644000175000017500000003543100000000000025334 0ustar00coreycorey00000000000000
==========================
Mistral Installation Guide
==========================

Prerequisites
-------------

It is necessary to install some specific system libs for installing
Mistral. They can be installed on most popular operating systems using
their package manager (for Ubuntu - *apt*, for Fedora - *dnf*, CentOS -
*yum*, for Mac OS - *brew* or *macports*).
The list of needed packages is shown below:

1. **python-dev**
2. **python-setuptools**
3. **python-pip**
4. **libffi-dev**
5. **libxslt1-dev (or libxslt-dev)**
6. **libxml2-dev**
7. **libyaml-dev**
8. **libssl-dev**

In case of Ubuntu, just run::

    $ apt-get install python-dev python-setuptools python-pip libffi-dev \
        libxslt1-dev libxml2-dev libyaml-dev libssl-dev

**NOTE:** **Mistral can be used without authentication at all or it can
work with OpenStack.** In case of OpenStack, it works **only with Keystone
v3**; make sure **Keystone v3** is installed.

Installation
------------

**NOTE**: If you need to install Mistral using devstack, please refer to
:doc:`Mistral Devstack Installation `

First of all, clone the repo and go to the repo directory::

    $ git clone https://github.com/openstack/mistral.git
    $ cd mistral

Install tox::

    $ pip install tox

Generate the config::

    $ tox -egenconfig

Configure Mistral as needed. The configuration file is located in
``etc/mistral.conf.sample``. You will need to modify the configuration
options and then copy it into ``/etc/mistral/mistral.conf``. For details
see :doc:`Mistral Configuration Guide `
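For orientation, a minimal ``/etc/mistral/mistral.conf`` might look like
the following sketch (the transport and database URLs are illustrative
values for a local RabbitMQ and MySQL; ``auth_enable = False`` under
``[pecan]`` turns authentication off for standalone use):

.. code-block:: ini

    [DEFAULT]
    transport_url = rabbit://guest:guest@localhost:5672/

    [database]
    connection = mysql+pymysql://mistral:MISTRAL_DBPASS@localhost/mistral

    [pecan]
    auth_enable = False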
**Virtualenv installation**::

    $ tox

This will install the necessary virtual environments and run all the
project tests. Installing virtual environments may take significant time
(~10-15 mins).

**Local installation**::

    $ pip install -e .

or::

    $ pip install -r requirements.txt
    $ python setup.py install

**NOTE**: There are some differences between *pip install -e* and
*setup.py install*. **pip install -e** works very similarly to **setup.py
install** or the EasyInstall tool, except that it doesn't actually install
anything. Instead, it creates a special .egg-link file in the deployment
directory that links to your project's source code.

Before the first run
--------------------

After installation you will see the **mistral-server** and
**mistral-db-manage** commands in your environment, either in the system or
in a virtual environment.

**NOTE**: In case of using **virtualenv**, all Mistral-related commands are
available via **tox -evenv --**. For example, *mistral-server* is available
via *tox -evenv -- mistral-server*.

The **mistral-db-manage** command can be used for migrations. To update the
database to the latest revision, type::

    $ mistral-db-manage --config-file <path-to-config> upgrade head

Before starting the Mistral server, run the *mistral-db-manage populate*
command. It prepares the database with the standard actions and workflows
which Mistral provides for all users::

    $ mistral-db-manage --config-file <path-to-config> populate

For more detailed information about the *mistral-db-manage* script, please
see the :doc:`Mistral Upgrade Guide `.

**NOTE**: For users who want a dry run with an **SQLite** database backend
(not used in production), *mistral-db-manage* is not recommended for
database initialization because of `SQLite limitations `_. Please use the
sync_db script described below for database initialization instead.

**If you use virtualenv**::

    $ tools/sync_db.sh --config-file <path-to-config>

**Or run sync_db directly**::

    $ python tools/sync_db.py --config-file <path-to-config>

Running Mistral API server
--------------------------

To run the Mistral API server, perform the following command in a shell::

    $ mistral-server --server api --config-file <path-to-config>

Running Mistral Engines
-----------------------

To run the Mistral Engine, perform the following command in a shell::

    $ mistral-server --server engine --config-file <path-to-config>

Running Mistral Task Executors
------------------------------

To run a Mistral Task Executor instance, perform the following command in a
shell::

    $ mistral-server --server executor --config-file <path-to-config>

Running Mistral Notifier
------------------------

To run the Mistral Notifier, perform the following command in a shell::

    $ mistral-server --server notifier --config-file <path-to-config>

Note that at least one Engine instance and one Executor instance should be
running so that workflow tasks are processed by Mistral.

Running Multiple Mistral Servers Under the Same Process
-------------------------------------------------------

To run more than one server (API, Engine, or Task Executor) in the same
process, perform the following command in a shell::

    $ mistral-server --server api,engine --config-file <path-to-config>

The --server command line option can be a comma delimited list. The valid
options are "all" (the default if not specified) or any combination of
"api", "engine", "notifier" and "executor". It's important to note that the
"fake" transport for the rpc_backend defined in the config file should only
be used if "all" the Mistral servers are launched in the same process.
Otherwise, messages do not get delivered if the Mistral servers are
launched in different processes, because the "fake" transport is using an
in-process queue.

Running Mistral By Systemd
--------------------------

#. Create a systemd unit file; it could be named
   ``/etc/systemd/system/mistral-api.service``:

   .. code-block:: bash

      [Unit]
      Description = Openstack Workflow Service API

      [Service]
      ExecStart = /usr/bin/mistral-server --server api --config-file /etc/mistral/mistral.conf
      User = mistral

      [Install]
      WantedBy = multi-user.target

#. Enable and start mistral-api:

   .. code-block:: console

      # systemctl enable mistral-api
      # systemctl start mistral-api

#. Verify that the mistral-api service is running:

   .. code-block:: console

      # systemctl status mistral-api

#. Create a systemd unit file; it could be named
   ``/etc/systemd/system/mistral-engine.service``:

   .. code-block:: bash

      [Unit]
      Description = Openstack Workflow Service Engine

      [Service]
      ExecStart = /usr/bin/mistral-server --server engine --config-file /etc/mistral/mistral.conf
      User = mistral

      [Install]
      WantedBy = multi-user.target

#. Enable and start mistral-engine:

   .. code-block:: console

      # systemctl enable mistral-engine
      # systemctl start mistral-engine

#. Verify that the mistral-engine service is running:

   .. code-block:: console

      # systemctl status mistral-engine

#. Create a systemd unit file; it could be named
   ``/etc/systemd/system/mistral-notifier.service``:

   .. code-block:: bash

      [Unit]
      Description = Openstack Workflow Service Notifier

      [Service]
      ExecStart = /usr/bin/mistral-server --server notifier --config-file /etc/mistral/mistral.conf
      User = mistral

      [Install]
      WantedBy = multi-user.target

#. Enable and start mistral-notifier:

   .. code-block:: console

      # systemctl enable mistral-notifier
      # systemctl start mistral-notifier

#. Verify that the mistral-notifier service is running:

   .. code-block:: console

      # systemctl status mistral-notifier

#. Create a systemd unit file; it could be named
   ``/etc/systemd/system/mistral-executor.service``:

   .. code-block:: bash

      [Unit]
      Description = Openstack Workflow Service Executor

      [Service]
      ExecStart = /usr/bin/mistral-server --server executor --config-file /etc/mistral/mistral.conf
      User = mistral

      [Install]
      WantedBy = multi-user.target

#. Enable and start mistral-executor:

   .. code-block:: console

      # systemctl enable mistral-executor
      # systemctl start mistral-executor

#. Verify that the mistral-executor service is running:

   .. code-block:: console

      # systemctl status mistral-executor
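Since one mistral-server process can host several servers at once (see
"Running Multiple Mistral Servers Under the Same Process" above), the
per-component units can also be collapsed into one; a sketch of a
hypothetical combined unit file (the name and server list are illustrative):

.. code-block:: bash

   [Unit]
   Description = Openstack Workflow Service API and Engine

   [Service]
   ExecStart = /usr/bin/mistral-server --server api,engine --config-file /etc/mistral/mistral.conf
   User = mistral

   [Install]
   WantedBy = multi-user.target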
Mistral And Docker
------------------

Docker containers provide an easy way to quickly deploy independent or
networked Mistral instances in seconds. This guide describes the process to
launch an all-in-one Mistral container.

Docker Installation
-------------------

The following links contain instructions to install the latest Docker
software:

* `Docker Engine `_
* `Docker Compose `_

Build the Mistral Image Manually
--------------------------------

Execute the following command from the repository top-level directory::

    docker build -t mistral -f tools/docker/Dockerfile .

The Mistral Docker image has one build parameter:

+---------------------------+---------------+----------------------------------------+
| Name                      | Default value | Description                            |
+===========================+===============+========================================+
| `BUILD_TEST_DEPENDENCIES` | false         | If `BUILD_TEST_DEPENDENCIES` equals    |
|                           |               | `true`, the Mistral test dependencies  |
|                           |               | will be installed inside the Docker    |
|                           |               | image                                  |
+---------------------------+---------------+----------------------------------------+

Running Mistral using Docker Compose
------------------------------------

To launch Mistral in the single node configuration::

    docker-compose -f tools/docker/docker-compose/infrastructure.yaml \
        -f tools/docker/docker-compose/mistral-single-node.yaml \
        -p mistral up -d

To launch Mistral in the multi node configuration::

    docker-compose -f tools/docker/docker-compose/infrastructure.yaml \
        -f tools/docker/docker-compose/mistral-multi-node.yaml \
        -p mistral up -d

The infrastructure docker-compose file contains examples of RabbitMQ,
PostgreSQL and MySQL configurations. Feel free to modify the docker-compose
files as needed. The docker-compose Mistral configurations also include the
CloudFlow container.
It is available at `link `_

The `--build` option can be used when it is necessary to rebuild the image,
for example::

    docker-compose -f tools/docker/docker-compose/infrastructure.yaml \
        -f tools/docker/docker-compose/mistral-single-node.yaml \
        -p mistral up -d --build

Running the Mistral client from the Docker Compose container
------------------------------------------------------------

To run the mistral client against the server in the container using the
client present in the container::

    docker run -it mistral_mistral mistral workflow-list

Configuring Mistral
-------------------

The Docker image contains the minimal set of Mistral configuration
parameters by default:

+----------------------+------------------------------------+-------------------------------------+
| Name                 | Default value                      | Description                         |
+======================+====================================+=====================================+
| `MESSAGE_BROKER_URL` | rabbit://guest:guest@rabbitmq:5672 | The message broker URL              |
+----------------------+------------------------------------+-------------------------------------+
| `DATABASE_URL`       | sqlite:///mistral.db               | The database URL                    |
+----------------------+------------------------------------+-------------------------------------+
| `UPGRADE_DB`         | false                              | If `UPGRADE_DB` equals `true`, a    |
|                      |                                    | database upgrade will be launched   |
|                      |                                    | before the Mistral main process     |
+----------------------+------------------------------------+-------------------------------------+
| `MISTRAL_SERVER`     | all                                | Specifies which mistral server to   |
|                      |                                    | start by the launch script          |
+----------------------+------------------------------------+-------------------------------------+
| `LOG_DEBUG`          | false                              | If set to true, the logging level   |
|                      |                                    | will be set to DEBUG instead of the |
|                      |                                    | default INFO level                  |
+----------------------+------------------------------------+-------------------------------------+
| `RUN_TESTS`          | false                              | If `RUN_TESTS` equals `true`, the   |
|                      |                                    | Mistral unit tests will be launched |
|                      |                                    | inside the container                |
+----------------------+------------------------------------+-------------------------------------+

The `/etc/mistral/mistral.conf` configuration file can be mounted to the
Mistral Docker container by uncommenting and editing the `volumes` sections
in the Mistral docker-compose files.
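Putting these parameters together, a single all-in-one container can be
pointed at external services by overriding the variables at start time; a
sketch with illustrative host names and credentials:

.. code-block:: console

    docker run -d -p 8989:8989 \
        -e MESSAGE_BROKER_URL=rabbit://guest:guest@my-rabbit:5672 \
        -e DATABASE_URL=postgresql://postgres:postgres@my-db:5432/postgres \
        -e UPGRADE_DB=true \
        mistral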
Launch tests inside Container
-----------------------------

Build mistral::

    docker build -t mistral -f tools/docker/Dockerfile \
        --build-arg BUILD_TEST_DEPENDENCIES=true .

Run tests using SQLite::

    docker run -it -e RUN_TESTS=true mistral

or PostgreSQL::

    docker run -it \
        -e DATABASE_URL=postgresql://postgres:postgres@localhost:5432/postgres \
        -e RUN_TESTS=true mistral

Keycloak integration
--------------------

If you set AUTH_ENABLE to True in the mistral.env file, then Mistral will
enable the Keycloak integration by default. Keycloak will be deployed with
mistral/mistral credentials. You should uncomment the volume line for
CloudFlow in the `infrastructure.yaml`.

As the next step, log in to the administrative console using the
http://localhost:8080/auth/admin URL. Create an OAuth client; you can
specify only a name, for example, mistral. Specify a valid redirect URL:
http://localhost:8000/* and turn on "Implicit Flow Enabled" on your
client's page. Save your changes.

Add the following line to your /etc/hosts file::

    127.0.0.1 keycloak

Export the following environment variables for the mistral CLI::

    export MISTRAL_AUTH_TYPE=keycloak-oidc
    export OS_AUTH_URL=http://keycloak:8080/auth
    export OS_TENANT_NAME=master
    export OS_USERNAME=mistral
    export OS_PASSWORD=mistral
    export OS_MISTRAL_URL=http://localhost:8989/v2
    export OPENID_CLIENT_ID=mistral
    export OPENID_CLIENT_SECRET=
    export MISTRALCLIENT_INSECURE=True

Check your configuration::

    mistral workflow-list

Or open the CloudFlow page in a browser::

    http://localhost:8000

Using Mistral Client with Docker
--------------------------------

The Mistral API will be accessible from the host machine on the default
port 8989. Install `python-mistralclient` on the host machine to execute
mistral commands.

Mistral Client Installation
---------------------------

Please refer to :doc:`Mistral Client / CLI Guide `

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/mistralclient_guide.rst0000644000175000017500000001340200000000000025477 0ustar00coreycorey00000000000000
===========================
Mistral Client Installation
===========================

To install ``python-mistralclient``, it is required to have ``pip`` (in
most cases). Make sure that ``pip`` is installed, then type:

.. code-block:: console

    $ pip install python-mistralclient

Or, if you need to install ``python-mistralclient`` from the master branch,
type:

.. code-block:: console

    $ pip install git+https://github.com/openstack/python-mistralclient.git

After ``python-mistralclient`` is installed, you will have the ``mistral``
command in your environment.

Configure Authentication Against Keystone
-----------------------------------------

If Keystone is used for authentication in Mistral, then the environment
should have the auth variables:

.. code-block:: console

    $ export OS_AUTH_URL=http://<keystone-host>:5000/v2.0
    $ export OS_TENANT_NAME=tenant
    $ export OS_USERNAME=admin
    $ export OS_PASSWORD=secret
    $ export OS_MISTRAL_URL=http://<mistral-host>:8989/v2 (optional, by default URL=http://localhost:8989/v2)

and in the case when you are authenticating against keystone over https:

.. code-block:: console

    $ export OS_CACERT=<path-to-ca-cert>

.. note:: In the client, we can use both Keystone auth versions - v2.0 and
          v3. But the server supports only v3.

You can see the list of available commands by typing:

.. code-block:: console

    $ mistral --help

To make sure the Mistral client works, type:

.. code-block:: console

    $ mistral workbook-list

Configure Authentication Against Keycloak
-----------------------------------------

Mistral also supports authentication against a Keycloak server via the
OpenID Connect protocol. In order to use it on the client side, the
environment should look as follows:

.. code-block:: console

    $ export MISTRAL_AUTH_TYPE=keycloak-oidc
    $ export OS_AUTH_URL=https://<keycloak-host>:<keycloak-port>/auth
    $ export OS_TENANT_NAME=my_keycloak_realm
    $ export OS_USERNAME=admin
    $ export OS_PASSWORD=secret
    $ export OPENID_CLIENT_ID=my_keycloak_client
    $ export OPENID_CLIENT_SECRET=my_keycloak_client_secret
    $ export OS_MISTRAL_URL=http://<mistral-host>:8989/v2 (optional, by default URL=http://localhost:8989/v2)

.. note:: The variables OS_TENANT_NAME, OS_USERNAME and OS_PASSWORD are
          used for both Keystone and Keycloak authentication. In case of
          Keycloak, OS_TENANT_NAME needs to correspond to a Keycloak realm.
          Unlike Keystone, Keycloak requires registering, in advance, a
          client that accesses resources (the Mistral server in our case)
          protected by Keycloak.
For this reason, the OPENID_CLIENT_ID and OPENID_CLIENT_SECRET variables
should be assigned the correct values as registered in Keycloak.

Similar to Keystone, the OS_CACERT variable can also be added to provide a
certificate for SSL/TLS verification:

.. code-block:: console

    $ export OS_CACERT=<path-to-ca-cert>

In order to disable SSL/TLS certificate verification, the
MISTRALCLIENT_INSECURE variable needs to be set to True:

.. code-block:: console

    $ export MISTRALCLIENT_INSECURE=True

Targeting Non-preconfigured Clouds
----------------------------------

Mistral is capable of executing workflows on external OpenStack clouds,
different from the one defined in the `mistral.conf` file in the
`keystone_authtoken` section. (More detail in the
:doc:`/admin/configuration/index`.)

For example, suppose the Mistral server is configured to authenticate with
the `http://keystone1.example.com` cloud and the user wants to execute a
workflow on the `http://keystone2.example.com` cloud.

The mistral.conf will look like:

.. code-block:: console

    [keystone_authtoken]
    www_authenticate_uri = http://keystone1.example.com:5000/v3
    ...

The client side parameters will be:

.. code-block:: console

    $ export OS_AUTH_URL=http://keystone1.example.com:5000/v3
    $ export OS_USERNAME=mistral_user
    ...
    $ export OS_TARGET_AUTH_URL=http://keystone2.example.com:5000/v3
    $ export OS_TARGET_USERNAME=cloud_user
    ...

.. note:: Every `OS_*` parameter has an `OS_TARGET_*` counterpart. For more
          detail, check out `mistral --help`.

The `OS_*` parameters are used to authenticate and authorize the user with
Mistral, that is, to check if the user is allowed to utilize the Mistral
service. Whereas the `OS_TARGET_*` parameters are used to define the user
that executes the workflow on the external cloud, keystone2.example.com.

Use cases
^^^^^^^^^

**Authenticate in Mistral and execute OpenStack actions with different
users**

As a user of Mistral, I want to execute a workflow with a different user on
the cloud.

**Execute workflows on any OpenStack cloud**

As a user of Mistral, I want to execute a workflow on a cloud of my choice.

Special cases
^^^^^^^^^^^^^

**Using Mistral with zero OpenStack configuration**:

With the targeting feature, it is possible to execute a workflow on any
arbitrary cloud without additional configuration on the Mistral server
side. If authentication is turned off in the Mistral server (Pecan's
`auth_enable = False` option in `mistral.conf`), there is no need to set
the `keystone_authtoken` section. It is possible to have Mistral use an
external OpenStack cloud even when it isn't deployed in an OpenStack
environment (i.e. no Keystone integration).

With this setup, the following call will return the heat stack list:

.. code-block:: console

    $ mistral \
        --os-target-auth-url=http://keystone2.example.com:5000/v3 \
        --os-target-username=testuser \
        --os-target-tenant=testtenant \
        --os-target-password="MistralRuleZ" \
        run-action heat.stacks_list

This setup is particularly useful when Mistral is used in standalone mode,
when the Mistral service is not part of the OpenStack cloud and runs
separately. Note that only the OS_TARGET_* parameters enable this
operation.
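The same operations are also available from Python through the
`python-mistralclient` library; a minimal sketch, assuming a server running
with authentication disabled (``auth_enable = False``) on the default URL:

.. code-block:: python

    from mistralclient.api import client

    # Build a client against a Mistral server; with authentication
    # disabled on the server, no credentials are passed here.
    mistral = client.client(mistral_url='http://localhost:8989/v2')

    # List the workflows registered on the server.
    for wf in mistral.workflows.list():
        print(wf.name)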
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/next-steps.rst0000644000175000017500000000034100000000000023560 0ustar00coreycorey00000000000000
.. _next-steps:

Next steps
~~~~~~~~~~

Your OpenStack environment now includes the Mistral service. To add
additional services, see `OpenStack Pike Installation Tutorials and
Guides `_.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/install/verify.rst0000644000175000017500000000165400000000000022760 0ustar00coreycorey00000000000000
.. _verify:

Basic verification
~~~~~~~~~~~~~~~~~~

.. code-block:: console

    $ mistral run-action std.noop

Verify operation of the Workflow service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. note:: Perform these commands on the controller node.

#. Create a workflow file:

   .. code-block:: console

      $ cat >/tmp/test.wf.yaml <`

Mistral Client Command Guide
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To use mistralclient, please refer to
:doc:`Mistral Client / CLI Guide `

Export Keystone credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~

To use the OpenStack command line tools you should specify environment
variables with the configuration details for your OpenStack installation.
The following example assumes that the Identity service is at
``127.0.0.1:5000``, with a user ``admin`` in the ``admin`` tenant whose
password is ``password``:

.. code-block:: bash

    $ export OS_AUTH_URL=http://127.0.0.1:5000/v2.0/
    $ export OS_TENANT_NAME=admin
    $ export OS_USERNAME=admin
    $ export OS_PASSWORD=password

Write a workflow
----------------

For example, we have the following workflow::

    ---
    version: "2.0"

    my_workflow:
      type: direct

      input:
        - names

      tasks:
        task1:
          with-items: name in <% $.names %>
          action: std.echo output=<% $.name %>
          on-success: task2

        task2:
          action: std.echo output="Done"

This simple workflow iterates through a list of names in ``task1`` (using
`with-items`), stores them as a task result (using the `std.echo` action)
and then stores the word "Done" as a result of the second task (`task2`).
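Individual actions used by the workflow can also be tried out on their own
before anything is uploaded; for instance, the `std.echo` action from
``task1`` can be invoked directly with an arbitrary input value::

    $ mistral run-action std.echo '{"output": "John"}'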
To learn more about the Mistral Workflows and what you can do, read the :doc:`Mistral Workflow Language specification ` Upload the workflow ------------------- Use the *Mistral CLI* to create the workflow:: $ mistral workflow-create The output should look similar to this:: +------------------------------------+-------------+-----------+--------+-------+---------------------+------------+ |ID | Name | Namespace | Tags | Input | Created at | Updated at | +------------------------------------+-------------+-----------+--------+-------+---------------------+------------+ |9b719d62-2ced-47d3-b500-73261bb0b2ad| my_workflow | | | names | 2015-08-13 08:44:49 | None | +------------------------------------+-------------+-----------+--------+-------+---------------------+------------+ Run the workflow and check the result ------------------------------------- Use the *Mistral CLI* to start the new workflow, passing in a list of names as JSON:: $ mistral execution-create my_workflow '{"names": ["John", "Mistral", "Ivan", "Crystal"]}' Make sure the output is like the following:: +--------------------+--------------------------------------+ | Field | Value | +--------------------+--------------------------------------+ | ID | 49213eb5-196c-421f-b436-775849b55040 | | Workflow ID | 9b719d62-2ced-47d3-b500-73261bb0b2ad | | Workflow name | my_workflow | | Workflow namespace | | | Description | | | Task Execution ID | | | Root Execution ID | | | State | RUNNING | | State info | None | | Created at | 2017-03-06 11:24:10 | | Updated at | 2017-03-06 11:24:10 | +--------------------+--------------------------------------+ After a moment, check the status of the workflow execution (replace the example execution id with the ID output above):: $ mistral execution-get 49213eb5-196c-421f-b436-775849b55040 +--------------------+--------------------------------------+ | Field | Value | +--------------------+--------------------------------------+ | ID | 49213eb5-196c-421f-b436-775849b55040 | | Workflow ID | 9b719d62-2ced-47d3-b500-73261bb0b2ad | | Workflow name | my_workflow | | Workflow namespace | | | Description | | | Task Execution ID | | | Root Execution ID | | | State | SUCCESS | | State info | None | | Created at | 2017-03-06 11:24:10 | | Updated at | 2017-03-06 11:24:20 | +--------------------+--------------------------------------+ The status of each **task** also can be checked:: $ mistral task-list 49213eb5-196c-421f-b436-775849b55040 +--------------------------------------+-------+---------------+--------------------+--------------------------------------+---------+------------+---------------------+---------------------+ | ID | Name | Workflow name | Workflow namespace | Execution ID | State | State info | Created at | Updated at | +--------------------------------------+-------+---------------+--------------------+--------------------------------------+---------+------------+---------------------+---------------------+ | f639e7a9-9609-468e-aa08-7650e1472efe | task1 | my_workflow | | 49213eb5-196c-421f-b436-775849b55040 | SUCCESS | None | 2017-03-06 11:24:11 | 2017-03-06 11:24:17 | | d565c5a0-f46f-4ebe-8655-9eb6796307a3 | task2 | my_workflow | | 49213eb5-196c-421f-b436-775849b55040 | SUCCESS | None | 2017-03-06 11:24:17 | 2017-03-06 11:24:18 | +--------------------------------------+-------+---------------+--------------------+--------------------------------------+---------+------------+---------------------+---------------------+ Check the result of task *'task1'*:: $ mistral task-get-result 
f639e7a9-9609-468e-aa08-7650e1472efe [ "John", "Mistral", "Ivan", "Crystal" ] If needed, we can go deeper and look at a list of the results of the **action_executions** of a single task:: $ mistral action-execution-list f639e7a9-9609-468e-aa08-7650e1472efe +--------------------------------------+----------+---------------+--------------------+-----------+--------------------------------------+---------+----------+---------------------+---------------------+ | ID | Name | Workflow name | Workflow namespace | Task name | Task ID | State | Accepted | Created at | Updated at | +--------------------------------------+----------+---------------+--------------------+-----------+--------------------------------------+---------+----------+---------------------+---------------------+ | 4e0a60be-04df-42d7-aa59-5107e599d079 | std.echo | my_workflow | | task1 | f639e7a9-9609-468e-aa08-7650e1472efe | SUCCESS | True | 2017-03-06 11:24:12 | 2017-03-06 11:24:16 | | 5bd95da4-9b29-4a79-bcb1-298abd659bd6 | std.echo | my_workflow | | task1 | f639e7a9-9609-468e-aa08-7650e1472efe | SUCCESS | True | 2017-03-06 11:24:12 | 2017-03-06 11:24:16 | | 6ae6c19e-b51b-4910-9e0e-96c788093715 | std.echo | my_workflow | | task1 | f639e7a9-9609-468e-aa08-7650e1472efe | SUCCESS | True | 2017-03-06 11:24:12 | 2017-03-06 11:24:16 | | bed5a6a2-c1d8-460f-a2a5-b36f72f85e19 | std.echo | my_workflow | | task1 | f639e7a9-9609-468e-aa08-7650e1472efe | SUCCESS | True | 2017-03-06 11:24:12 | 2017-03-06 11:24:17 | +--------------------------------------+----------+---------------+--------------------+-----------+--------------------------------------+---------+----------+---------------------+---------------------+ Check the result of the first **action_execution**:: $ mistral action-execution-get-output 4e0a60be-04df-42d7-aa59-5107e599d079 { "result": "John" } **Congratulations! Now you are ready to use OpenStack Workflow Service!** ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/admin/upgrade_guide.rst0000644000175000017500000000432000000000000022605 0ustar00coreycorey00000000000000Mistral Upgrade Guide ===================== Database upgrade ---------------- The migrations in ``alembic_migrations/versions`` contain the changes needed to migrate between Mistral database revisions. A migration occurs by executing a script that details the changes needed to upgrade the database. The migration scripts are ordered so that multiple scripts can run sequentially. The scripts are executed by Mistral's migration wrapper which uses the Alembic library to manage the migration. Mistral supports migration from Kilo or later. 
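To give an idea of what such a script contains, below is a minimal sketch
of an Alembic migration in the style of ``alembic_migrations/versions``
(the revision identifiers, table and column are purely illustrative):

.. code-block:: python

    from alembic import op
    import sqlalchemy as sa

    # Revision identifiers used by Alembic; real values are generated by
    # the "mistral-db-manage ... revision" command.
    revision = '040'
    down_revision = '039'


    def upgrade():
        # A hypothetical schema change: add a nullable column to an
        # existing table.
        op.add_column(
            'workflow_definitions_v2',
            sa.Column('new_field', sa.String(255), nullable=True)
        )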
You can upgrade to the latest database version via: :: $ mistral-db-manage --config-file /path/to/mistral.conf upgrade head You can populate the database with standard actions and workflows: :: $ mistral-db-manage --config-file /path/to/mistral.conf populate To check the current database version: :: $ mistral-db-manage --config-file /path/to/mistral.conf current To create a script to run the migration offline: :: $ mistral-db-manage --config-file /path/to/mistral.conf upgrade head --sql To run the offline migration between specific migration versions: :: $ mistral-db-manage --config-file /path/to/mistral.conf upgrade : --sql Upgrade the database incrementally: :: $ mistral-db-manage --config-file /path/to/mistral.conf upgrade --delta <# of revs> Or, upgrade the database to one newer revision: :: $ mistral-db-manage --config-file /path/to/mistral.conf upgrade +1 Create new revision: :: $ mistral-db-manage --config-file /path/to/mistral.conf revision -m "description of revision" --autogenerate Create a blank file: :: $ mistral-db-manage --config-file /path/to/mistral.conf revision -m "description of revision" This command does not perform any migrations, it only sets the revision. Revision may be any existing revision. Use this command carefully. :: $ mistral-db-manage --config-file /path/to/mistral.conf stamp To verify that the timeline does branch, you can run this command: :: $ mistral-db-manage --config-file /path/to/mistral.conf branches If the migration path has branch, you can find the branch point via: :: $ mistral-db-manage --config-file /path/to/mistral.conf history ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/conf.py0000644000175000017500000000735000000000000017464 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Mistral documentation build configuration file # # Refer to the Sphinx documentation for advice on configuring this file: # # http://www.sphinx-doc.org/en/stable/config.html import os import sys on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinxcontrib.pecanwsme.rest', 'sphinxcontrib.httpdomain', 'wsmeext.sphinxext', 'openstackdocstheme', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', ] wsme_protocols = ['restjson'] suppress_warnings = ['app.add_directive'] # The suffix of source file names. 
source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Mistral' copyright = u'2020, Mistral Contributors' policy_generator_config_file = \ '../../tools/config/policy-generator.mistral.conf' sample_policy_basename = '_static/mistral' # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_static_path = ['_static'] html_theme = 'openstackdocs' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['mistral.'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'Mistral' # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'mistral', u'Mistral', [u'OpenStack Foundation'], 1) ] # If true, show URL addresses after external links. man_show_urls = True # -- Options for openstackdocstheme ------------------------------------------- repository_name = 'openstack/mistral' bug_project = 'mistral' bug_tag = 'doc' latex_use_xindy = False html_theme_options = { "display_global_toc_section": True, "sidebar_mode": "toctree", } # -- Options for LaTeX output ------------------------------------------------ latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0895667 mistral-10.0.0.0b3/doc/source/developer/0000755000175000017500000000000000000000000020145 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0895667 mistral-10.0.0.0b3/doc/source/developer/contributor/0000755000175000017500000000000000000000000022517 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/developer/contributor/coding_guidelines.rst0000644000175000017500000005037600000000000026737 0ustar00coreycorey00000000000000Mistral Coding Guidelines ========================= Why learn more coding guidelines? --------------------------------- This document contains the description of the guidelines used for writing code on the Mistral project. Some of the guidelines follow from the nature of Python programming language (dynamic types, etc), some are the result of the consensus achieved by the project contributors during many contribution cycles. All contributors need to follow these guidelines when contributing to Mistral. 
The purpose of having such well-described practices is to improve
communication between team members, to reduce the number of controversial
situations related to how a certain code snippet should be written, and to
let contributors focus on unique engineering tasks rather than on the
low-level decisions that any engineer makes many times a day: choosing a
good name for a class or variable, how to organise a loop, whether to put a
blank line before "if" or "return", in what cases, and so on. The document,
when accepted and followed by team members, aims to improve overall
development speed and quality.

Note that the guidelines described below almost don't conflict with the
official PEP8 style guide (https://www.python.org/dev/peps/pep-0008/)
describing how any Python program should be formatted. Mistral guidelines
add more high-level semantics on top of it. PEP8 should still be considered
a necessary base for those who write Python programs.

Strictly speaking, this guide is not exactly about style; it's about
writing maintainable code. Some of the concepts discussed below may seem a
bit too philosophical but, in fact, they reflect our real experience of
solving practical tasks. According to that experience, some decisions work
well and some don't, given the nature of Python programming and the nature
of the project.

The guidelines are based on the three main values:

- **Communication.** When writing code we always try to create it in a way
  that it's easy to read and understand. This is important because most of
  their time developers spend on reading existing code, not writing new
  code.
- **Simplicity.** It makes sense to write code that uses the minimal means
  that solve a task at hand.
- **Flexibility.** In most cases it's better to keep options open because
  in programming there's no such thing as "done". Pretty much all code gets
  modified during the lifecycle of a program working in production. This is
  a very important difference from designing real buildings, where we can't
  rebuild a basement after a building is completed and inhabited by people.

In the document, there is a number of code snippets, some considered
incorrect and some correct, with explanations of why, from the perspective
of these key values. Further sections are devoted to certain aspects of
writing readable code. They will typically have a list of guidelines
upfront, so it's easy to find them in the text, and then more details on
them, if needed.

Naming
------

Naming is always important in programming. Good naming should always serve
better communication, i.e. the name of a code entity (module, variable,
class, etc.) should convey a clear message to a code reader about the
meaning of the entity. For dynamic languages, naming is even more important
than for languages with static types. Below it will be shown why.

Using Abbreviations
^^^^^^^^^^^^^^^^^^^

Guidelines
''''''''''

- *Use well-known abbreviations to name variables, method arguments,
  constants*

Well-known abbreviations (shortcuts) used for names of constants, local
variables and method arguments help simplify code, since it becomes less
verbose, and simultaneously improve communication because all team members
understand them the same way. Below is a list of abbreviations used on the
Mistral project. The list is not final and is supposed to grow over time.
- **app** - application
- **arg** - argument
- **cfg** - configuration
- **cls** - class
- **cmd** - command
- **cnt** - count
- **ctx** - context
- **db** - database
- **desc** - description
- **dest** - destination
- **def** - definition
- **defs** - definitions
- **env** - environment
- **ex** - execution
- **execs** - executions
- **exc** - exception
- **log** - logger
- **ns** - namespace
- **info** - information
- **obj** - object
- **prog** - program
- **spec** - specification
- **sync** - synchronous
- **wf** - workflow
- **wfs** - workflows

Note that when using a well-known abbreviation to name a constant, we need
to use capital letters only (just as required by PEP8).

Local Variables and Method Arguments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Guidelines
''''''''''

- *The name of a method argument should clearly communicate its semantics
  and purpose*
- *The name of a method argument or a local variable should include a type
  name if it creates an ambiguity otherwise*
- *The name of a local variable can be shortened from a full word (up to
  one letter) if the scope of the variable is very narrow (several lines)
  and if it doesn't create ambiguities*

The principles of choosing names in dynamic languages like Python are
especially important because oftentimes we don't clearly see of what type a
certain variable is, whereas the type itself (no matter if it's primitive
or represented by a class) carries very important information about all
objects of this type. For example, if we see a variable in Java or C++
code, it's not a problem to find out of what type this variable is: we can
easily find where the variable is defined. Any modern IDE also allows
navigating to a type declaration (e.g. if it's a class) just by clicking on
it and seeing all required info about it. In Python, it's often much more
problematic to find a variable type. Injecting a type name, at least a
shortcut, into a variable name often helps mitigate this fundamental issue
and improve code readability, and hence communication.

Below are the code snippets that help illustrate this problem. Let's assume
we have the following Python classes:

.. code-block:: python

    class TaskExecution(Execution):
        # Carries information about a running task.
        ...

    class TaskSpec(BaseSpec):
        # Defines the work logic of a particular task.
        ...

For what we want to illustrate, it's not important if they belong to the
same module or to different ones.

Problematic Code
''''''''''''''''

.. code-block:: python

    def calculate_next_tasks(self, task, context):
        result = set()

        for clause in task.get_on_clauses():
            task_names = self.evaluate_on_clause(clause, context)

            tasks = [self.get_task_by_name(t_n) for t_n in task_names]

            result.update(tasks)

        return result

Is this method easy to understand? Well, if this code is part of a small
program (e.g. a 200-300 line script) then it may be OK. But when it's a
system with dozens of thousands of lines of code, then it has a number of
issues. The most important issue is that we don't know the type of the
"task" method argument. In order to find it, we'll have to see where this
method is called and what is passed as an argument. Then, if an object is
not created there, we'll have to go further and find the callers of that
method too, and so on until we find where the object is actually
instantiated. The longer the method call chains are, the worse. So, just by
looking at this code we can't determine whether the argument "task" is of
type TaskSpec or TaskExecution.
So, for example, we can't even say for sure if ``task.get_on_clauses()`` is
a correct instruction. If we have hundreds of places like this, it is very
challenging to read code and it is very easy to make a mistake when
modifying it.

The other obvious issues are also related to naming:

#. It's not clear objects of what type are returned by the method
#. It's not clear objects of what type is returned by
   ``self.get_task_by_name(t_n)``

Better Code
'''''''''''

.. code-block:: python

    def calculate_next_task_specs(self, task_spec, context):
        result = set()

        for clause in task_spec.get_on_clauses():
            task_names = self.evaluate_on_clause(clause, context)

            task_specs = [self.get_task_spec_by_name(t_n) for t_n in task_names]

            result.update(task_specs)

        return result

Now we won't be confused. Of course, we still have to remember that we have
those two classes, but at least we have a clear pointer to a type.

Functions and Methods
^^^^^^^^^^^^^^^^^^^^^

Guidelines
''''''''''

- *The name of a method should clearly communicate its semantics and
  purpose*
- *The name of a method should include a type name of a returned value when
  it creates an ambiguity otherwise*

For example, there are two classes like these again:

.. code-block:: python

    class TaskExecution(Execution):
        # Carries information about a running task.
        ...

    class TaskSpec(BaseSpec):
        # Defines the work logic of a particular task.
        ...

And we need a method that implements some logic and returns an instance of
**TaskSpec**. How would we choose a good name for the method?

Problematic Code
''''''''''''''''

.. code-block:: python

    def calculate_parent_task(self, task_spec):
        ...

Looking at this method signature, it's not clear what to expect as a
returned value, since in Python method declarations don't contain a
returned value type. Although there's a temptation to use just "task" in
the method name, it leads to a naming ambiguity: a returned value may be of
either the **TaskExecution** or the **TaskSpec** type. Strictly speaking,
it may be of any type since it's Python, but we can help a reader of this
code a little bit and leave a hint about what's going to be returned from
the method when it's called.

At first glance, it may seem a bit weird why we pay attention to such
things. One may say that it's already totally clear that we need to use the
full "task_spec" in the method name instead of "task", just according to
the name of the class. Why would anybody want to use "task"? The reality is
that during informal communication within a team, people tend to simplify
and shorten words. That is, when several team members are working on
something related to task specifications within a certain scope (module,
class, etc.) they often move to using a simplified terminology, and instead
of saying "task specification" they start saying just "task". And this kind
of habit often also sneaks into code, and eventually it breaks
communication between the code author and the code reader.

Better Code
'''''''''''

.. code-block:: python

    def calculate_parent_task_spec(self, task_spec):
        ...

Although it's more verbose, it helps mitigate the naming ambiguity for a
code reader.

Constants
^^^^^^^^^

Guidelines
''''''''''

- *All hardcoded values (strings, numbers, etc.) must be defined as
  constants, i.e. global module variables*
- *Constants must be defined in the beginning of a module*

Problematic Code
''''''''''''''''

.. code-block:: python

    def print_task_report_line(self, line, level=0):
        self.app.stdout.write(
            "task: %s%s\n" % (' ' * (level * 4), line)
        )

    ...
Constants
^^^^^^^^^

Guidelines
''''''''''

- *All hardcoded values (strings, numbers, etc.) must be defined as
  constants, i.e. global module variables*
- *Constants must be defined in the beginning of a module*

Problematic Code
''''''''''''''''

.. code-block:: python

    def print_task_report_line(self, line, level=0):
        self.app.stdout.write(
            "task: %s%s\n" % (' ' * (level * 4), line)
        )

    ...

    def print_action_report_line(self, line, level=0):
        self.app.stdout.write(
            "action: %s%s\n" % (' ' * (level * 4), line)
        )

The problem with this code is that it uses hardcoded integer values in
different places. Why is it a problem?

- It's hard to find hardcoded values while reading code
- It's hard to understand whether the same values mean semantically the
  same thing or different things
- It's easy to make a mistake when changing a hardcoded value because we
  can change it in one place and miss other places

Better Code
'''''''''''

.. code-block:: python

    REPORT_ENTRY_INDENT = 4
    DEFAULT_ENTRY_LEVEL = 0

    def print_task_report_line(self, line, level=DEFAULT_ENTRY_LEVEL):
        self.app.stdout.write(
            "task: %s%s\n" % (' ' * (level * REPORT_ENTRY_INDENT), line)
        )

    ...

    def print_action_report_line(self, line, level=DEFAULT_ENTRY_LEVEL):
        self.app.stdout.write(
            "action: %s%s\n" % (' ' * (level * REPORT_ENTRY_INDENT), line)
        )

Now the code clearly communicates to a reader that the value 4 in these two
methods means exactly the same entity: an indent of any report entry. The
other constant similarly adds clarity about the value 0. Previously, these
two integers were nameless. It's now also easy to change the values: we
just need to assign different values to the named constants.
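The same guideline applies to hardcoded strings. For instance, the format
strings from the example above could be extracted too (a hypothetical
continuation of the same snippet, reusing the constants just defined):

.. code-block:: python

    TASK_REPORT_LINE_FORMAT = "task: %s%s\n"
    ACTION_REPORT_LINE_FORMAT = "action: %s%s\n"

    def print_task_report_line(self, line, level=DEFAULT_ENTRY_LEVEL):
        # The format string is now named and defined in a single place.
        self.app.stdout.write(
            TASK_REPORT_LINE_FORMAT % (' ' * (level * REPORT_ENTRY_INDENT), line)
        )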
Grouping and Blank Lines
------------------------

Guidelines
^^^^^^^^^^

- *Use blank lines to split individual steps (units) of algorithms.*
- *Always put blank lines before and after "if", "for", "try" and "with"
  blocks unless they directly contain one another without anything else in
  between.*
- *Always put a blank line before "return" unless it's the only instruction
  in an enclosing code block like a method or an "if" block.*
- *Use blank lines to split logically asymmetric lines, e.g. less abstract
  and more abstract lines like a variable assignment and a method call.*
- *Put a blank line after any call to a superclass (see the example at the
  end of this section).*

Although to someone it may not seem important, blank lines consciously put
in code can improve readability. The general recommendation is to use blank
lines to separate different logical blocks. When writing code, it is useful
to ask ourselves the question "what are the main steps of the algorithm I'm
implementing?". Once answered, it gives an understanding of how the code
can be decomposed into sections. And in order to reflect that they are
individual steps of the algorithm, the corresponding code blocks can be
split by blank lines.

Let's consider examples.

Problematic Code
^^^^^^^^^^^^^^^^

.. code-block:: python

    def update_task_state(self, state, state_info=None):
        old_task_state = self.task_ex.state
        if states.is_completed(self.task_ex.state):
            self.notify(old_task_state, self.task_ex.state)
            return
        if not states.is_valid_transition(self.task_ex.state, state):
            return
        child_states = [a_ex.state for a_ex in self.task_ex.executions]
        if state == states.RUNNING and states.PAUSED in child_states:
            return
        self.set_state(state, state_info)
        if states.is_completed(self.task_ex.state):
            self.register_workflow_completion_check()
        self.notify(old_task_state, self.task_ex.state)

Is this method easy to read? The method does a lot of things: it invokes
other methods, checks conditions, calculates values and assigns them to
variables. Even more importantly, all of these are different computational
steps of the method. However, they appear in the code one after another
without any gaps. So when a human eye is reading this code, there isn't any
element in the code that would tell us where the next step of the algorithm
starts. And a blank line can naturally play the role of such an element.

Better Code
^^^^^^^^^^^

.. code-block:: python

    def update_task_state(self, state, state_info=None):
        old_task_state = self.task_ex.state

        if states.is_completed(self.task_ex.state):
            self.notify(old_task_state, self.task_ex.state)

            return

        if not states.is_valid_transition(self.task_ex.state, state):
            return

        child_states = [a_ex.state for a_ex in self.task_ex.executions]

        if state == states.RUNNING and states.PAUSED in child_states:
            return

        self.set_state(state, state_info)

        if states.is_completed(self.task_ex.state):
            self.register_workflow_completion_check()

        self.notify(old_task_state, self.task_ex.state)

Now when we read the method, we clearly see the individual steps:

- Save the old task state for further usage
- Check if the task is already completed and, if it is, notify clients
  about it and return
- Check if the state transition we're going to make is valid
- Calculate the states of the child entities
- Do not proceed with updating the task state if there are any running or
  paused child entities
- Actually update the task state
- If the task is now completed, schedule the corresponding workflow
  completion check
- Notify clients about the task transition

Of course, it may be hard to format code this way in the first place. But
once we already have some version of the code, we should take care of the
people who will surely be reading it in the future. After all, the author
may be reading it again after some time. Again: programs are read much more
often than they are written. So we need to make sure our code tells a good
story about what it does.

As far as putting a blank line before "if", "try", "for" and "with" goes,
the reasoning is pretty straightforward: all these code flow controls
already reflect separate computational steps because they do something
that's different from the previous command by nature. For example, "if" may
route a program in a different direction at runtime. So all these blocks
should be clearly visible to a reader. "return" is also an outstanding
command since it stops the execution of the current method and gives
control back to the caller of the method. So it also deserves to be well
visible.

Using blank lines consciously can also make code more symmetric. That is,
if we don't mix up significantly different commands.

Problematic Code
^^^^^^^^^^^^^^^^

.. code-block:: python

    var1 = "some value"
    my_obj.method1()
    my_obj.method2()
    var2 = "another value"

    ...  # The rest of the code snippet

What's wrong with this code? The thing is that we mixed up lines where we
do absolutely different things. Two of them just set string values to two
new variables, whereas the other two send messages to a different object,
i.e. give it a command to do something. In other words, two lines here are
more abstract than the other two since they don't run any concrete
calculation; it is hidden behind method calls on another object. So this
code is not symmetric: it doesn't group commands of a similar nature
together and it doesn't separate them from each other.

Better Code
^^^^^^^^^^^

.. code-block:: python

    var1 = "some value"
    var2 = "another value"

    my_obj.method1()
    my_obj.method2()

    ...  # The rest of the code snippet

This code fixes the mentioned issues. Note that, again, a blank line
clearly communicates that a more abstract block starts and that this block
can and should be maintained separately.
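The guideline about superclass calls can be illustrated with a minimal
hypothetical sketch (the class and method names here are made up for the
example and don't refer to actual Mistral code):

.. code-block:: python

    class RegularTask(Task):
        def complete(self, state, state_info=None):
            super(RegularTask, self).complete(state, state_info)

            # The blank line after the superclass call visually separates
            # the inherited part of the logic from this class's own steps.
            self.register_workflow_completion_check()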
Multi-line Method Calls
-----------------------

Guidelines
^^^^^^^^^^

- *Long method calls that don't fit into 80 characters must be written in
  such a way that each argument is located on an individual line, as well
  as the closing round bracket.*

All code lines must be no longer than 80 characters. Once in a while we
have to break lines when dealing with long instructions, for example, when
we need to write a method call with lots of arguments, or the names of the
arguments are long enough that the entire code instruction doesn't fit into
80 characters.

Problematic Code
^^^^^^^^^^^^^^^^

.. code-block:: python

    executor.run_action(self.action_ex.id,
                        self.action_def.action_class,
                        self.action_def.attributes or {},
                        self.action_ex.input,
                        self.action_ex.runtime_context.get('safe_rerun', False),
                        execution_context, target=target, timeout=timeout)

On many Python projects this way of breaking lines for long method calls is
considered right. However, when we need to read and understand this code
quickly, we may experience the following issues:

- It's hard to see where one argument ends and where another one starts
- It's hard to check if the order of the arguments is correct
- If such a method call declaration is followed by another code line (e.g.
  another method call) then it's hard to see where the method call
  declaration ends

Better Code
^^^^^^^^^^^

.. code-block:: python

    executor.run_action(
        self.action_ex.id,
        self.action_def.action_class,
        self.action_def.attributes or {},
        self.action_ex.input,
        self.action_ex.runtime_context.get('safe_rerun', False),
        execution_context,
        target=target,
        timeout=timeout
    )

Although the second version of the method call sacrifices conciseness to
some extent, it eliminates the issues mentioned above. Every method
argument is easily visible, it's easy to check the number of the arguments
and their order (e.g. to compare with the method signature), and it's easy
to see where the entire command ends because the closing round bracket on a
separate line communicates it clearly.

mistral-10.0.0.0b3/doc/source/developer/contributor/debugging_and_testing.rst
=====================
Debugging and Testing
=====================

To debug using a local engine and executor without dependencies such as
RabbitMQ, make sure your ``/etc/mistral/mistral.conf`` has the following
settings::

    [DEFAULT]
    rpc_backend = fake

    [pecan]
    auth_enable = False

and run the following command in *pdb*, *PyDev* or *PyCharm*::

    mistral/cmd/launch.py --server all --config-file /etc/mistral/mistral.conf --use-debugger

.. note::

    In PyCharm, you also need to enable the Gevent compatibility flag in
    Settings -> Build, Execution, Deployment -> Python Debugger -> Gevent
    compatible. Without this setting, PyCharm will not show variable values
    and becomes unstable during debugging.

Running unit tests in PyCharm
=============================

In order to be able to conveniently run unit tests, you need to:

1. Set unit tests as the default runner:

   Settings -> Tools -> Python Integrated Tools ->
   Default test runner: Unittests
2. Enable test detection for all classes:

   Run/Debug Configurations -> Defaults -> Python tests -> Unittests ->
   uncheck Inspect only subclasses of unittest.TestCase

Running examples
================

To run the examples, find them in the mistral-extra repository
(https://github.com/openstack/mistral-extra) and follow the instructions
for each example.

Automated Tests
===============

On the Mistral project we have two separate test suites:

* Unit tests - executed by the Jenkins CI job in OpenStack Gerrit
  (python-style checks and execution of all unit tests)
* Integration tests - executed by the DevStack gate job in OpenStack Gerrit
  (integration tests for Mistral after an OpenStack deployment with
  devstack)

Where we can find automated tests
=================================

mistral:

* Unit tests can be found at
  https://github.com/openstack/mistral/tree/master/mistral/tests/unit
* Integration tests can be found at
  https://github.com/openstack/mistral-tempest-plugin/tree/master/mistral_tempest_tests/tests

python-mistralclient:

* Unit tests can be found at
  https://github.com/openstack/python-mistralclient/tree/master/mistralclient/tests/unit
* Integration tests can be found at
  https://github.com/openstack/python-mistralclient/tree/master/mistralclient/tests/functional

How to execute tests manually
=============================

Almost all existing automated tests can be executed manually on the
developer's desktop (except those which check OpenStack actions). To do
this, you should clone the "mistral" repository (or "python-mistralclient")
and run the corresponding commands.

Cloning a repository:

.. code-block:: bash

    $ git clone https://git.opendev.org/openstack/mistral.git
    $ cd mistral

Unit tests
----------

To run all unit tests:

.. code-block:: bash

    $ tox

To run unit tests against a specific python version:

.. code-block:: bash

    $ tox -e py35

To run tests from a specific test class (using a specific python version):

.. code-block:: bash

    $ tox -e py35 -- 'DataFlowEngineTest'

Integration tests
-----------------

There are several suites of integration tests in the mentioned
repositories:

mistral-tempest-plugin:

* mistral_tempest_tests/tests/api/v2/test_workflows.py - contains the tests
  checking the Mistral API v2 related to workflows
* mistral_tempest_tests/tests/api/v2/test_actions.py - contains the tests
  checking the Mistral API v2 related to actions
* and so on

python-mistralclient:

* mistralclient/tests/functional/cli/v2/ - contains test suites which check
  interaction with Mistral using the CLI

To run the integration tests:

* in OpenStack mode (when auth in Mistral is enabled and Mistral integrates
  with OpenStack components)

  .. code-block:: bash

      $ pip install git+http://git.opendev.org/openstack/tempest.git
      $ nosetests mistral-tempest-plugin/mistral_tempest_tests/tests/api/v2

* in non-OpenStack mode:

  * set 'auth_enable=false' in mistral.conf under the [pecan] group
  * restart the Mistral server
  * execute: ./run_functional_tests

mistral-10.0.0.0b3/doc/source/developer/contributor/devstack.rst
Mistral Devstack Installation
=============================

1. Download DevStack::

    $ git clone https://github.com/openstack-dev/devstack.git
    $ cd devstack

2. Add this repo as an external repository by editing the ``localrc``
   file::

    enable_plugin mistral https://github.com/openstack/mistral

3. Run ``stack.sh``
mistral-10.0.0.0b3/doc/source/developer/contributor/img/
mistral-10.0.0.0b3/doc/source/developer/contributor/img/Pycharm_run_config_menu.png
[binary PNG image data omitted]
mistral-10.0.0.0b3/doc/source/developer/contributor/img/dashboard_environment_variables.png
[binary PNG image data omitted]

mistral-10.0.0.0b3/doc/source/developer/contributor/index.rst
=========================
Contributor Documentation
=========================

.. toctree::
   :maxdepth: 3

   coding_guidelines
   debugging_and_testing
   profiling
   troubleshooting
   devstack
mistral-10.0.0.0b3/doc/source/developer/contributor/profiling.rst
Profiling Mistral
=================

What Is Profiling?
------------------

Profiling is a procedure for gathering runtime statistics about certain
code snippets, such as:

- The maximum run time
- The minimum run time
- The average run time
- The number of runs

Such info is key to understanding the performance bottlenecks residing in a
system. Having these metrics, we can focus on the places in code that slow
down the system most and come up with optimisations to improve them.

A typical code snippet eligible for gathering this kind of information is a
function or a method since most popular engineering techniques encourage
developers to decompose code into functions/methods representing well
defined parts of the program logic. However, any arbitrary piece of code
may be a target for measuring.

'osprofiler' Project
--------------------

`osprofiler <https://opendev.org/openstack/osprofiler>`_ is a project
created within the OpenStack ecosystem to do profiling. The paragraphs
below explain how Mistral uses 'osprofiler' for profiling.

The central concept of 'osprofiler' is a profiler trace. A developer can
mark code snippets with profiler traces and 'osprofiler' will track them.

In general, 'osprofiler' allows cross-service profiling, that is, tracking
a chain of calls that belong to different RESTful services but are related
to the same user request. However, this guide doesn't cover this more
complex use case and focuses on profiling within just one service, Mistral.

Profiler Traces
---------------

The most common way to create a profiler trace in the code is applying a
special ``@trace`` decorator:

.. code-block:: python

    from osprofiler import profiler

    class DefaultEngine(base.Engine):
        ...

        @profiler.trace('engine-on-action-complete', hide_args=True)
        def on_action_complete(self, action_ex_id, result, wf_action=False,
                               async_=False):
            with db_api.transaction():
                if wf_action:
                    action_ex = db_api.get_workflow_execution(action_ex_id)

                    if result is None:
                        result = ml_actions.Result(data=action_ex.output)
                else:
                    action_ex = db_api.get_action_execution(action_ex_id)

                action_handler.on_action_complete(action_ex, result)

                return action_ex.get_clone()

In this example, we applied a special decorator to a method that adds a
profiling trace. The most important argument of the decorator is a trace
name. Its value is 'engine-on-action-complete' in our case. The second
argument 'hide_args' defines whether 'osprofiler' needs to pass method
argument values down to other layers. More specifically, there's a notion
of a metrics collector in 'osprofiler' that accumulates info about traces
in any desirable form, depending on a particular implementation. This topic
though is out of the scope of this document. For our purposes, it's better
to set this argument to **True**, which avoids losing performance on
processing additional data (the argument values of all method calls).

Another way of adding a profiling trace is the following:

.. code-block:: python

    try:
        profiler.start("engine-on-action-complete")

        action_handler.on_action_complete(action_ex, result)
    finally:
        profiler.stop()

Here we don't decorate the entire method, we only want to profile just one
line of code. But like in the previous example, we added a profiling trace.

The obvious advantage of using the decorator is that it can live in the
code permanently because it doesn't pollute it too much, and we can use it
any time we want to profile the system.

An even simpler and more concise way to achieve the same is to use a
special context manager from 'osprofiler':

.. code-block:: python

    with profiler.Trace('engine-on-action-complete'):
        action_handler.on_action_complete(action_ex, result)
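To experiment with these primitives outside of Mistral, here is a minimal
self-contained sketch (a toy example, not Mistral code; the trace name and
the functions are made up). Note that, as an assumption based on the
general 'osprofiler' API, ``profiler.init()`` must be called first,
otherwise the trace calls below are effectively no-ops:

.. code-block:: python

    from osprofiler import profiler

    # Initialize a profiling session for the current thread. The key must
    # match the server-side 'hmac_keys' option in cross-service scenarios.
    profiler.init(hmac_key='secret_word')

    @profiler.trace('demo-add', hide_args=True)
    def add(a, b):
        return a + b

    def add_explicit(a, b):
        # The same trace expressed with explicit start/stop calls.
        profiler.start('demo-add')

        try:
            return a + b
        finally:
            profiler.stop()

    def add_ctx(a, b):
        # The same trace expressed with the context manager.
        with profiler.Trace('demo-add'):
            return a + b

All three variants produce a pair of "-start"/"-stop" events with the same
trace name, so the choice between them is purely a matter of scope and
readability.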
Configuring Mistral for Profiling
---------------------------------

To start a profiling session, one needs to make the steps below.

Mistral Configuration File
^^^^^^^^^^^^^^^^^^^^^^^^^^

Make these changes in the config file:

.. code-block:: cfg

    [DEFAULT]
    log_config_append = wf_trace_logging.conf

    [profiler]
    enabled = True
    hmac_keys = secret_word

Defining the 'log_config_append' property allows keeping all the logging
configuration in a separate file. In the example above, it's called
'wf_trace_logging.conf' but it can have a different name, if needed.

The '[profiler]' group directly refers to the 'osprofiler' project and is
brought in by it. The property 'enabled' is self-explanatory, but the other
one is not. The value of the property 'hmac_keys' needs to be known by
someone who wants to start a profiling session. This value needs to be
passed as part of the user request. It will be shown a bit later.
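Conceptually, 'hmac_keys' works as a shared secret: the client signs the
trace info it sends with this key and the server verifies the signature
before enabling profiling for the request. A rough illustration with
Python's standard ``hmac`` module (this is a conceptual sketch of the idea,
not osprofiler's actual code; the payload is made up):

.. code-block:: python

    import hashlib
    import hmac

    def sign(trace_info: bytes, key: str) -> str:
        # Compute an HMAC signature of the trace info with a shared key.
        return hmac.new(key.encode(), trace_info, hashlib.sha1).hexdigest()

    server_key = 'secret_word'  # what Mistral has in 'hmac_keys'
    trace_info = b'{"base_id": "...", "parent_id": "..."}'

    # A request is accepted only if the client used the same key.
    client_signature = sign(trace_info, 'secret_word')

    assert hmac.compare_digest(client_signature, sign(trace_info, server_key))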
Logging Configuration File
^^^^^^^^^^^^^^^^^^^^^^^^^^

The content of the logging configuration file conforms to the documentation
for the standard 'logging' Python module. Find more details at
https://docs.python.org/3/library/logging.config.html#configuration-file-format

This particular example of the logging file configures three different
loggers and their corresponding counterparts like handlers. For the
purposes of this document, though, we only need to pay attention to how the
'profiler_trace' logger is configured. Every entity starting with
'profiler' is related to the profiling configuration. The reason why other
loggers are also included here is to show how different loggers can coexist
within one configuration file and how they can reuse the same entities.

.. code-block:: cfg

    [loggers]
    keys=workflow_trace,profiler_trace,root

    [handlers]
    keys=consoleHandler, wfTraceFileHandler, profilerFileHandler, fileHandler

    [formatters]
    keys=wfFormatter, profilerFormatter, simpleFormatter, verboseFormatter

    [logger_workflow_trace]
    level=INFO
    handlers=consoleHandler, wfTraceFileHandler
    qualname=workflow_trace
    propagate=0

    [logger_profiler_trace]
    level=INFO
    handlers=profilerFileHandler
    qualname=profiler_trace

    [logger_root]
    level=DEBUG
    handlers=fileHandler

    [handler_fileHandler]
    class=FileHandler
    level=DEBUG
    formatter=verboseFormatter
    args=("/tmp/mistral.log",)

    [handler_consoleHandler]
    class=StreamHandler
    level=INFO
    formatter=simpleFormatter
    args=(sys.stdout,)

    [handler_wfTraceFileHandler]
    class=FileHandler
    level=INFO
    formatter=wfFormatter
    args=("/tmp/mistral_wf_trace.log",)

    [handler_profilerFileHandler]
    class=FileHandler
    level=INFO
    formatter=profilerFormatter
    args=("/tmp/mistral_osprofile.log",)

    [formatter_verboseFormatter]
    format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s
    datefmt=

    [formatter_simpleFormatter]
    format=%(asctime)s - %(message)s
    datefmt=%y-%m-%d %H:%M:%S

    [formatter_wfFormatter]
    format=%(asctime)s WF [-] %(message)s
    datefmt=

    [formatter_profilerFormatter]
    format=%(message)s
    datefmt=%H:%M:%S

Triggering Profiling Sessions
-----------------------------

Once Mistral is configured as explained above, in order to start a
profiling session we need to make the user request to Mistral that we want
to analyse, adding one property to it. The name of the property is
'profile' and it needs to be set to the value of the 'hmac_keys' property
from the main configuration file.

.. code-block:: bash

    $ mistral execution-create my_slow_workflow --profile secret_word
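Under the hood, clients built on 'osprofiler' generally propagate the
signed trace context to the service in HTTP headers. A short sketch based
on the general 'osprofiler' API (this is an assumption about how the
'--profile' flag is implemented, not a description of mistralclient
internals):

.. code-block:: python

    from osprofiler import profiler
    from osprofiler import web

    # The key must match 'hmac_keys' on the Mistral server side.
    profiler.init(hmac_key='secret_word')

    # Returns headers such as 'X-Trace-Info' and 'X-Trace-HMAC' that can
    # be attached to a REST request to propagate the profiling session.
    print(web.get_trace_id_headers())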
.. code-block:: cfg

    2020-02-27T08:04:25.789433 f12e75d5-5d59-4cbc-b74d-357f19290dd7 f12e75d5-5d59-4cbc-b74d-357f19290dd7 b9b29981-0916-4635-af18-d6c92f991f46 engine-start-workflow-start
    2020-02-27T08:04:25.790232 f12e75d5-5d59-4cbc-b74d-357f19290dd7 b9b29981-0916-4635-af18-d6c92f991f46 3cdd41b5-318a-4926-a38e-63344b6aef7a workflow-handler-start-workflow-start
    2020-02-27T08:04:25.812879 f12e75d5-5d59-4cbc-b74d-357f19290dd7 3cdd41b5-318a-4926-a38e-63344b6aef7a 603f1fab-be78-438d-af13-d94ed3b7e416 workflow-start-start
    2020-02-27T08:04:25.954502 f12e75d5-5d59-4cbc-b74d-357f19290dd7 603f1fab-be78-438d-af13-d94ed3b7e416 b1d0a77a-52f5-4415-a6c4-f16b3591a47d workflow-set-state-start
    2020-02-27T08:04:25.961298 0.006782 f12e75d5-5d59-4cbc-b74d-357f19290dd7 603f1fab-be78-438d-af13-d94ed3b7e416 b1d0a77a-52f5-4415-a6c4-f16b3591a47d workflow-set-state-stop
    2020-02-27T08:04:25.961769 f12e75d5-5d59-4cbc-b74d-357f19290dd7 603f1fab-be78-438d-af13-d94ed3b7e416 27b58351-aebe-4e37-9cec-91fdbef5c68b wf-controller-get-controller-start
    2020-02-27T08:04:25.962041 0.000267 f12e75d5-5d59-4cbc-b74d-357f19290dd7 603f1fab-be78-438d-af13-d94ed3b7e416 27b58351-aebe-4e37-9cec-91fdbef5c68b wf-controller-get-controller-stop
    2020-02-27T08:04:25.962311 f12e75d5-5d59-4cbc-b74d-357f19290dd7 603f1fab-be78-438d-af13-d94ed3b7e416 605ebfc2-a2bb-4fe1-8159-fc16f6741f5f workflow-controller-continue-workflow-start
    2020-02-27T08:04:26.023134 0.060832 f12e75d5-5d59-4cbc-b74d-357f19290dd7 603f1fab-be78-438d-af13-d94ed3b7e416 605ebfc2-a2bb-4fe1-8159-fc16f6741f5f workflow-controller-continue-workflow-stop
    2020-02-27T08:04:26.023600 f12e75d5-5d59-4cbc-b74d-357f19290dd7 603f1fab-be78-438d-af13-d94ed3b7e416 3a5a384a-9598-4844-a740-981f92e604af dispatcher-dispatch-commands-start
    2020-02-27T08:04:26.023918 f12e75d5-5d59-4cbc-b74d-357f19290dd7 3a5a384a-9598-4844-a740-981f92e604af d84a13e4-4763-4321-ab08-8cbd19656f2f task-handler-run-task-start
    2020-02-27T08:04:26.024179 f12e75d5-5d59-4cbc-b74d-357f19290dd7 d84a13e4-4763-4321-ab08-8cbd19656f2f 7878e4f8-aaaa-4b9b-b15a-35848b5cdd61 task-handler-build-task-from-command-start
    2020-02-27T08:04:26.024422 0.000243 f12e75d5-5d59-4cbc-b74d-357f19290dd7 d84a13e4-4763-4321-ab08-8cbd19656f2f 7878e4f8-aaaa-4b9b-b15a-35848b5cdd61 task-handler-build-task-from-command-stop

So any time Mistral runs code marked as a profiling trace it prints two entries into the file: one right before the code snippet starts and one right after its completion. Notice also that for the corresponding "-stop" entry (the suffix going after the trace name) Mistral prints an additional number in the second column. This is the duration of the code snippet. The content of this file by itself is probably not so useful (although it might be for some purpose), but based on it we can build the following report:
.. code-block:: bash

    Total time | Max time | Avg time | Occurrences | Trace name
    -------------------------------------------------------------------------------------------
    2948.326     8.612      1.218      2420          engine-on-action-complete
    2859.172     8.516      1.181      2420          action-handler-on-action-complete
    2812.726     8.482      1.162      2420          task-handler-on-action-complete
    2767.836     8.412      1.144      2420          regular-task-on-action-complete
    2766.199     8.411      1.143      2420          task-complete
    2702.764     8.351      0.460      5878          task-run
    2506.531     8.354      0.850      2948          dispatcher-dispatch-commands
    2503.398     8.353      0.437      5735          task-handler-run-task
    2488.940     8.350      0.434      5735          task-run-new
    1669.179     54.737     0.881      1894          default-executor-run-action
    1201.582     3.687      0.497      2420          regular-task-get-action-input
    1126.351     2.093      0.476      2366          ad-hoc-action-validate-input
    1125.129     2.092      0.238      4732          ad-hoc-action-prepare-input
    687.619      7.594      0.651      1056          task-handler-refresh-task-state
    387.622      3.872      0.300      1291          workflow-handler-check-and-fix-integrity
    234.231      4.068      0.392      597           workflow-handler-check-and-complete
    224.026      4.042      0.375      597           workflow-check-and-complete
    210.184      6.694      1.470      143           task-run-existing
    160.118      8.343      0.304      526           workflow-action-schedule
    141.398      4.546      0.268      528           workflow-handler-start-workflow
    109.641      4.361      0.208      528           workflow-start
    78.683       2.004      0.077      1024          direct-wf-controller-get-join-logical-state
    ...

To generate this report, run:

.. code-block:: bash

    $ python tools/rank_profiled_methods.py /tmp/mistral_osprofile.log report.txt

This report is really useful when it comes to analysing performance bottlenecks. All times are shown in seconds.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/developer/contributor/troubleshooting.rst0000644000175000017500000000453100000000000026503 0ustar00coreycorey00000000000000
=============================
Troubleshooting And Debugging
=============================

Mistral-Dashboard debug instructions
====================================

**Pycharm**

Debugging OpenStack Mistral-Dashboard is the same as debugging OpenStack Horizon. The following instructions should get you set up to debug both in the same run.

Set PyCharm debug settings:

1. Under File > Settings > Languages and Framework > Django - enter the following:

   a. Check "Enable Django Support"
   b. Django project root: your file system path to the Horizon project root
   c. Settings: openstack_dashboard/settings.py (under your Horizon folder)
   d. Manage script: manage.py (also in your Horizon folder)
   e. Click OK

.. image:: img/dashboard_django_settings.png

2. Enter the debug configurations menu, using the tiny arrow pointing down, left of the "play" icon, or under the Run menu

.. image:: img/Pycharm_run_config_menu.png

3. In the new window, click the green plus icon and then select "Django server" to create a new Django Server configuration.

4. In the new window that appears:

   a. Name that configuration Horizon
   b. Enter some port so it won't run on the default (for example - port: 4000)

.. image:: img/dashboard_debug_config.png

5. Click on the Environment variables button, then in the new window:

   a. Make sure you have PYTHONUNBUFFERED set to 1
   b. Create a new pair - DJANGO_SETTINGS_MODULE : openstack_dashboard.settings
   c. When finished click OK.

.. image:: img/dashboard_environment_variables.png

You should now be able to debug and run the project using PyCharm. PyCharm will listen to any changes you make and restart the Horizon server automatically.
**Note**: When executing the project via PyCharm Run / Debug, you could get an error page after trying to log in: "Page not found (404)". To resolve that, remove the port from the browser URL bar, then log in. You should be able to log in without it. After a successful login, bring the port back - it will continue your session.

**Further notes**

- If you need help with PyCharm and general debugging, please refer to: `JetBrains PyCharm developer guide `_

- If you would like to manually restart the apache server, open a terminal and run::

      $ sudo service apache2 restart

  *(if not under Ubuntu, replace "sudo" with an equivalent command)*
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0895667 mistral-10.0.0.0b3/doc/source/developer/extensions/0000755000175000017500000000000000000000000022344 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/developer/extensions/creating_custom_action.rst0000644000175000017500000000216600000000000027626 0ustar00coreycorey00000000000000
============================
How to write a Custom Action
============================

1. Write a class inherited from ``mistral_lib.actions.Action``

.. code-block:: python

    from mistral_lib import actions

    class RunnerAction(actions.Action):
        def __init__(self, param):
            # store the incoming params
            self.param = param

        def run(self):
            # return your results here
            return {'status': 0}

2. Publish the class in a namespace (in your ``setup.cfg``)

.. code-block:: ini

    [entry_points]
    mistral.actions =
        example.runner = my.mistral_plugins.somefile:RunnerAction

3. Install the Python package containing the action. If this was added to Mistral itself it will need to be reinstalled.

4. Run the following command so Mistral discovers the new action

.. code-block:: console

    $ mistral-db-manage --config-file <path-to-config> populate

5. Now you can call the action ``example.runner``

.. code-block:: yaml

    my_workflow:
      tasks:
        my_action_task:
          action: example.runner
          input:
            param: avalue_to_pass_in
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/developer/extensions/extending_yaql.rst0000644000175000017500000001235400000000000026116 0ustar00coreycorey00000000000000
===================================
How to write a custom YAQL function
===================================

********
Tutorial
********

1. Create a new Python project, an empty folder, containing a basic ``setup.py`` file.

.. code-block:: bash

    $ mkdir my_project
    $ cd my_project
    $ vim setup.py

.. code-block:: python

    try:
        from setuptools import setup, find_packages
    except ImportError:
        from distutils.core import setup, find_packages

    setup(
        name="project_name",
        version="0.1.0",
        packages=find_packages(),
        install_requires=["mistral", "yaql"],
        entry_points={
            "mistral.expression.functions": [
                "random_uuid = my_package.sub_package.yaql:random_uuid_"
            ]
        }
    )

Publish the ``random_uuid_`` function in the ``entry_points`` section, in the ``mistral.expression.functions`` namespace in ``setup.py``. This function will be defined later.

Note that the package name will be used in Pip and must not overlap with other packages installed. ``project_name`` may be replaced by something else. The package name (``my_package`` here) may overlap with other packages, but module paths (``.py`` files) may not.
For example, it is possible to have a ``mistral`` package (though not recommended), but there must not be a ``mistral/version.py`` file, which would overlap with the file existing in the original ``mistral`` package.

``yaql`` and ``mistral`` are the required packages. ``mistral`` is necessary in this example only because calls to the Mistral Python DB API are made.

For each entry point, the syntax is:

.. code-block:: python

    "<function name> = <module path>:<python function name>"

``stevedore`` will detect all the entry points and make them available to all Python applications needing them. Using this feature, there is no need to modify Mistral's core code.

2. Create a package folder.

A package folder is a directory with a ``__init__.py`` file. Create a file that will contain the custom YAQL functions. There are no restrictions on the paths or file names used.

.. code-block:: bash

    $ mkdir -p my_package/sub_package
    $ touch my_package/__init__.py
    $ touch my_package/sub_package/__init__.py

3. Write a function in ``yaql.py``. That function might have ``context`` as its first argument to have the current YAQL context available inside the function.

.. code-block:: bash

    $ cd my_package/sub_package
    $ vim yaql.py

.. code-block:: python

    from uuid import uuid5, UUID
    from time import time

    def random_uuid_(context):
        """Generate a UUID using the execution ID and the clock."""

        # fetch the current workflow execution ID found in the context
        execution_id = context['__execution']['id']

        time_str = str(time())

        execution_uuid = UUID(execution_id)

        return uuid5(execution_uuid, time_str)

This function returns a random UUID using the current workflow execution ID as a namespace. The ``context`` argument will be passed by the Mistral YAQL engine to the function. It is invisible to the user. It contains variables from the current task execution scope, such as ``__execution``, which is a dictionary with information about the current workflow execution, such as its ``id``.

Note that any errors raised will be displayed in the task execution state information. Any valid Python primitives may be returned.

The ``context`` argument is optional. There can be as many arguments as wanted; even list arguments such as ``*args`` or dictionary arguments such as ``**kwargs`` can be used as function arguments. For more information about YAQL, read the `official YAQL documentation `_.

4. Install ``pip`` and ``setuptools``.

.. code-block:: bash

    $ curl https://bootstrap.pypa.io/3.2/get-pip.py | python
    $ pip install --upgrade setuptools
    $ cd -

5. Install the package (note that there is a dot ``.`` at the end of the line).

.. code-block:: bash

    $ pip install .

6. The YAQL function can be called in Mistral using its name ``random_uuid``. The function name in Python ``random_uuid_`` does not matter, only the entry point name ``random_uuid`` does.

.. code-block:: yaml

    my_workflow:
      tasks:
        my_action_task:
          action: std.echo
          publish:
            random_id: <% random_uuid() %>
          input:
            output: "hello world"

****************
Updating changes
****************

After any newly created functions or any modification in the code, re-run ``pip install .`` and restart Mistral.

***********
Development
***********

While developing, it is sufficient to add the root source folder (the parent folder of ``my_package``) to the ``PYTHONPATH`` environment variable and to add the line ``random_uuid = my_package.sub_package.yaql:random_uuid_`` to the Mistral entry points in the ``mistral.expression.functions`` namespace, assuming the path to the parent folder of ``my_package`` is ``/path/to/my_project``:
.. code-block:: bash

    $ export PYTHONPATH=$PYTHONPATH:/path/to/my_project
    $ vim $(find / -name "mistral.*egg-info*")/entry_points.txt

.. code-block:: ini

    [entry_points]
    mistral.expression.functions =
        random_uuid = my_package.sub_package.yaql:random_uuid_
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/developer/extensions/index.rst0000644000175000017500000000023400000000000024204 0ustar00coreycorey00000000000000
==========================
Writing Mistral Extensions
==========================

.. toctree::
   :maxdepth: 3

   creating_custom_action
   extending_yaql
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/developer/index.rst0000644000175000017500000000022000000000000022000 0ustar00coreycorey00000000000000
=======================
Developer Documentation
=======================

.. toctree::
   :maxdepth: 2

   contributor/index
   extensions/index
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/index.rst0000644000175000017500000000632700000000000020025 0ustar00coreycorey00000000000000
========================
Mistral Workflow Service
========================

What is Mistral?
================

Mistral is a workflow service. Lots of computations in computer systems nowadays can be represented as processes that consist of multiple interconnected steps that need to run in a particular order. Those steps are often interactions with components distributed across different machines: real hardware machines, cloud virtual machines or containers. Mistral provides capabilities to automate such processes.

Particularly, Mistral can be used, for example, for solving administrator tasks related to managing clusters of software, or for any other tasks that span multiple components and take long to complete. It can also be used as a central component for deploying distributed software on a truly large scale. In any case where the ability to track the progress of the activity becomes crucial, Mistral is a good fit.

A Mistral user can describe such a process as a set of tasks and transitions between them, and upload such a definition to Mistral, which will take care of state management, correct execution order, parallelism, synchronization and high availability. In Mistral terminology such a set of tasks and relations between them is called a **workflow**.

Just to Get Started
===================

* :doc:`user/overview`: If you've just started with Mistral, this short article will help you understand the main Mistral ideas and concepts.
* :doc:`user/faq`: Some of the typical questions you have may already be answered here.

For End Users
=============

* :doc:`user/index`: If you're going to use Mistral functionality as an end user, i.e. writing and running workflows, then you need to read the full user documentation that tells about all Mistral features, including the full description of the Mistral Workflow Language and the Mistral REST API.
* :doc:`user/wf_lang_v2`: If you just want a direct link to the full specification of the Mistral Workflow Language, this is it.
* :doc:`user/rest_api_v2`: This is where you can find the full specification of the Mistral REST API.

For Administrators and Operators
================================

* :doc:`admin/index`: If you need to install, configure and maintain a Mistral cluster, this is the place to start.
For Developers
==============

* :doc:`developer/contributor/coding_guidelines`: No matter what you're going to develop for Mistral, please read the coding guidelines we accept in our project.
* :doc:`developer/index`: If you want to contribute to the project or write Mistral extensions, please start here.
* :doc:`developer/extensions/index`: Read this section if you want to write custom Mistral actions and other extensions.

Workflow Visualization (CloudFlow)
==================================

* `CloudFlow `_: If you're looking for a nice workflow visualization tool then visit this web page. CloudFlow provides a nice UI for debugging and analysing workflow executions.

Main Chapters
=============

.. toctree::
   :maxdepth: 1
   :includehidden:

   user/index
   admin/index
   developer/index

.. only:: html

   Search
   ======

   * :ref:`Document search `: Search the contents of this document.
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.093567 mistral-10.0.0.0b3/doc/source/user/0000755000175000017500000000000000000000000017136 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/asynchronous_actions.rst0000644000175000017500000001340000000000000024141 0ustar00coreycorey00000000000000
=====================================
How to work with asynchronous actions
=====================================

*******
Concept
*******

.. image:: /user/terminology/img/actions.png

During a workflow execution Mistral eventually runs actions. An action is a particular function (or a piece of work) that a workflow task is associated with. Actions can be synchronous or asynchronous.

Synchronous actions are actions that get completed without a 3rd party, i.e. by Mistral itself. When the Mistral engine schedules a synchronous action to run, it sends its definition and parameters to the Mistral executor; the executor runs it and, upon its completion, sends the result of the action back to the Mistral engine.

In the case of asynchronous actions the executor doesn't send a result back to Mistral. In fact, the concept of an asynchronous action assumes that a result won't be known at the time the executor is running it. It rather assumes that the action will just delegate the actual work to a 3rd party, which can be either a human or a computer system (e.g. a web service). So an asynchronous action's run() method is supposed to just send a signal to something that is capable of doing the required job.

Once the 3rd party has done the job, it takes responsibility for sending the result of the action back to Mistral via the Mistral API. Effectively, the 3rd party just needs to update the state of the corresponding action execution object. To make that possible, it must know the corresponding action execution id.

It's worth noting that from the Mistral engine's perspective the scheme is essentially the same for synchronous and asynchronous actions. If an action is synchronous, then the executor immediately sends a result back with the RPC mechanism (most often, a message queue as a transport) to the Mistral engine after the action completes. But the engine itself doesn't proactively wait for anything; its architecture is fully based on asynchronous messages. So in the case of an asynchronous action, the only change is that the executor is not responsible for sending the action result; something else takes over.

Let's see what we need to keep in mind when working with asynchronous actions.
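Before moving on to the how-to, it may help to see the shape of such an action in code. Below is a minimal sketch (the class name and parameters are hypothetical), modeled on the custom action example from the developer documentation; the key part is ``is_sync()`` returning *False*:

.. code-block:: python

    from mistral_lib import actions


    class SubmitReportJobAction(actions.Action):
        """A sketch of an asynchronous action (hypothetical example)."""

        def __init__(self, job_params):
            self.job_params = job_params

        def is_sync(self):
            # Tells Mistral that the executor itself won't produce a result;
            # a 3rd party is expected to deliver it later via the Mistral API.
            return False

        def run(self):
            # Only kick off the work in the 3rd-party system here. The action
            # execution id needed for the callback has to reach the 3rd party
            # somehow (e.g. in HTTP headers, as "mistral_http" does).
            pass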
******
How to
******

Currently, Mistral comes with one asynchronous action out of the box, "mistral_http". There's also the "async_noop" action, which is asynchronous as well, but it's mostly useful for testing purposes because it does nothing. "mistral_http" is an asynchronous version of the action "http" that sends HTTP requests. Asynchrony is controlled by the action's method is_sync(), which should return *True* for synchronous actions and *False* for asynchronous ones.

Let's see how the "mistral_http" action works and how to use it, step by step.

We can imagine that we have a simple web service playing the role of the 3rd party system mentioned before, accessible at http://my.webservice.com. And if we send an HTTP request to that url then our web service will do something useful. To keep it simple, let's say our web service just calculates a sum of two numbers provided as request parameters "a" and "b".

1. Workflow example
===================

.. code-block:: yaml

    ---
    version: '2.0'

    my_workflow:
      tasks:
        one_plus_two:
          action: mistral_http url=http://my.webservice.com
          input:
            params:
              a: 1
              b: 2

So our workflow has just one task, "one_plus_two", that sends a request to our web service and passes parameters "a" and "b" in a query string. Note that we specify "url" right after the action name but "params" in a special section "input". This is because there's currently no one-line syntax for dictionaries in Mistral. But both "url" and "params" are basically just parameters of the action "mistral_http".

It is important to know that when the "mistral_http" action sends a request it includes special HTTP headers that help identify the action execution object. These headers are:

- **Mistral-Workflow-Name**
- **Mistral-Workflow-Execution-Id**
- **Mistral-Task-Id**
- **Mistral-Action-Execution-Id**
- **Mistral-Callback-URL**

The most important one is "Mistral-Action-Execution-Id", which contains the id of the action execution that we need to calculate a result for. Using that id, a 3rd party can deliver the result back to Mistral once it's calculated. If the 3rd party is a computer system, it can just call the Mistral API via HTTP using the header "Mistral-Callback-URL", which contains a base URL. However, a human can also do it; the simplest way is just to use the Mistral CLI.

Of course, this is a practically meaningless example. It doesn't make sense to use asynchronous actions for simple arithmetic operations. Real examples where asynchronous actions are needed may include:

- **Analysis of big data volumes**. E.g. we need to run an external reporting tool.
- **Human interaction**. E.g. an administrator needs to approve allocation of resources.

In general, this can be anything that takes significant time, such as hours, days or weeks. Sometimes the duration of a job may even be unpredictable (in practice, though, it's reasonable to limit such jobs with a timeout policy). The key point here is that Mistral shouldn't try to wait for the completion of such a job while holding resources needed for that in memory.

An important aspect of using asynchronous actions is that even when we interact with 3rd-party computer systems, a human can still trigger action completion by just calling the Mistral API.

2. Pushing action result to Mistral
===================================

Using CLI:

.. code-block:: console

    $ mistral action-execution-update <id> --state SUCCESS --output 3

This command will update the "state" and "output" of the action execution object with the corresponding id. That way Mistral will know what the result of this action is and decide how to proceed with the workflow execution.
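A 3rd-party computer system can deliver the result programmatically as well. Below is a sketch in Python using the ``requests`` library; the base URL comes from the "Mistral-Callback-URL" header and the id from "Mistral-Action-Execution-Id" (the concrete values here are hypothetical). It mirrors the raw HTTP request shown next:

.. code-block:: python

    import requests

    # Values taken from the headers of the request Mistral sent us
    # (hypothetical examples).
    callback_url = 'http://mistral.example.com:8989/v2'
    action_ex_id = 'b9b29981-0916-4635-af18-d6c92f991f46'

    # Update the action execution state and output via the Mistral API.
    resp = requests.post(
        '%s/action-executions/%s' % (callback_url, action_ex_id),
        json={'state': 'SUCCESS', 'output': 3},
    )

    resp.raise_for_status()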
Using raw HTTP::

    POST /v2/action-executions/<id>

    {
        "state": "SUCCESS",
        "output": 3
    }
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.093567 mistral-10.0.0.0b3/doc/source/user/cli/0000755000175000017500000000000000000000000017705 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/cli/index.rst0000644000175000017500000000666300000000000021551 0ustar00coreycorey00000000000000
Mistral Client Commands Guide
=============================

The Mistral CLI can be used with the ``mistral`` command or via the `OpenStackClient `_.

Mistral Client
--------------

The best way to learn about all the commands and arguments that are expected is to use the ``mistral help`` command.

.. code-block:: bash

    $ mistral help
    usage: mistral [--version] [-v] [--log-file LOG_FILE] [-q] [-h] [--debug]
                   [--os-mistral-url MISTRAL_URL]
                   [--os-mistral-version MISTRAL_VERSION]
                   [--os-mistral-service-type SERVICE_TYPE]
    ...

It can also be used with the name of a sub-command.

.. code-block:: bash

    $ mistral help execution-create
    usage: mistral execution-create [-h] [-f {json,shell,table,value,yaml}]
                                    [-c COLUMN] [--max-width ] [--print-empty]
                                    [--noindent] [--prefix PREFIX]
                                    [-d DESCRIPTION]
                                    workflow_identifier [workflow_input] [params]

    Create new execution.

    positional arguments:
      workflow_identifier   Workflow ID or name. Workflow name will be deprecated
                            since Mitaka.
    ...

OpenStack Client
----------------

The OpenStack client works in a similar way; the command ``openstack help`` shows all the available commands, and then ``openstack help <command>`` will show the detailed usage.

The full list of Mistral commands that are registered with the OpenStack client can be listed with ``openstack command list``. By default it will list all commands grouped together, but we can specify only the Mistral command group.

.. code-block:: bash

    $ openstack command list --group openstack.workflow_engine.v2
    +------------------------------+-----------------------------------+
    | Command Group                | Commands                          |
    +------------------------------+-----------------------------------+
    | openstack.workflow_engine.v2 | action definition create          |
    |                              | action definition show            |
    |                              | action definition delete          |
    |                              | action definition list            |
    |                              | action definition show            |
    |                              | action definition update          |
    |                              | action execution delete           |
    ...

Then detailed help output can be requested for an individual command.

.. code-block:: bash

    $ openstack help workflow execution create
    usage: openstack workflow execution create [-h]
                                               [-f {json,shell,table,value,yaml}]
                                               [-c COLUMN] [--max-width ]
                                               [--print-empty] [--noindent]
                                               [--prefix PREFIX]
                                               [-d DESCRIPTION]
                                               workflow_identifier
                                               [workflow_input] [params]

    Create new execution.

    positional arguments:
      workflow_identifier   Workflow ID or name. Workflow name will be deprecated
                            since Mitaka.
      workflow_input        Workflow input
      params                Workflow additional parameters
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.093567 mistral-10.0.0.0b3/doc/source/user/cookbooks/0000755000175000017500000000000000000000000021127 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/cookbooks/cloud_cron.rst0000644000175000017500000003255200000000000024017 0ustar00coreycorey00000000000000
===========================================
Mistral for Administration (aka Cloud Cron)
===========================================

Prerequisites
=============

A reader should be familiar with basic Mistral concepts such as workflow, task, action, cron trigger and the YAQL expression language. Please refer to the corresponding sections of :doc:`/user/index` to get more information on that.

Background
==========

When it comes to managing IT infrastructure such as a cloud or a data center, system administrators typically need to solve a lot of tasks. To name just a few:

* Update the Linux kernel or specific software on all or a subset of servers
* Re-configure certain software on a subset of servers
* Crawl data from a subset of servers and build a report based on this data
* Check health of certain software on a subset of servers, or health of the servers themselves

It's worth adding that any of the tasks listed above may need to be done periodically according to a specified schedule. Dealing with them would require a lot of human attention if not using special software that would allow automating them.

In this article we'll take an OpenStack cloud tenant as an example of IT infrastructure that a system administrator needs to manage, and see how the Mistral workflow service can be useful for addressing those cases and why it's worthwhile to use workflow technology specifically.

Important aspects
=================

So what does it take to solve any of the above problems? Let's have a look at a pretty simple task such as upgrading the Linux kernel on a single server. It requires the following:

* Download new Linux kernel packages
* Install packages
* Reboot the server

Looks pretty simple to do. However, things get more complicated when:

* We want to do this for multiple servers
* We need to clearly see which servers have been successfully updated and which haven't after this sequence is completed on all the servers
* We need to run this sequence automatically on a periodic basis

For example, if we want to do this kind of automation by just writing a script (as administrators usually do), whether in Shell or Python, we'll quickly see that taking care of these aspects is pretty challenging: to do it efficiently it makes sense to process all the servers in parallel and, once all the servers have been processed, send a notification with the information showing whether all is fine or whether issues occurred during some of the operations. Additionally, if the script running on a single machine responsible for solving this task just fails for whatever reason, then the whole process of updating a hundred servers will not complete and will end up in an unknown state.

.. figure:: img/cloud_cron_updating_multiple_servers.png
   :align: center

   Figure 1. Updating multiple tenant servers
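To make the contrast concrete, the kind of one-off script alluded to above might look like the following sketch (the host list and user are hypothetical; note that it is sequential, keeps no persistent state, and everything is lost if the machine running it dies):

.. code-block:: python

    import subprocess

    # Hypothetical list of tenant server IPs.
    HOSTS = ['10.0.0.3', '10.0.0.4', '10.0.0.5']

    CMD = ('sudo apt-get update && '
           'sudo apt-get install linux-image-generic-lts-$(lsb_release -sc) -y && '
           'sudo reboot')

    for host in HOSTS:
        # One server at a time: a hang or failure here stalls the whole run,
        # and nothing records which servers have already been upgraded.
        subprocess.run(['ssh', 'ubuntu@%s' % host, CMD], check=True)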
So that shows that we need to take care of at least:

* Parallel execution
* Persistent state giving info about what happened with every server (at minimum, success or failure)
* High availability to make sure the whole thing will complete
* A notification mechanism so that we don't have to check the status of the process manually

And, as a matter of fact, this should be repeated every time we need to do something similar. A notification mechanism is not a must if we always want to run this upgrade manually and it doesn't take long. If a human doesn't control when it starts and/or it takes long, then notifications become very important. That all actually means that we most likely need to use an external tool that would take care of these concerns.

A workflow technology like the Mistral workflow service is exactly the type of tool that can help to deal with these problems.

Mistral-based solution
======================

Let's now show how we can solve this kind of task with Mistral and explore in detail how Mistral addresses the aforementioned concerns.

Updating Linux kernel on all tenant VMs
=======================================

As an example, let's see how we can upgrade the Linux kernel version on all cloud tenant servers (virtual machines, or just VMs), assuming they all have Ubuntu installed on them. We'll also make some assumptions about how we access guest operating systems, which we'll mention separately. In fact, those assumptions don't change much from the overall approach perspective, so it remains applicable if we alter some details, such as using a different operating system than Ubuntu. This use case is fairly simple but it demonstrates the essential advantages of using a workflow technology.

Initial workflow
================

The central Mistral concept is the workflow, so first of all we need to create a Mistral workflow that contains the logic of updating the Linux kernel on multiple tenant servers. Let's create a text file named *update_kernel.yaml* in any convenient text editor:

::

    ---
    version: '2.0'

    upgrade_kernel:
      input:
        - username: ubuntu
        - private_key_filename
        - gateway_host

      tasks:
        get_hosts:
          action: nova.servers_list
          publish:
            hosts: <% task(get_hosts).result.select({ip => $.addresses.get($.addresses.keys().first()).where($.get("OS-EXT-IPS:type") = fixed).first().addr}).ip %>
          keep-result: false
          on-success: upgrade

        upgrade:
          with-items: host in <% $.hosts %>
          action: std.ssh_proxied
          input:
            host: <% $.host %>
            gateway_host: <% $.gateway_host %>
            username: <% $.username %>
            private_key_filename: <% $.private_key_filename %>
            cmd: "sudo apt-get update && sudo apt-get install linux-image-generic-lts-$(lsb_release -sc) -y && sudo reboot"

This is the simplest version of a Mistral workflow that does what we need. Let's see what it consists of.

It has two task definitions: "get_hosts" and "upgrade". "get_hosts" calls the Nova action "nova.servers_list" that returns information about all servers in a tenant as a JSON list. What we really need here is to extract their IP addresses. In order to do that we declare a "publish" clause that introduces a new variable in the workflow context called "hosts" that will contain a list of IPs. The YAQL expression used to extract IP addresses is pretty tricky here simply because of how Nova structures networking information.

NOTE: it's easy to see in what form Nova returns info about a server just by running:
.. code-block:: bash

    $ mistral run-action nova.servers_get '{"server": "<server-id>"}'

It's worth noting that since in Mistral a result of a task is a result of its action (or workflow), we use the special task property "keep-result" set to "false" so that the result doesn't get stored in the workflow context. We do this just because we're not interested in all the information that Nova returns; only IPs are relevant. This makes sense to do because even for a tenant with 30 virtual servers, all the information about them returned by Nova will take ~100 KB of disk space.

The task "upgrade" is where the most interesting things happen. It leverages the "with-items" functionality to iterate over the list of server IPs and ssh to each of the servers in order to upgrade the kernel. The word "iterate" here doesn't mean, though, that processing is sequential. On the contrary, this is the place where Mistral runs the kernel upgrade in parallel. Every action execution object for "std.ssh_proxied" is stored in the database and keeps the state and result of the upgrade operation on a particular virtual server.

An attentive reader may have noticed the suffix "proxied" in the name of the action "std.ssh_proxied" and asked: "What does it mean? Why not just "std.ssh", which Mistral also has in its standard action pack?" So now we're getting back to the assumption about the way we access the guest operating system. Mistral, by default, can't really get secure shell access to guest VMs because of how the cloud isolates the management network, where all OpenStack services reside, from guest networks. In fact, if a server doesn't have a floating IP then any service running in the management network can't get network access to that server; it is simply in a different network. In our particular example, we assume that at least one VM in a tenant has a floating IP address so that it can be used as an ssh gateway through which we can actually ssh to other VMs. That's why we're using the special action called "std.ssh_proxied", where "proxied" means that we have a proxy VM to access all tenant VMs.

.. figure:: img/ssh_proxied.png
   :align: center

   Figure 2. Ssh access through a gateway VM.
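Conceptually, such proxied access can be illustrated with the following Python sketch using the ``paramiko`` library (a simplified illustration of the idea, not Mistral's actual implementation of "std.ssh_proxied"):

.. code-block:: python

    import paramiko


    def ssh_proxied(host, gateway_host, username, private_key_filename, cmd):
        # Tunnel the TCP connection to the target VM through the gateway VM
        # that has a floating IP.
        proxy = paramiko.ProxyCommand(
            'ssh -i %s -W %s:22 %s@%s'
            % (private_key_filename, host, username, gateway_host))

        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host, username=username,
                       key_filename=private_key_filename, sock=proxy)

        _, stdout, stderr = client.exec_command(cmd)

        return stdout.read(), stderr.read()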
Mistral is a distributed highly-available system and it's designed not only to survive infrastructural failures but also to keep its workflows running. That's why we can be sure that such a process automated with a workflow service like Mistral will finish even in case of failures of control system components, which in our case are the Mistral engine and executors.

Adding notifications
====================

What our workflow is missing is the ability to notify a cloud operator when the kernel upgrade has completed on all servers. In order to do that we just need to add one more task; let's call it "send_success_email". The full workflow now would look like:

::

    ---
    version: '2.0'

    upgrade_kernel:
      input:
        - username: ubuntu
        - private_key_filename
        - gateway_host
        - email_info: null # [to_email, from_email, smtp_server, smtp_password]

      tasks:
        get_hosts:
          action: nova.servers_list
          publish:
            hosts: <% task(get_hosts).result.select({ip => $.addresses.get($.addresses.keys().first()).where($.get("OS-EXT-IPS:type") = fixed).first().addr}).ip %>
          keep-result: false
          on-success: upgrade

        upgrade:
          with-items: host in <% $.hosts %>
          action: std.ssh_proxied
          input:
            host: <% $.host %>
            gateway_host: <% $.gateway_host %>
            username: <% $.username %>
            private_key_filename: <% $.private_key_filename %>
            cmd: "sudo apt-get update && sudo apt-get install linux-image-generic-lts-$(lsb_release -sc) -y && sudo reboot"
          on-success:
            - send_success_email: <% $.email_info != null %>

        send_success_email:
          action: std.email
          input:
            subject: Linux kernel on tenant VMs successfully updated
            body: |
              Number of updated VMs: <% $.hosts.len() %>

              -- Thanks
            from_addr: <% $.email_info.from_email %>
            to_addrs: [<% $.email_info.to_email %>]
            smtp_server: <% $.email_info.smtp_server %>
            smtp_password: <% $.email_info.smtp_password %>

Note that along with the new task we've also added an "on-success" clause for the "upgrade" task that defines a transition to the task "send_success_email" on successful completion of "upgrade". This transition is conditional: it only works if we passed the data needed to send an email as an input parameter. That's why this new version of the workflow has a new input parameter called "email_info". It's expected that "email_info" is a data structure that consists of the fields "from_email", "to_email", "smtp_server" and "smtp_password".

Uploading workflow to Mistral
=============================

Assuming we have the Mistral client installed, we can upload this workflow to Mistral with the command:

.. code-block:: bash

    $ mistral workflow-create update_kernel.yaml

Normal output of this command (and most others) shows a table with the newly uploaded workflow. It may look like:

.. code-block:: bash

    +----------------+--------+------------------------------+----------------------------+------------+
    | Name           | Tags   | Input                        | Created at                 | Updated at |
    +----------------+--------+------------------------------+----------------------------+------------+
    | upgrade_kernel |        | username=ubuntu, private_... | 2015-10-19 10:32:27        | None       |
    +----------------+--------+------------------------------+----------------------------+------------+

NOTE: In order to print all available workflows run:

.. code-block:: bash

    $ mistral workflow-list

Running the workflow
====================

Now that Mistral knows about the workflow "upgrade_kernel", we can start it by running:

.. code-block:: bash

    $ mistral execution-create upgrade_kernel input.json

The file input.json should contain the workflow input data in JSON, such as:

.. code-block:: rest

    {
        "private_key_filename": "my_key.pem",
        "gateway_host": "172.16.74.8"
    }

Configuring a Cron Trigger
==========================

In order to make this workflow run periodically we need to create a cron trigger:

.. code-block:: bash

    $ mistral cron-trigger-create update_kernel_weekly upgrade_kernel --pattern "0 2 * * mon"

In order to print all active cron triggers run:

.. code-block:: bash

    $ mistral cron-trigger-list

From now on the workflow we created will be started every Monday at 2.00 am and it will be updating the Linux kernel on all servers in the tenant we logged in to.

What's important about Mistral cron triggers is that they are also a distributed fault-tolerant mechanism. That means that if a number of Mistral engines crash, cron triggers will keep working because there's no single point of failure for them.
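To double-check what a cron pattern means before creating a trigger, it can be evaluated with the third-party ``croniter`` Python library, for example (a small sketch; the pattern below is the one used above):

.. code-block:: python

    from datetime import datetime

    from croniter import croniter

    # "0 2 * * mon" means: at 2:00 am every Monday.
    it = croniter('0 2 * * mon', datetime(2015, 10, 19, 12, 0))

    print(it.get_next(datetime))  # the first Monday 2:00 am after the start time
    print(it.get_next(datetime))  # ...and the Monday after that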
If we no longer need to upgrade the kernel periodically, we can just delete the trigger:

.. code-block:: bash

    $ mistral cron-trigger-delete update_kernel_weekly
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.093567 mistral-10.0.0.0b3/doc/source/user/cookbooks/img/0000755000175000017500000000000000000000000021703 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/cookbooks/img/cloud_cron_updating_multiple_servers.png0000644000175000017500000007554700000000000032131 0ustar00coreycorey00000000000000[binary PNG image data omitted]
Wqa?{H<3 /މ ϰ&.h4oʰh \ o3v/e_eFRƎH, (S(k`駟vrX!:Xΐ=xAv ]4—ԅ$47w(/C N'jin▐4!@W2de 0~^g-C6s$p$Agef8|yTkkys BKh aFPr@,U6(蜣KlvQ'#h-dcAma+"3$wN#[40Fgl'v`diժU!?D3nL1~Stc 2VלĄD9x>Hg!LҠ;V4.v0I $GȉkaeIijdH*^tSix`:_I\q7ݽ|0QN7H+Wi{)1| BCW 7K1zD c 3b}> nl6P }9!K#CZep b%>o2*΢p6 j❃͂$O rJUTi+R=ϋ+k'X0i,X5ʒ1<^Q(af2#h2ahr\DR\w@\(_c| ٦I02xYzYNSиr.L ɣ'R\p% ȐQ%@ 9RJiYL-Sza#FiEr/CMj"Dʺۄt~ѓ*t03҇ Ń@I\yRa'.WPE@(0OX5%qQx?3 ՟#F*~R2"fjaј SlopaHE "0P.}.[B􊆃/&[?/E@P"" XI~}ߵ2YaO=T~399j[)Sb@U] {fLZ5D65=V2/(@XcФ)"pldwe3Y:ea\r [ze8|E %/STUd [zu&\cgPX|lR#"LrB􊆃/a}CV"(/15t_[s LV`ȣ>F9Hk"P؂<0cfʑ#GѣyM6֭[A)SkEo׮9 ,Y]2œBT]z"P4|Gs̱/iu]i)hZ|yI(Ul2JAUE ;Coܸq֭!ZpnO+Td … 5jhոAah5jԠzonm۶2o޼b[ni$s9 N:)Ye>|8tI~UW)Y$w >k5R|ڶmdQ3@-=X^Ny7bĈ hҥ߿(;k>]H@k׮E{c$rh#\e;IJm=}7X-4fϞMNB{weQׯ__2h/B ;wV Z`ϕW^ɇ&GydSVZhтCFƏ5fZH ?ɧr IuY(6:6m 2ycL# xWp3(,d¥sò),ZVZޱV7*Edt]v;(Tx!'R5("(D cwXgDhxCsNjFA˝.{"-͛/5g]%J?s(9*e59EP1 _y;߷땘)1H1T 'oqu I u:_nv[ML!MS p_ ES&2p?;k)Qb۶ҒaV#bcWy"6-挚:g/Ru+ 1j4_@Qw/XL^hȇZ%f֭=z%cFexp]4@3Z/aK0hHYJC |hNmL y<4K/;|cvXTѓ'NYK7JB6`1gtRl6P:Kn=f`¶wu*קaOSd݌|:@ZC|m,mxSiN{jN|^o]zkۘzjM񂁨򏹕PI0&`h%Pz=hŗ[l#~,- Ri|rZ:g/maË$ qtdNc?O|Z_\#oc֭2bz/(٦W3@I#"W#*/ "u:7_ZR4ʐ%yܩ P4im`~#&7a4atIi8ڔOO (Z=Ҏ6j?-)DfC¤"pͳʹh} ڲqms*iԖ܌85Jyӊ!ŗ|}H6 FuhLLm?s9i}s~Nur'ݱb6cJ1rJA1.qL C|!2 7NAڄLiMJw03Vs'xȬgR[mim&ÈHпAD@/)+(u41oIy6܉ ~Oc>)8W#-q."f)P5yR CbWKq"%h 3q)d7Gmy]ʗ1v= ,J P@i\jrP@i\jr <Z8FKWtX8Kra/z'1(0}+/Ǥ1A%lK,%4њô R$ֿA@y/)pa2)ۆ c%ܒ;|<}/X9c񱶥2Eq)iєX+3i"OB,ZC%Sl%Li,!K6TjԼ nΣ%/) V/hyY7ǿ#S"-QZ9?$#h;ZXʻǗ_ eƈSLV]t)7Ҷj+Hطa |?S({*_/1Vec ;x96~2UQ<;,gJÑÄc)~ FE>9䊀!2+J\ϗREXN6aW%u|2 w;D8cyҗI|9B$I&M4!^v(#f\iŗvuAml˸/d8OK5w;r*-Y[mLNJ5ZCL8m5ĚRdeM4˖9CO Phŗ;BnN /O)|k|Qy/_1KibJnchyҖsAsE1&qzC|!rk|E)p;OolN&oS͝_H3~.%Gܨ[vațPYKJcڂNël6i)ȷ]s'3$M]qf$Jǘhlc'.`Vt䃜Gj!>_|i-ϦG%C-ęa˖n@D_^R_͝מ[L3K2'1BD"ꉏi  C|f/_ȦŠ{DQ'+c8!/>[kKuFO>24!G!>rQj%sf&Jlx%KYٕ-[dQJ7jZBJ.G| K5`mſy+}/QRj_~-[)2ʜF2ܩV!]O9ʕAyLLoKrڳi!2]a( 3qOx֭&lشI}:ժ(oeW?_C_~sK,unvsO_D {bgb/ոZ]*;ͭFu_0jxN?kƘѷ,C588H!Gw҅ 7?~}=OGa<_|)yvjҤģ/<s{[Ӵ^GRq_@Z㗒/KGoͪ[m+mʯ=yȝS7+W>ɳf{ʚ$,n;Kz:dӆ _5jǜ~!mrUEYcWK^caFMY{Xսmz߸b٢}ɯs,E{+O}/wow^9bˎ'#|bin5eC%6ei5;cOZ}vv뀩Ǐ]5BOq_w_l)|Ki7C/[i?KYB*O=ϐ{*W}~2F ^\_񷏗0+~ytanQG5kɒ>tŊ5jJs37٨J`/Lah]Ry?9u-\L_f|K5~OZiƿ<7zZ˕~e657EgI5n7]|śxN@N+WMκzGGQ4:s꪿ Ñ'rX ;}%JgCG5a/+l~-m%`ukנkNTs)9{v;Oo&?no.ܹ8x^YAŞE ޮ0N Dz3-xɩ4-7߸q@3$N+?wғW|koٴͷN7&|#znyB/:r7ms,~h}8m1wݽq~}bYSz{UJew&w6o'{7 tc P[cukVS R9qW'{<ƩmqC/8nÿtݝS^jC})YYk5X"8Njl4@6lx+ >pՠѩ jSwZbed0IYҎOqp`sg~n쌖'V'-kK<"p *j >_[=/?>j{x%VsW6 \Zj=zSh5i7p Y%׮e6KrLDf<}^ Wj 5j78t;c,~'czD3-9w'ޮy?Mɺ I+c&[ΰa&&)d:i _}}rlX"נjҰZ5nlف·^Ү=>+ܸm˖)ڷ |ժ[Y˖9ey}󓿬W*MI iv +֭«{[{էTڠ탗Lƿ6xLpANTǟqqyDZ{п|5wUTj?x%$5n~#.-,ˣs^&Je` -=/QJnxo{vFmzTZ?^}pG3qz1mݺG0N׮^Ǣ1|J7[rկ>}E7-[mcׯ_pq u1Gb4d}1n:1\oKG 9[o_қ'i -aϝmڛVEյx챏MŪev1wJ@Z=oc2 $@<':RٲL/&W._]w=~g߾T)P}ӛop7.Xأ~^?5Q,I_ v ^oMv].g~BZ1 5X('0`i72(Úy}1w|?/h1FAE \vB5C5NnXJ~$H*}>P/[7s_p-/Tv:v:`!xUb4Y oN-_ <%qm5[?W╧[g?Dr4edKT볆_or  V,F7?^Ip]C&dΜn[Ι=SߏN\zhl9IDATW9z,(Q\C[הɭ[h/\b%?S2ٽ5b1 h]%J =kҥ8+;tdSZ׫{⃏^4 =&zu7j|'ƫq܌?jT¶NniWS.]?}[K5^|y?ljdj"054DP9iL2ʛgfmŸld?K>-ki|+cB6~UO1J6l[f9uzOq`S.~'[y{Yse'Xj~?Cĸ}fK,eJXN5Flܰᙻ|uui陼-_bau^1^GּNd*T>=m)ON'y_y?fz-QMDѭZ֪Ti.=ӻ3~8MKOEhسn]=bֻa{ Mq s~{b0: ! 
nlg\Ϻ y5e8G{kAUQFmo?j`θV"0bD`^064 {ʔX$ }ad\_|i/׌/Mp>1U\~7}7ݹ?N^r9R&4*jUkU.jv{xj5Z ?6-ŀ*jJ1{D=9ŋg'> ܁,wŸK~ɏ&>yTǿ3Rx+s4USJ,qс8ioU*W.]޿jK HF1n.YLVShC;?'8"ThֵYtX{*/3ykU(ojRhw׿^Q䫖-ӱiӯ*.[zYz(@B"8*WVL(YjT,f»R9(jjL5~3*W8b/4缢hE2(L"c>N=׮^fUԬc-[AVIYYSC`䇿X)17Y%- 6gK~N~QAFUT TQJOMz!Y1p%Jo?}mI%Kq[ ̪RNu7[4\U8;x?=fŃSe?y3PIK lĈ vrhg?Q} :5 l_B{vקMgmټr?Mj(m߆ XN u+UW"jE+VyvIg -Z"4Xu+%hKv];1g/_޸z;{U:~HC418^ ƪMlM L5>IύBPt{P+xS7ji TYo7`^DifBx ߼pM%O=ܿ^ȜPOIVQ1sÓK[Ư)!؜Zr%Ft¹w'~;']v팯>}~|5|1{Ǖ3v=极-`Oߡi3ovrQqcz=sU?X 3&s4(a;o INNg7MZ¡襇q"6sgN/c@;2jAaM^wÝ 0ԴFmw}+W2(%K}U6g/Hha4uڨ XExM[S-#F17n\Jk|=,ip̧x ^"[<o5}_yuEK7a鲳kIhJ"OѯGPHC:xN [[k^,,^Xn#g|5>KS;^(Bc^Fvz:T]O7Ŷ,4@Tܻnj@5No˛q>(Vv+J//I}o{?˗|ߎ-kKSѥ4Z{+0h1Kzkxgp0ןBC:)L?߭4>qE)UzՎ)џg-ּoUlQ)=Xa/Ya_Sfd_Kjk,5ĕ8YY\C#X ?GCo;X̝_*gJ+9.L"6ؚtmڲ׿djL!xڕ*g,+W|GÒkkVXLJsvbJ]eK[J=9ʛHno=ySʎI4Z^cϵ65CFbSos0\cۗ{!w۫RArw%O?e S ߼q-Yw2eipB`{C|L\<ĖX|+%~#0g]y̙{<%KѲj[(ȯJWx<ԇ=ܑ֫+YIxowQ(|hJ/W5QÔ %7T |9Ȼ[?4TT,-dDxK(se\Hkn 1s̵kblӦMׯﵚ>SDvڕ-[Vdɒpjc鹿sCW֨U[&ضz'u5<1;2BH`L!"_F]GEEKXAS~yf/4i"f۩1<؊__T4wL}h'1[HaKN<&5ySӧW_}Fꪫ6/[cǎt63f̐!CzaM0{cZC|d-R4!/"HP˻t))Sۂ7f)'"\"Oby&Ŭ?C@/izA|f+5ƛ('E2m5w-KNML-IlwDJ^qZ)RJ/d9UrݷԄ  ڏ: Z}|>ƻ[|nOKO<Eq`/Ͷ&M|l\.:Zu/_fHN2{aG[͛`1'2uuAk1cf q8C|4Y#,/nl,+Ao/[w\F.SN2#)f|,ƃ.d9c Eyf=!>_ |2,(9J qĴ4wbo`1qEVi"dʊt`|cf?T*=hŗ(Y6e|qo?3^LqX26Rx0Ɔa4ƞ-y!>|뛄m/dwܛ36WMb.`[zX2񇶔G]Ȗ[Q=g_[̉'`Zl*O Z\2Z./)v:'ŗM e qu!kK& DR> #oc!5]z4nmiߗҿi -J !O;ZC|&3=5kBr3Bn'˙O7Ҿj[\ &X9K#z. &ZC|YҸ`ZZ6mHz|0u͝BrA̟؏G14.! fd:g/]IC)Ҷl{Y'K ڏi '㌍y{QpFsIbc6-QfU!>_|ׇظ;Ė}J)GiH9i#Z%m XSJcl)Ĕ ~X ze/JZ/kǗ--RXf=v3'Z;& gfB|eG4wv#=g[O6n$ecLPz:ldžHo@CC@y/iQ2Ç >5gSm-Ji1iޙw2G vb"暢k=L/: @i`:y/=%dR6_¶_єOej4@nli+#ߛcT1oO!&|!Sl[@1l>?H9IcA_͝]prr4ͲS ܨ'GԒh& ԕ%b|fC&E-豈i֦|W4UAm#^2Tr?b[=ڒҴɘ5yC|!Ҍ6\h J%NMY1VPNC'̗]sǼWqR Zemd kg*WEPf$僖1jC|!rmlg95;V)|fb.K=y 掼)Y[73 6 P=h0hhŗ,CD.m7=0R=;3wJf1L1E#N9hi 0|eFR>٣VYY˭cC_Nmiؿg@IX2żV ZH}ŗ[rl\LSM\CtKS)󶴛N<ٶuKeH?jShE h[7o*Ulے|Ôxٱ^4#& T)VmQ7 MAo?D u3T!r֘[Ә9©FΖ|.]dhEaiHƾЮ: BC [.]h"!!˧(&hT+[lX*T']!!h G"(F e)"pZ("^B XC80ӓ ysIR1#e^\)>z)^K/tg&}zno-[Ƨv͘1#f@n 4hԨQ] T|RI0F$S->A'OHN >9^2řTBkTuC@s'uX^οL=1MrRˢBV*"D _F nM"(@HP iƩي"(Ŋ@>:X_|>L_b)VaG@_xGuN̙{}vR(^T҉dN |?;;CUPOz<t>I*D_T8ՈXfPUuF%˔)jE/efI[i 6Ee3۴yU@ D5οLEE@P#2YSE@ʗ!$5QPE /ueߒ Nzק@h _1bذayiAয়~g}h}5kh"?*>zĉiyt~$ ʼnvTD@_mTj]Zb@U]_`(s25/RhT,ue*S@ҥ٥5Lο,2hU"(@Ae֤("˰ڭ("P/C;Zq\@xO5#;:2;&(B􊆀/ r*۷oN54ׯbŊY(6m̚5˝9A~t>Is۴<Լ'QU2A܍Dڔ/Sb*L.͝T#b}:2ŀT#̰|է("EBE5M"(!C@2d*"οL˛:'P tAM/!zEC}}ӛ:vUO;ժU۰aCP]5krN `NWH$hAA'OHN/ _߬q\XAEP/bXP3,IB_hw"~^ bxl04D2,|/M"(AC e S{E@P2 tM"(@_|j1pB4-i"M’eϞ=_ivN<˴T( jj!nݺZj%!KOI+@PE =6(kE@PŽesPWE@(BLcfq?:2:Xևrc?~|J:0aBΝi>;w/sW4#0gΜ=3F~|K;7UP-;;'C@瓤8Ohxu^AHs'ȹ@l2ˀgο xey|UT), ˰ک("NB_}"("`PAPE@2y[,!K?Y ksФ/=o…9SN<@'+9 @B _l/*eA>;NUT3f̘>}4{L81PF1|GuTX_ L:c!s5TXVjoݝj@OW_}kF_|N;İx~_}ڵk$3ٌEu҅O7]nݺee2,Ie˚4iVc+Vhyp2H-;v1{lڨH[nݨ䃌Lp/ׯ_d>Wd436_~RL]\|ŋgOi8/;:,eeO?tL;T&G}[od{饗R;(`e|y '$p'i ;JM߾}h>ZH@y[rwk<8A,h˶mz}s< 42^s4hj tI<ČMiG P\r%ޕ}jԨqAy._ռ5kHXؖTqg,L8eSO+ jU&#лwoƩ- 4_{"ʕ+3'߭WL&NM'xbphp4D

?N:i͗1 4(RA^ƍQ]>Eto޼ë* :_2P4m"SnYgଔkV"md cwPF*"]` yeqRE@+Ao +j"(@PV~jjE@PˢU*"D h姦FPEhP,\U"(@PV~jjE@PˢU*"D h姦F6d>qz(A@ =!E`֭wqGcOLҥKY +|)dO>GqDv%k2(/%auU?c)S߲;u]Idz-]6^ReTsVӕ 7|pXn`]]~79e6m<-8"`J 駟'B|رcU֣G5k&Wҭ[VZ%ѫ@"P&cS 4֬Y#<)†q/'{aN;4<<"B$y)@"ef}%~kٲ%!HkԨ*l'kAIv;w.α.;z뭴ڍ70BsΒ%Kk^:f9{?0İ(Ο\={'\~&M˽H}cNz.ѣG~B7xonA,wwq>G뮻O#;˥~c[MP"W<ʗbM` ڋnF3<@C=6nܸ ,h߾=0mt¯"۽P&$:uΝ;rCNofvͭ[$0k,ayРA4kNC%e˖=ئgI͟?Zkoe˖7 N>Sdƌ3p38㥗^:sIqF[hѯ_+U;C<-c~ L0W^˗U8jGuTO?c> .,9 >@rFz;ub!I+w.U$[N!}BYjjj`n.=yt+K&A[+\?*ppL!HZ8]jӦ n.zcȁN8/ '+O=%‘8ʕҥ N$"vVQYMW"8vxf^bdeh]v^zӃqwq5ԫWv-sժUd^p*U1N:م sϱ3Viȥ4aj!@P ]D~GZ_Kw?M| LJ7^=3xuŴ26_{-л- Y0O>ĝ}]'4ƲIIj.LZ1bD*QˆesMm.,+7ωGZeq!0j^r%~]Ocq%GM6syp? 1}ꩧ0yuCxq8Mp7rIdC6ab C q) `M7D%XABrJJ!|RS .t V*`8f!H7FxC[f͓O>I|̤FFRo!Z2-'SBoSA)#g/4s ?,zo,Ly,V÷.NW;%]7LS+>Xz5f7ngbzm Q˱w… Xb-yBðlK'#cI|$K@ |!TE@(R(fE@P A@2C2Z("P(/ ެ("!(_fHFk2C#7dh"|ڬS!0yd&x0Uџxζ'D&xC,&0ƿJ*C C~58Ԯ]G2?e9^{>s=: :%,PR@P ]Cuvط2LEؙݾfdYeVc=i6l@lKz*b2=N駟fuV`INXV,Vql);0sK_4hd]YiX扬˒+sI*dMW-I KF+\eX;, HYYEXۖ8tb7|3 &?Of;<[c6X;c0V-X/cƌS9e \._׮]iYƍI,X礓N:ujnFrk:B=U"@lȤG$ e˖7o<Cn[v2x荱6l)~N_~i߾}!EٿNZ\7hv,o{ExaJB=.y={ĩeD֤!Nǯ'͹|+@رs'Dj<\^̙3InSQE@c^ 7rvHkPu(CB Uleu뭷r&;@V$w RիE[.b)M8dKEw190O܊0z ?Ν;]P/2&1H:wTi@5:'٧ܻ{Nߚ6mtkPdZh—2BTp$ۄά5+ ]jU{Jm⡺xĻȄ8 ">@Se/ivܳ3р"vԿ {GYfW/tH?{=:5q"n.@'v3ym5#C%lE?"3w._9m8sD qWkf/ڐI)(3ҫJÊ@P uEi=u7o>`AF2ʔc}b@O[T2]2W]`/0p{"^,nЪU+oE (_LU ,Az'OHQo]h(Kj;I*!BE;,h0mbZ "6"X$Hk'.,KKn͞W޿bQQ@k>|_|Iz T,&y'*Fuڰ_~$p߼y㗄-R*OI5V2 0;ϟ?#!)Aqln7 Q4{f){vHT^xQ"*x$jի8Z*Y_NUqTlirưWWW%ȿwx9_EA0 "0/y(ׯOjmVE-˧i=X zɱh֭[U@o Ϫ+Uˡ|_,"էVj|d#?|þrP#/ \bs @{$ާw  E)nw?k׮Y5q}y`.Y)+jK;(#cDnWoҬÖh H۫UXeb`+" OwޡC[t-ڞF*fsTE؉x}ݻ' ܜG&Ȝx-":{؏=rF]ϕ0ƀ@r o9s<ۇwugXUR<Kclg{(5&%?yDcdeľ|b{W6 g&b]bVrwZgy)~/ K{ScA`UNxت@UkUcĒ-~=>4X˧PN^:I ,%=hڰѤ_oعLݕB]=R?~cv@5Fע0þolϺeqĥZ5f.J8|ˆvnv_N߃W'Z k:a͉[R #A |9i”,9*{g@h'ifrinnn,E e6;$iбbNO A1—L' A I5 @c/{NA  @rXiA 4@  /5 @c/{NA  @rXiA 4@  /5 @c/{NA  @rXiA 4@  % PqIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/cookbooks/img/ssh_proxied.png0000644000175000017500000011541500000000000024747 0ustar00coreycorey00000000000000PNG  IHDR wsRGB pHYs  iTXtXML:com.adobe.xmp 5 2 1 2@IDATx]`EN@HH!ޫT齈 U4#"`H E ER!!@ͥv{ݙo|3;uu`FHnzbF`f#0GY$#0"F`ҏH'F`E80#fcO2#0p`FH?"ǎdF`<0#~Eҏ?is^ղBFWGK?pYsZpuu#'ӊ3vD"v$I$<`fN f7A2Xstf'IHE&)9"f &&h DF " aF`T#,jXPg-3f]`e@Y$#l# lҤ70&BW(TF`r$aF0"&J,6`CYᒄ bFD0(TF`fK6`!,br~S4hЩS''ǐp"x%ɣ-[6 3yT|F !m U^ 1I",$,0#*xTB `@[pAD3'X7f#氒C@Hr; W#Vwu%'8J|.+7b͙լg_f$T1SoƠ<E?ȁDR y࠳bycD0(Tz` QltB+?tH1G '3!,bb[G ` qQC'F |2"fJ-׷v 5(cbx(%\*Ň`1Qb9믿dT +Gt-ҩ"Eǂ#5>#9f[#+YCUк̢HTӖD,o>}غ$`Ivrb 4KYp{H  `yq QcqD(%h ` qOl!(.YVk@!\X^+>ɤ{9&".l^J##4>CP" 9D#ҁOfBYLŶZaE<@r,]Aז^(>Y07lbbbRJ͙3RЪCQ5&"Dݳr>̅˙u֮]Ξ=k$VDrrV1 Eu[ᣦfidBZ j/[ $e$ >ɇy`1OZ9)98[d 5KΠ#D _76̓y-5Eh VV'D |Y^|0"H!-B) 0"Vm tWI!,L$"zϿAY4Ikh]'be"f !oX2Nj>L˩ k]cEǏC d$']r\Ĺ `X^PaELTPDb7x*D*]ˮ+4,/0 |vxE>2AAA=\ʕuXbԃhCuW.-yT6G,meuF}޽~< L'$gv1)Kc6xv٨4W|ӥ?)ba hu9GYKdǚ:,֐6QG\'#dLh;x795n+>ͭBZ=˝7h 8"fC]p|оw@"1.`y#_!m%?iPȐϧq?MxG"z{Cܼ;\A?~j<>p[$ |LELdlUPOE5cp[GFE?~}Kө¢b]Ο95K-*֢)c=2*kwʅjSo>u3'KڿE}:5|/=Q϶+j@Êzyu+H"wo߬٠E@b+ȧ~EX߆h@?C"K26*s&A ӵT'$h[ŋ3e$zyW7_3WˎKUyt酆,rL65*TIW/]0}ƒ{w>1**,]>Z *֨?]ݒuq4DCæ5KW A *ė\b]GD̙26wҕkU5f>=D| fYZ5|b$44TXΈG<|\PIa?wjQ4K9scaBXw`6G"6Zm0*r ] 9j qa|`̝4PK@_򄔈Dw׆mܸrʈw^3'+0|dχ`1[9h2Je͛4lďѻF!@hd@ 9,uݼ'$L2~O-bb"Q|o ڰG(¨~fW|)5׌Ň")<}<6ΉS+Vhѓ'OMMr Tʓr5?'q-ɣ}pytdʛ3OA,߹q'{gfts3{~P=3EXأݥ T3h3'|n$2ƴEDڶli;*P45J%r Ԥi"Uϕ6htze7Wb 6~|]!b"X<{!O+$lS|ć`1[9ʕkذQĸ(P< SDQaqg)bTfyc5Y`=1}dʔ{6OPu1C:%EkBz4eI",b_9tsdaHhck cM(?#<&\3y𓬼,vS?c| 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/cookbooks/index.rst0000644000175000017500000000014500000000000022770 0ustar00coreycorey00000000000000=================
Mistral Cookbooks
=================

.. toctree::
   :maxdepth: 2

   cloud_cron
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/faq.rst0000644000175000017500000002456400000000000020446 0ustar00coreycorey00000000000000==========================
Frequently Asked Questions
==========================

What is Mistral?
----------------

Mistral is a task management service. It can also be called a Workflow
Service. Most business processes consist of multiple distinct interconnected
steps that need to be executed in a particular order. One can describe such a
process as a set of tasks and task relations and upload such a description to
Mistral so that it takes care of state management, correct execution order,
task distribution and high availability. Mistral also provides flexible task
scheduling so that we can run a process according to a specified schedule
(e.g. every Sunday at 4.00 pm) instead of running it immediately. We call such
a set of tasks and the dependencies between them a workflow. Independent
routes are called flows and Mistral can execute them in parallel.

Who are Mistral users?
----------------------

Potential Mistral users are:

Developers, both those who work on OpenStack services and those whose code
runs in tenants' VMs. Developers use the Mistral DSL/API to access it.

System integrators. They customize workflows related to deployment using
either special scripts or manually using the Mistral CLI/UI.

System administrators can use Mistral via an additional toolset for common
administrative tasks. This can be distributed cron, mass deployment tasks,
backups etc.

How does Mistral relate to OpenStack?
-------------------------------------

Mistral was originally started within the OpenStack community.
It is still used within a number of OpenStack projects for various purposes.
Mistral has integration with OpenStack: authentication/authorization with the
Keystone identity service and actions to interact with all major OpenStack
services like Nova, Neutron, Heat etc.

Why offload business processes to a 3rd party service?
------------------------------------------------------

* *Reason 1*: **High Availability**. A typical application's workflow consists
  of many independent tasks like collecting data, processing it, acquiring
  resources, obtaining user input, reporting, sending notifications,
  replicating data etc. All of the steps must happen at the appropriate time
  as they depend on each other. Many such processes can run in parallel. Now,
  if your application crashes somewhere in the middle or a power outage
  occurs, your business process terminates at an unknown stage in an unknown
  state. So you need to track the state of every single flow in external
  persistent storage such as a database so that you can resume it (or roll it
  back) from the place where it crashed. You also need a health monitoring
  tool that watches your app and, if the app crashes, schedules unfinished
  flows on another instance. This is exactly what Mistral can do out of the
  box, without reinventing the wheel for each application time and time again.

* *Reason 2*: **Scalability**. Most workflows have steps that can be performed
  in parallel (i.e. different routes in a workflow). Mistral can distribute
  execution of such tasks across your application's instances so that the
  whole execution scales.

* *Reason 3*: **Observable state**. Because flow state is tracked outside of
  the application, it becomes observable. At any given moment a system
  administrator can access information on what is currently going on, what
  tasks are in a pending state and what has already been executed. You can
  obtain metrics on your business processes and profile them.

* *Reason 4*: **Scheduling**. Using Mistral you can schedule your process to
  be run periodically or at a fixed moment in the future. You can have your
  execution triggered on an alarm condition from an external health monitoring
  system or upon a new email in your mailbox.

* *Reason 5*: **Dependency management offloading**. Because you offload task
  management to an external service, you don't have to specify all the
  triggers and actions in advance. For example, you may say "here is the task
  that must be triggered if my domain is down for 1 minute" without specifying
  how exactly the event is obtained. A system administrator can set up Nagios
  to watch your domain and trigger the action, and later replace it with
  Ceilometer without your application being affected or even aware of the
  change. The administrator can even trigger the task manually using the CLI
  or UI console. Another example is having a task that triggers each time a
  flow reaches some desired state and letting the administrator configure what
  exactly needs to happen there (like sending a notification mail and later
  replacing it with an SMS).

* *Reason 6*: **Additional points for integration**. As soon as your business
  process is converted to a Mistral workflow that can be accessed by others,
  other applications can set up their own workflows to be triggered by your
  application reaching a certain state. For example, suppose OpenStack Nova
  declared a workflow for spawning a new VM instance. An application (or a
  system administrator) could hook into the task's "finish" event so that
  every time Nova spawns another instance you receive a notification. Or
  suppose you want your users to have flexible quotas on how many instances
  one can spawn, based on information in an external billing system. Normally
  you would have to patch Nova to access your billing system, but with Mistral
  you can just alter Nova's workflow so that it includes your custom tasks
  that do it instead.

* *Reason 7*: **Formalized graphs of tasks are just easier to manage and
  understand**. They can be visualized, analyzed and optimized. They simplify
  program development and debugging. You can model program workflows, replace
  task actions with stubs, easily mock external dependencies and do task
  profiling.

Why not use Celery or something similar?
----------------------------------------

While Celery is a distributed task engine, it was designed to execute custom
Python code on pre-installed private workers. This is a different use case
from Mistral, which assumes that tasks can be executed on a shared service and
that they do not require (or allow) custom code uploaded in advance. In other
words, Celery itself could be implemented on top of Mistral, if it were
started now.

How does Mistral relate to Amazon SWF?
--------------------------------------

Amazon SWF shares many ideas with Mistral but, in fact, is designed to be
language-oriented (Java, Ruby, Python). It is hard and mostly meaningless to
use SWF without one of its SDKs (for example, the Java SDK that exposes its
functionality as a set of Java annotations and interfaces). In this sense SWF
is closer to Celery than to Mistral. Mistral, on the other hand, aims to be
both simpler and more user-friendly. We want to have a service that is usable
without an SDK in any programming language. At the same time it's always
possible to implement additional convenient language-oriented bindings based
on features like Python decorators, Java annotations and aspects.

How do I make Mistral know about my workflows?
----------------------------------------------

Workflows are described using the Mistral Workflow Language based on YAML.
There is a REST API that is used to upload workflows, execute them and make
run-time modifications to them.

The Workflow Language describes:

* Workflows
* Tasks
* Transitions between tasks (what should run next once a task has completed).
  Applicable for the "direct" workflow type.
* Dependencies between tasks (what tasks need to be run before this task can
  be executed). Applicable for the "reverse" workflow type.
* Various policies applied to how tasks should run. For example, a "retry"
  policy helps with running a task multiple times in case of failures.
* Ad-hoc actions that can be used to transform the input or output of other
  actions for convenience.
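For illustration, below is a minimal sketch of such a definition: a direct
workflow with one transition and a "retry" policy. The workflow name, URL and
input parameter are hypothetical and only serve to show the shape of the
language.

.. code-block:: yaml

    ---
    version: '2.0'

    signup_flow:
      description: A minimal direct workflow with one transition.
      input:
        - user_email

      tasks:
        call_app:
          # A standard action with a "retry" policy applied to it.
          action: std.http url='http://my-app.example.com/signup'
          retry:
            count: 3
            delay: 5
          on-success:
            - notify

        notify:
          action: std.echo output=<% $.user_email %>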
What are Mistral tasks?
-----------------------

Tasks are entities written with the Mistral Workflow Language that define
certain workflow steps. Each task has:

* Name
* Optional tag names
* List of tasks it depends on for reverse workflows, or list of transitions
  for direct workflows
* Optional YAQL expression that extracts data from the current data context so
  that it can be passed as the task execution input
* Optional task action (concrete work to do)
* Optional task workflow. If specified, such a task is associated with another
  workflow execution (subworkflow).
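As a minimal sketch of the dependency flavor from the list above, here is what
a task with such a dependency looks like in a reverse workflow (the workflow
name, task names and action parameters are illustrative):

.. code-block:: yaml

    ---
    version: '2.0'

    deploy_and_notify:
      type: reverse
      input:
        - vm_name

      tasks:
        create_vm:
          action: nova.servers_create name=<% $.vm_name %>

        notify:
          action: std.echo output="VM is ready"
          # In a reverse workflow a task lists the tasks it depends on.
          requires: [create_vm]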
What are Mistral Workflows?
---------------------------

A set of tasks and rules according to which these tasks run. Each workflow is
designed to solve a certain domain problem like auto-scaling a web
application.

What are Mistral Workbooks?
---------------------------

A workbook is a convenience bag to carry multiple workflows and ad-hoc actions
within a single file. Workbooks can also be used like namespaces.

What are Mistral actions and how does Mistral execute them?
-----------------------------------------------------------

An action is what to do when a particular task runs. Examples are:

* Run a shell script
* Send an email
* Call your app's URI
* Send an AMQP (RabbitMQ) message to some queue
* Other types of signaling (email, UDP message, polling etc.)

Mistral can be extended to include other general purpose actions like calling
Puppet, Chef, Ansible etc.

Is it possible to organize a data flow between different tasks in Mistral?
--------------------------------------------------------------------------

Yes, tasks belonging to the same workflow can take some input as a JSON
structure, query the subset of this structure relevant to the particular task
using a YAQL expression (https://pypi.python.org/pypi/yaql) and pass it along
to a corresponding action. Once the action has done its processing, it returns
the result using a similar JSON format. So in this case Mistral acts as a data
flow hub dispatching the results of one task to the inputs of other tasks.

Does Mistral provide a mechanism to run nested workflows?
---------------------------------------------------------

Instead of performing a concrete action associated with a task, Mistral can
start a nested workflow. That is, given the input that came to the task,
Mistral starts a new workflow with that input and, after completion, execution
jumps back to the parent workflow and continues from the same point. The
closest analogy in programming would be calling one method from another,
passing all required parameters and optionally getting back a result. It's
worth noting that the nested workflow works in parallel with the rest of the
activities belonging to the parent execution and it has its own isolated
execution context observable via the API.
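A minimal sketch of what this looks like in the Workflow Language (the
workflow and task names are hypothetical): a task refers to another workflow
via the "workflow" property instead of the "action" property.

.. code-block:: yaml

    ---
    version: '2.0'

    parent_wf:
      tasks:
        run_child:
          # The task starts another workflow instead of a concrete action.
          workflow: child_wf param1=<% $.some_value %>
          on-success:
            - after_child

        after_child:
          action: std.echo output="child workflow has finished"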
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/index.rst0000644000175000017500000000144700000000000021005 0ustar00coreycorey00000000000000==================
User Documentation
==================

This part of the documentation provides detailed information about Mistral's
capabilities and use cases and also a complete description of the Mistral
Workflow Language and APIs. If you want to write Mistral workflows and run
them with the Mistral service, it is highly recommended to read the articles
of this part one by one. Please pay special attention to the examples since
they're proven to explain Mistral concepts best. However, you can also read
individual articles if you are searching for info on concrete features.

.. toctree::
   :maxdepth: 2

   overview
   faq
   terminology/index
   main_features
   use_cases/index
   wf_namespaces
   asynchronous_actions
   wf_lang_v2
   rest_api_v2
   cli/index
   cookbooks/index
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/main_features.rst0000644000175000017500000002603600000000000022516 0ustar00coreycorey00000000000000=====================
Mistral Main Features
=====================

Task result / Data flow
-----------------------

Mistral supports transferring data from one task to another. In other words,
if *taskA* produces a value then *taskB*, which follows *taskA*, can use it.

In order to use this data Mistral relies on a query language called
`YAQL `_. YAQL is a powerful yet simple tool that allows the user to filter
information, transform data and call functions. Find more information about it
in the `YAQL official documentation `_. This mechanism for transferring data
plays a central role in the workflow concept and is referred to as Data Flow.

Below is a simple example of what Mistral Data Flow looks like from the
Mistral Workflow Language perspective:

::

    version: '2.0'

    my_workflow:
      input:
        - host
        - username
        - password

      tasks:
        task1:
          action: std.ssh host=<% $.host %> username=<% $.username %> \
                  password=<% $.password %>
          input:
            cmd: "cd ~ && ls"
          on-complete: task2

        task2:
          action: do_something data=<% task(task1).result %>

The task called "task1" produces a result that contains a list of the files in
a user's home folder on a host (both username and host are provided as
workflow input) and the task "task2" accesses this data using the YAQL
expression "task(task1).result". "task()" here is a function registered in
YAQL by Mistral to get information about a task by its name.

Task affinity
-------------

Task affinity is a feature which could be useful for executing particular
tasks on specific Mistral executors. In fact, there are 2 cases:

1. You need to execute the task on a single executor.
2. You need to execute the task on any executor within a named group.

To enable the task affinity feature, edit the "host" property in the
"executor" section of the configuration file::

    [executor]
    host = my_favorite_executor

Then start (restart) the executor. Use the "target" task property to specify
this executor in the Mistral Workflow Language::

    ... Workflow YAML ...
    task1:
      ...
      target: my_favorite_executor
    ... Workflow YAML ...

Task policies
-------------

Any Mistral task, regardless of its workflow type, can optionally have
policies configured. Policies control the flow of the task - for example, a
policy can delay task execution before the task starts or after the task
completes.

YAML example
^^^^^^^^^^^^

.. code-block:: yaml

    my_task:
      action: my_action
      pause-before: true
      wait-before: 2
      wait-after: 4
      fail-on: <% $.some_value < 4 %>
      timeout: 30
      retry:
        count: 10
        delay: 20
        break-on: <% $.my_var = true %>

There are different types of policies in Mistral.

1. **pause-before**

   Specifies whether Mistral Engine should put the workflow on pause or not
   before starting a task.

2. **wait-before**

   Specifies a delay in seconds that Mistral Engine should wait before
   starting a task.

3. **wait-after**

   Specifies a delay in seconds that Mistral Engine should wait after a task
   has completed before starting the tasks specified in *'on-success'*,
   *'on-error'* or *'on-complete'*.

4. **fail-on**

   Specifies a condition under which the task will fail, even if the action
   was completed successfully.

5. **timeout**

   Specifies a period of time in seconds after which a task will be failed
   automatically by the engine if it hasn't completed.

6. **retry**

   Specifies a pattern for how the task should be repeated.

   * *count* - Specifies a maximum number of times that a task can be
     repeated.
   * *delay* - Specifies a delay in seconds between subsequent task
     iterations.
   * *break-on* - Specifies a YAQL expression that will break the iteration
     loop if it evaluates to *'true'*. If it fires then the task is considered
     to have experienced an error.
   * *continue-on* - Specifies a YAQL expression that will continue the
     iteration loop if it evaluates to *'true'*. If it fires then the task is
     considered successful.
A retry policy can also be configured on a single line, as follows:

.. code-block:: yaml

    task1:
      action: my_action
      retry: count=10 delay=5 break-on=<% $.foo = 'bar' %>

All parameter values for any policy can be defined as YAQL expressions.
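For instance, here is a minimal sketch of policy values computed from the
workflow context at run time (the context variables used here are
hypothetical):

.. code-block:: yaml

    my_task:
      action: my_action
      # Each policy value is evaluated against the workflow context.
      wait-before: <% $.initial_delay %>
      timeout: <% $.timeout_minutes * 60 %>
      retry:
        count: <% $.max_attempts %>
        delay: 5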
**NOTE:** It would be rare to use both break-on and continue-on in the same
retry block. *break-on* should be used when one expects the action to be in an
ERROR state for some number of tries, but it may eventually go to a SUCCESS
state, thereby stopping the loop. But if *break-on* is *'true'* then the
retries will stop and the task will be in ERROR. *continue-on* should be used
if the action will usually return *SUCCESS*, but the action has other results
that can be used to signal whether to continue the loop or not.

Join
----

Join flow control allows you to synchronize multiple parallel workflow
branches and aggregate their data.

**Full join (join: all)**

YAML example
^^^^^^^^^^^^

.. code-block:: yaml

    register_vm_in_load_balancer:
      ...
      on-success:
        - wait_for_all_registrations

    register_vm_in_dns:
      ...
      on-success:
        - wait_for_all_registrations

    try_to_do_something_without_registration:
      ...
      on-error:
        - wait_for_all_registrations

    wait_for_all_registrations:
      join: all
      action: send_email

When a task has the property *"join"* assigned the value *"all"*, the task
will run only if all upstream tasks (ones that lead to this task) have
completed and the corresponding conditions have triggered. Task A is
considered an upstream task of Task B if Task A has Task B mentioned in any of
its *"on-success"*, *"on-error"* and *"on-complete"* clauses regardless of
YAQL guard expressions.

**Partial join (join: 2)**

YAML example
^^^^^^^^^^^^

.. code-block:: yaml

    register_vm_in_load_balancer:
      ...
      on-success:
        - wait_for_two_registrations

    register_vm_in_dns:
      ...
      on-success:
        - wait_for_two_registrations

    register_vm_in_zabbix:
      ...
      on-success:
        - wait_for_two_registrations

    wait_for_two_registrations:
      join: 2
      action: send_email

When a task has a numeric value assigned to the property *"join"*, the task
will run once at least this number of upstream tasks have completed and the
corresponding conditions have triggered. In the example above, the task
"wait_for_two_registrations" will run if any two of the "register_vm_xxx"
tasks complete.

**Discriminator (join: one)**

Discriminator is the special case of Partial Join where the *"join"* property
has the value 1. In this case instead of 1 it is possible to specify the
special string value *"one"*, which is introduced for symmetry with *"all"*.
However, it's up to the user whether to use *"1"* or *"one"*.

Processing collections (with-items)
-----------------------------------

YAML example
^^^^^^^^^^^^

.. code-block:: yaml

    ---
    version: '2.0'

    create_vms:
      description: Creating multiple virtual servers using "with-items".
      input:
        - vm_names
        - image_ref
        - flavor_ref
      output:
        vm_ids: <% $.vm_ids %>

      tasks:
        create_servers:
          with-items: vm_name in <% $.vm_names %>
          action: nova.servers_create name=<% $.vm_name %> \
                  image=<% $.image_ref %> flavor=<% $.flavor_ref %>
          publish:
            vm_ids: <% task().result.id %>
          on-success:
            - wait_for_servers

        wait_for_servers:
          with-items: vm_id in <% $.vm_ids %>
          action: nova.servers_find id=<% $.vm_id %> status='ACTIVE'
          retry:
            delay: 5
            count: <% $.vm_names.len() * 10 %>

The workflow *"create_vms"* in this example creates as many virtual servers as
we provide in the *"vm_names"* input parameter. E.g., if we specify
*vm_names=["vm1", "vm2"]* then it'll create servers with these names based on
the same image and flavor. This is possible because we are using the
*"with-items"* keyword that associates an action or a workflow with a task run
multiple times.

The value of the *"with-items"* task property contains an expression in the
form: **variable_name in <% YAQL_expression %>**.

The most common form is

.. code-block:: yaml

    with-items:
      - var1 in <% YAQL_expression_1 %>
      - var2 in <% YAQL_expression_2 %>
      ...
      - varN in <% YAQL_expression_N %>

where the collections expressed as YAQL_expression_1, YAQL_expression_2,
YAQL_expression_N must have equal sizes. When a task gets started Mistral will
iterate over all collections in parallel, i.e. the number of iterations will
be equal to the length of any of the collections.

Note that in the *"with-items"* case, the task result (accessible in the
workflow context as <% $.task_name %>) will be a list containing the results
of the corresponding action/workflow calls. If at least one action/workflow
call has failed then the whole task will get into the *ERROR* state. It's also
possible to apply a retry policy to tasks with a *"with-items"* property. In
this case the retry policy will relaunch all action/workflow calls according
to the *"with-items"* configuration. Other policies can also be used in the
same way as with regular non-*"with-items"* tasks.

Execution expiration policy
---------------------------

When Mistral is used in production it can be difficult to control the number
of completed workflow executions. By default Mistral will store all executions
indefinitely and over time the number stored will accumulate. This can be
resolved by setting an expiration policy. **By default this feature is
disabled.**

This policy defines the maximum age of an execution since the last updated
time (in minutes) and the maximum number of finished executions. Each
evaluation will satisfy these conditions, so the expired executions (older
than specified) will be deleted, and the number of executions in a finished
state (regardless of expiration) will be limited to max_finished_executions.

To enable the policy, edit the Mistral configuration file and specify
``evaluation_interval`` and at least one of the ``older_than`` or
``max_finished_executions`` options.

.. code-block:: cfg

    [execution_expiration_policy]
    evaluation_interval = 120  # 2 hours
    older_than = 10080  # 1 week
    max_finished_executions = 500

- **evaluation_interval**

  The evaluation interval defines how frequently Mistral will check and ensure
  the above mentioned constraints. In the above example it is set to two
  hours, so every two hours Mistral will remove executions older than 1 week,
  and keep only the 500 latest finished executions.

- **older_than**

  Defines the maximum age of an execution in minutes since it was last
  updated. It must be greater than or equal to ``1``.

- **max_finished_executions**

  Defines the maximum number of finished executions. It must be greater than
  or equal to ``1``.

Workflow namespaces
-------------------

Mistral allows creating workflows within a namespace, so it is possible to
create many workflows with the same name as long as they are in different
namespaces. See more at :doc:`Workflow namespaces `
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/overview.rst0000644000175000017500000000630600000000000021543 0ustar00coreycorey00000000000000Quick Overview
==============

Main use cases
--------------

Task scheduling - Cloud Cron
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A user can use Mistral to schedule tasks to run within a cloud.
Tasks can be anything from executing local processes (shell scripts, binaries)
on specified virtual instances to calling REST APIs accessible in a cloud
environment. They can also be tasks related to cloud management like creating
or terminating virtual instances. It is important that several tasks can be
combined in a single workflow and run in a scheduled manner (for example, on
Sundays at 2.00 am). Mistral will take care of their parallel execution (where
logically possible) and fault tolerance, and will provide workflow execution
management/monitoring capabilities (stop, resume, current status, errors and
other statistics).

Cloud environment deployment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A user or a framework can use Mistral to specify workflows needed for
deploying environments consisting of multiple VMs and applications.

Long-running business process
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A user makes a request to run a complex multi-step business process and wants
it to be fault-tolerant, so that if the execution crashes at some point on one
node then another active node of the system can automatically take over and
continue from the exact point where it stopped. In this use case the user
splits the business process into a set of tasks and lets Mistral handle them,
in the sense that it serves as a coordinator and decides what particular task
should be started at what time. Mistral then calls back with "Execute action
X, here is the data". If an application that executes action X dies, then
another instance takes the responsibility to continue the work.

This use case is described in more detail at
:doc:`use_cases/long_running_business_process`

Big Data analysis & reporting
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A data analyst can use Mistral as a tool for data crawling. For example, in
order to prepare a financial report, the whole set of steps for gathering and
processing the required report data can be represented as a graph of related
Mistral tasks. As with other cases, Mistral makes sure to supply fault
tolerance, high availability and scalability.

Live migration
^^^^^^^^^^^^^^

A user specifies tasks for VM live migration triggered upon an event from
Ceilometer (CPU consumption 100%).

Rationale
---------

The main idea behind the Mistral service includes the following points:

- Ability to upload custom workflow definitions.
- The actual task execution may not be performed by the service itself. The
  service can rather serve as a coordinator for other worker processes that do
  the actual work, and notify back about task execution results. In other
  words, task execution may be asynchronous, thus providing flexibility for
  plugging in any domain specific handling and opportunities to make this
  service scalable and highly available.
- The service provides a notion of **task action**, which is a pluggable piece
  of logic that a workflow task is associated with. Out of the box, the
  service provides a set of standard actions for user convenience. However,
  the user can create custom actions based on the standard action pack.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/rest_api_v2.rst0000644000175000017500000001511500000000000022110 0ustar00coreycorey00000000000000REST API V2
===========

This API describes the ways of interacting with the Mistral service via the
HTTP protocol using the Representational State Transfer concept (ReST).
Basics
-------

Media types
^^^^^^^^^^^

Currently this API relies on JSON to represent the states of REST resources.

Error states
^^^^^^^^^^^^

The common HTTP Response Status Codes
(https://github.com/for-GET/know-your-http-well/blob/master/status-codes.md)
are used.

Application root [/]
^^^^^^^^^^^^^^^^^^^^

The Application Root provides links to all possible API methods for Mistral.
URLs for the other resources described below are relative to the Application
Root.

API v2 root [/v2/]
^^^^^^^^^^^^^^^^^^

All API v2 URLs are relative to the API v2 root.

Workbooks
---------

.. autotype:: mistral.api.controllers.v2.resources.Workbook
   :members:

`name` is immutable. `tags` is a list of values associated with a workbook
that a user can use to group workbooks by some criteria (deployment workbooks,
Big Data processing workbooks etc.). Note that name and tags get inferred from
the workbook definition when the Mistral service receives a POST request, so
they can't be changed in any other way.

.. autotype:: mistral.api.controllers.v2.resources.Workbooks
   :members:

.. rest-controller:: mistral.api.controllers.v2.workbook:WorkbooksController
   :webprefix: /v2/workbooks

Workflows
---------

.. autotype:: mistral.api.controllers.v2.resources.Workflow
   :members:

`name` is immutable. `tags` is a list of values associated with a workflow
that a user can use to group workflows by some criteria. Note that name and
tags get inferred from the workflow definition when the Mistral service
receives a POST request, so they can't be changed in any other way.

.. autotype:: mistral.api.controllers.v2.resources.Workflows
   :members:

.. rest-controller:: mistral.api.controllers.v2.workflow:WorkflowsController
   :webprefix: /v2/workflows

Actions
-------

.. autotype:: mistral.api.controllers.v2.resources.Action
   :members:

.. autotype:: mistral.api.controllers.v2.resources.Actions
   :members:

.. rest-controller:: mistral.api.controllers.v2.action:ActionsController
   :webprefix: /v2/actions

Executions
----------

.. autotype:: mistral.api.controllers.v2.resources.Execution
   :members:

.. autotype:: mistral.api.controllers.v2.resources.Executions
   :members:

.. rest-controller:: mistral.api.controllers.v2.execution:ExecutionsController
   :webprefix: /v2/executions

Tasks
-----

When a workflow starts, Mistral creates an execution, which in turn consists
of a set of tasks. So a Task is an instance of a task described in a Workflow
that belongs to a particular execution.

.. autotype:: mistral.api.controllers.v2.resources.Task
   :members:

.. autotype:: mistral.api.controllers.v2.resources.Tasks
   :members:

.. rest-controller:: mistral.api.controllers.v2.task:TasksController
   :webprefix: /v2/tasks

.. rest-controller:: mistral.api.controllers.v2.task:ExecutionTasksController
   :webprefix: /v2/executions

Action Executions
-----------------

When a Task starts, Mistral creates a set of Action Executions. So an Action
Execution is an instance of an action call described in a Workflow Task that
belongs to a particular execution.

.. autotype:: mistral.api.controllers.v2.resources.ActionExecution
   :members:

.. autotype:: mistral.api.controllers.v2.resources.ActionExecutions
   :members:

.. rest-controller:: mistral.api.controllers.v2.action_execution:ActionExecutionsController
   :webprefix: /v2/action_executions

.. rest-controller:: mistral.api.controllers.v2.action_execution:TasksActionExecutionController
   :webprefix: /v2/tasks

Cron Triggers
-------------

A cron trigger is an object that allows running Mistral workflows according to
a time pattern (Unix crontab patterns format). Once a trigger is created, it
will run a specified workflow according to its properties: pattern,
first_execution_time and remaining_executions.
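For illustration, here is a hedged sketch of a JSON request body for creating
such a trigger via the cron triggers endpoint described below. The values are
hypothetical; the property names are the ones listed above plus the target
workflow reference:

::

    {
        "name": "run_backups_nightly",
        "workflow_name": "backup_vms",
        "workflow_input": {"vm_names": ["vm1", "vm2"]},
        "pattern": "0 2 * * *",
        "remaining_executions": 30
    }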
.. autotype:: mistral.api.controllers.v2.resources.CronTrigger
   :members:

.. autotype:: mistral.api.controllers.v2.resources.CronTriggers
   :members:

.. rest-controller:: mistral.api.controllers.v2.cron_trigger:CronTriggersController
   :webprefix: /v2/cron_triggers

Environments
------------

An Environment contains a set of variables which can be used in a specific
workflow. Using an Environment it is possible to create and map action default
values: just provide the '__actions' key in 'variables'. All these variables
can be accessed using the Workflow Language with the ``<% $.__env %>``
expression.

Example of usage:

.. code-block:: yaml

    workflow:
      tasks:
        task1:
          action: std.echo output=<% $.__env.my_echo_output %>

Example of creating action defaults

::

    ...ENV...
    "variables": {
      "__actions": {
        "std.echo": {
          "output": "my_output"
        }
      }
    },
    ...ENV...

Note: using the CLI, an Environment can be created via a JSON or YAML file.

.. autotype:: mistral.api.controllers.v2.resources.Environment
   :members:

.. autotype:: mistral.api.controllers.v2.resources.Environments
   :members:

.. rest-controller:: mistral.api.controllers.v2.environment:EnvironmentController
   :webprefix: /v2/environments

Services
--------

Through the service management API, a system administrator or operator can
retrieve information about the Mistral services of the system, including the
service group and the service identifier. The internal implementation of this
feature makes use of the tooz library, which needs a coordination backend (the
most commonly used at present is ZooKeeper) installed; please refer to the
official tooz documentation for more detailed instructions.

There are currently three service groups according to the Mistral
architecture, namely api_group, engine_group and executor_group. The service
identifier contains the name of the host that the service is running on and
the process identifier of the service on that host.

.. autotype:: mistral.api.controllers.v2.resources.Service
   :members:

.. autotype:: mistral.api.controllers.v2.resources.Services
   :members:

.. rest-controller:: mistral.api.controllers.v2.service:ServicesController
   :webprefix: /v2/services

Validation
----------

Validation endpoints allow checking the correctness of workbook, workflow and
ad-hoc action definitions (Workflow Language grammar and semantics) without
having to upload them into Mistral.

**POST /v2/workbooks/validation**

Validate workbook content (Workflow Language grammar and semantics).

**POST /v2/workflows/validation**

Validate workflow content (Workflow Language grammar and semantics).

**POST /v2/actions/validation**

Validate ad-hoc action content (Workflow Language grammar and semantics).

These endpoints expect workbook, workflow or ad-hoc action text (Workflow
Language) correspondingly in the request body.
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.093567 mistral-10.0.0.0b3/doc/source/user/terminology/0000755000175000017500000000000000000000000021506 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/terminology/actions.rst0000644000175000017500000000336200000000000023704 0ustar00coreycorey00000000000000Actions
=======

An action is a particular instruction associated with a task that will be
performed when the task runs. For instance: running a shell script, making an
HTTP request, or sending a signal to an external system.
Actions can be synchronous or asynchronous.

With synchronous actions, Mistral will send a signal to the Mistral Executor
and wait for a result. Once the Executor completes the action, the result will
be sent to the Mistral Engine.

With asynchronous actions, Mistral will send a signal to a third party service
and wait for a corresponding action result to be delivered back via the
Mistral API. Once the signal has been sent, Mistral isn't responsible for the
state and result of the action. The third-party service should send a request
to the Mistral API and provide information corresponding to the *action
execution* and its state and result.

.. image:: img/actions.png

:doc:`How to work with asynchronous actions `

System actions
--------------

System actions are provided by Mistral out of the box and are available to all
users. Additional actions can be added via the custom action plugin mechanism.

:doc:`How to create a custom action `

Ad-hoc actions
--------------

Ad-hoc actions are defined in YAML files by users. They wrap existing actions
and their main goal is to simplify using the same action multiple times. For
example, if the same HTTP request is used in multiple workflows, it can be
defined in one place and then re-used without the need to duplicate all of the
parameters.
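For instance, here is a minimal sketch of an ad-hoc action wrapping the
standard ``std.http`` action (the action name, URL and input parameter are
illustrative):

.. code-block:: yaml

    ---
    version: '2.0'

    get_service_status:
      base: std.http
      base-input:
        # Parameters of the wrapped action are fixed in one place ...
        url: http://my-service.example.com/status/<% $.region %>
        method: GET
      input:
        # ... and only the values that vary are exposed as action input.
        - region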
More about actions: :ref:`actions-dsl`.

.. note:: Nested ad-hoc actions (i.e. ad-hoc actions wrapping around other
   ad-hoc actions) are not currently supported.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/terminology/cron_triggers.rst0000644000175000017500000000067200000000000025114 0ustar00coreycorey00000000000000Cron-triggers
=============

A cron trigger is an object allowing you to run a workflow on a schedule. The
user specifies which workflow needs to be run with which input, and also how
often it should be run.

.. image:: img/cron_trigger.png
   :align: center

A cron pattern is used to describe the frequency of execution in Mistral. To
see more about cron patterns, refer to `Cron expression `_
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/terminology/executions.rst0000644000175000017500000000434000000000000024427 0ustar00coreycorey00000000000000Executions
==========

Executions are runtime objects that reflect information about the progress and
state of a concrete execution type.

Workflow execution
------------------

A particular execution of a specific workflow. When a user submits a workflow
to run, Mistral creates an object in the database for the execution of this
workflow. It contains all information about the workflow itself and about the
execution progress, state, input and output data. A workflow execution
contains at least one *task execution*.

A workflow execution can be in one of a number of predefined states reflecting
its current status:

* **RUNNING** - workflow is currently being executed.
* **PAUSED** - workflow is paused.
* **SUCCESS** - workflow has finished successfully.
* **ERROR** - workflow has finished with an error.

Task execution
--------------

Defines a workflow execution step. It has a state and a result.

**Task state**

A task can be in one of a number of predefined states reflecting its current
status:

* **IDLE** - task is not started yet; probably not all requirements are
  satisfied.
* **WAITING** - task execution object has been created, but it is not ready to
  start because some preconditions are not met. **NOTE:** The task may never
  run just because some of the preconditions may never be met.
* **RUNNING_DELAYED** - task was in the running state before and the task
  execution has been delayed for a precise amount of time.
* **RUNNING** - task is currently being executed.
* **SUCCESS** - task has finished successfully.
* **ERROR** - task has finished with an error.

All the actual task states belonging to the current execution are persisted in
the DB.

The task result is an aggregation of all the *action executions* belonging to
the current *task execution*. Usually one *task execution* has at least one
*action execution*. But if a task is executing a nested workflow, this *task
execution* won't have *action executions*; instead, there will be at least one
*workflow execution*.

Action execution
----------------

The execution of a specific action. To see details about actions, please refer
to :ref:`actions-dsl`

An action execution has a state, input and output data. Usually an action
execution belongs to a task execution, but Mistral is also able to run
standalone action executions.
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.097567 mistral-10.0.0.0b3/doc/source/user/terminology/img/0000755000175000017500000000000000000000000022262 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/terminology/img/actions.png0000644000175000017500000006343200000000000024440 0ustar00coreycorey00000000000000
@jwTLzo 3J opd F\E8D4RCi>c=Ml0NHS@9 f8l:邋LGb8t%˵jDf2ϹR2C !֬IRƧc?U0[g}WxtulnY )m,3g%Iߞ@ 6&~rnk$ㆢ&.{|rF<NwHR}NN^l{F،Wy HiTVt'X6W% 6S'Ĩ-d=P:i@RFJ;qEH L!b=i;/ ZD=; M(bco#|ݧF$bFʖ^d}&1+6dk" )e%tZW S۬ Ϲ}G?[jm;>!ZID ~$= ;X<$+f$n6a"$iR-1JRi<:+C Rԑ()/؈@H$dYZIX6EQ"\ *ME__=}+ڣ%ly5J)?lh ι"Q$:(@ju(kz(| _|K o86X ihۤM `G>?s{mS@RRjHJF$r[ vΫ{$@%]HXaW7+c=YXmВMBVrSݞyRRNSm]0 (?&\! )K(IHvL վu$:(@j0( {a G^y_0B @RHiam"NTDɊ0Y[Smnl*bT$iZZG/BF-KBLmDj"T=V@8R{tNl[%mYC 5Dh~QaHǒkI06^RS5&7{  6'VŐb,g!FBHVlU+abz8| @B B;n-}l0SdÚaK-I-H*"'[0k+&xVHۈ+ g)i _O ;ڢ@ st}?R>I=y$ ƒ730 E"IlS\ PMWIҶQ@Œ\e$ L+͜z:l}kXM+LZϷ/ _O 0~9zG=yH$ CO߳^}-}/}~W'07/>HXt>tt Mϼ^@b$֖o gwn䦋! ]0vI[7u\=# H$-;_ ѣ?n !HXv6E\1C !|vw b$/@B at"[6]w_s?7Z H$;_r[BY! })ځ]SnQxeF@b$F+ҟܘ_j˝JU}!0|4 ?`O{OW7O@LNJHA ah@ aJ֫J._Qxa׭ "Fk/=avzY-q:_͉~ 5f\tQ@[!HtP:j%r?S)CMh Ο5&֍H$:(oU;7=ȍ@/:(nb"7 >XvDn1|4Av|DnR¡GkWm >e?SƷ;،?bt)K.+l0v'ShWbtzv OameYߥStD\6yUwVp  "$Ce]; t) @t`ًfS ~Op÷v58Y#(b Ɍm")-?g|9cْyI:eӶ!#.iXʱeHi$x\ nHdm/ΚϺWӽ|8rCS'Ei~h@DAd(/qRdbْ~榽f^`u:QnٝcL>yR_;y,gɞ9'kQ6fE#ǀ-c4Q{16oD` @\O>as7$33kgSnA4"S /vbB(vwgPLFڞݑ171d=yRuWUFN:MF۶.PX7"+bXv$"R顤'h3Mx\Oޒć:.HO$l6$bo౛fMTHc3rsrYћ-U $Y93`ƸYlp}\*H5i"Ep=d,:1Yv.(x1uDn¨TRMiFwLD m$N>&#WH'HN"(4<As l,kϱdy'nhi].ʡ:@Fdd[7kb n' TƵї,G\Kq!3J7bm5@MIΦ,p'deL爤E.$mݘ$I 0I{deSfOdFPmKgbb״9R>H$kd̷zTJ>b$@7=a1|ʼn[pd6t5YYԱ$}gF%@Ħo1VN)J^2:+ݹ`;F]MVofג̭=O_J#r)ɋ󴻎 &塒 B/# AO&i; u,-gWFu) G/l7Іp _0gz13t]uQpحۉ%I.{8Y@Ѕ mXN>疻h:D4瓕5vdB—"ۭzhy_`k6 }Ij¶#U(Y穫'2մ3v$!+J3)h[lhgTZPrN 4M`FcG ؆%D@}Lz [HS 7ֹI$!H !H }`V13!ⶴ#H *cf3H{669}xm՝zn E_[+ͺ1ϦCbt 6P Kʓ,wP79;C'1ϦCbtuLn~@41uh׊ֵ4"1ϦC!l|tf $1,3~p'{=^w^vp֔!rOgl/ڜ)ְsev tH(Ș֛,:Ԓw}>puQ( ^v[*M-va3HzC`3_Hsfay̴qEϖ [bB7ϦCbtս$F3Fv}"~REa>6!i>2m /mnCl@oD]\OK9>??gfK.]@bR99zI!ۂkb{{dv+.vuFԖ챞Dܮ( tHζd91U7k-:];]$LpMww<44X!s Ŏղ@tHζ1R dejT׵h-&ֱ(P)KT, g!1:H*S$ΘxSВ 7$gMBdp">֡@Z]S(:,ͻ53$ר!6߶~v.'.;1s.Y.i3Χ$[{B*p$1ϦCbt5ʘcf|gٖcdy[LQ2:XXd&! 
'RN92lC2.ӎ~w8axEềc?g > > >#/|ay-o;=g!1:?~ "v̿tH$>!}/և o?/|者bs Ml^Ff.Z/G(ٛ}_=\: ?+C ׷@?-z_b(Op3_!z[<]܊uD[Ϸـ@B xuD̗dŜ_([+  g5o\k-#|64=T~0x }:UyG?[*ß*$f\#AN~3%Q\0d4?&u[ N )d ] Oԅ ß>He˫Ɵc۶3'w꠱Hcbi,*=٤U]gm Za3iۣ?[)lHPV( aEBa#zzIZ~V(R-b!`] $vUSGZ5U+di*ily%j[Y9H=tR_PuFϘkqgۑ(°ؑWޗ*͌ wCi ;&|Q,iBӰ|Mֶ>7U#gv$RIG`bFr >/]WcP;E N;lu|MCk;-lkg4a0%fWG>>Y4HzƷ{o8d֌7 "v@~WHP Թ|5V6ޮkC\B"ky/\>J z*ɅeQ4/dYl5RpȫU&rB )E@~RV, )}턣 ZOQ"_W RZHQۊ 6!`] pFYœZӈV@ ;:~bgzr ;la a3E, X"4($a1c@JW iSUmWu^l a3S P@+'i#b& J+\ $ kϺeӏQ@R>MA//l@XM(ci@°ճ$ @Z)oIHA^FF!0y׾pmo$ @ZIf5-o{5"8rc_, Zowjɤ5H$`[\#ºC=i:H2i5$ҴZHQN!`$jf 6[ mQFNG aX~{l%e.mw$H$IbSn_Ww0$i^BHE5-Ҫ_^fKT۞X%Y%a^)i%HoRxW Y %*HkJmBSl Úozۋu}:YG$ \S?DރP1Λ^pWI$aX+/^_0ҋk_wmU@HձzoUkO^.l؝fyvKGGn4 o}6J%kE5cjSb fFH5mio3i"i Zdyug{ =޲dw#j+&lTޙ_φ )Os$l?3M5YleB =%]zٷ()FtVH7t6h;(?^2hh;[MΖV $H~Eϻ_8{hW^I$klߏ_( Llbkzʲb/~%C_[zX>Kk=}:dϋ~}@H'd~hFNjnd [N7~+04 $Me\ߧR*-_ϯXO)k ^B%BzS m[B.k*kßg#0@k[)F-Hc{ BYa(&+$ cC(*LhOX7D߇3CQ*MH i[oEH)fI|ihʾbɮ'ѠdEEfm$1?o~(>pyV84g۪!k]AR3|6 C Iw}_+ځuHl2VX}+!O ^J(KXI}a'R@ʓFYz9J;^tl@h%n Y?>d6 Z'^_߶3|y_;',@99g!1aT4 86i&YWX!&JgY}B~ces.aϦCb$ؠ-vEu,FM ~G?[Q E$ \ )ڿ;x!BdvV\,Xi/M^eKT#副NYI$:$@6HegdrllBĔM^V0GIBMIpU(N, $0oɶs6w+, myvDHčdu&$d4M3>%"|h mg)dŌf}PN%(Z$$$=6J*(}[MP$:$@u$/`K%Nlt %+r$s6' *[h1p^( 5)PBJdFHtH X <;e_m~AZ6!.YVZ-N8 م=w++Z $ m.`iW}׆  ָ?PLjL#Eigare<ETyZCbcR1EJPDTJ _eha틉+ m'V$$b/ y]ml@1ϦCbt6cl:$Fg1ϦCbt6clC᳁Ig1:$ >t6:g1|6!1:g1|6!1:glCbt6tHc ~:s_3Not6@8Ʋq.m@Eal4 C  0 C @b k`&0RDMXkF\'Nh>&[N'7f lI}IctkERm_|wU;D:︥U'[|gN VeI$Q*v>6u$$7BC@;sۦ޻{| ڎM6}@}ϽM]#V mHQZKt&@pkGcXkEןnyum-@;d;;7}I˞;ޅB;xǙc)ކOTJx>6Xgx:=IG^y_?qOBc~|Sxm%}]rO@P=^(!,(_[8")ܞNj_ItB_\nHieݷ{AmLnD-QTrFН;ttAhcJCi*Rk?.﷧;Mmfw9Zɍ|&!YK>}3S^wsww+Ei97ЀJ)^OMI˻۱a-̊vGQ6uʩ4KG|Nl6{o2[(AY0?ִ:$%ya1H6λoq>Yɉd-DTB.@}i8z"8p1j +5>xzڐM\hZHqke7_t!V>xngHS|ӑ ֌@CWK<wenݽ5lsm7֞~w_9Z.nvuw"F1HG+E9vumCiFL'Ev <1^puG:Gsm7@pOnS9T[wέ9'D'C#n{yU9m {2Y9f[۵97tm tUZ]ܕjiZHbۖ./#C\6<Ϩ[[H2dt܍}%yJw[w ^7;;!rҵ#.w [sӎz^d;~y׆j,'v.mYp.d ".hJH;g39Y7^J՜!hD|t<"NL!-/DHXx=w6,[ŋl;]4 MV@ZuMʫn)'M ņ }?]I5ߙdPUhs^|s'+yw@򑨭)׍&J$Xh+1oҜF5ϐ~('|4b?,I"[/I(~0@3֗,ϒD2\ce/@ʓ\$oLLbh9Y9YXxosx.C e"(RsBa.!X m9 tF4kÇ\rCO5}Rl.I T>w 8N7S/5U@,3mlqlmydea#H9/FK756bNRbO:l_HDn=8F$_cÎUﮰn#]J$q㋕6r>Ll¶'+߁q@J;p?%zaBfEl*y&3Xy֏Y)~xc ڜ!:ݽ fYlR9痓ZLvp) T/O0O0Pÿ?lOS#wHCt#(VkPkDw&0͹=Vz?L4=l vz=vlo9c)A{U\/]<l(҃ϾGX,|XrQ*=x"9~{]ƿaĴ},ӻ~ >'rλ>#s$nik&z-" eÜ2Px(䐿]Ţ]j"P PTJ ^Qh&e{͠@ @;zABBtIÊ=A_71 &8$hMbnbQHhƩhHvpZJLuro& c\a _ ܛlPw>pun{a _ (munxt:0:&Fcjcbt:0:&Fcjcbt:0:&F@tW???t:cjc?᫁I0:&N@ta _ tLN@Ǵ;=r޴:(['w|`:u}lrt:hOzG?[ >tjy|?s,G("T](X0_y yw:ifN%:W_^>OXQ[_;J ` $ <߻̐@Ȕݶ$XK~Xz#S?xw&kQ )4\FS="ÔmQEV$\$%?<,?؊j#I2E`,=l*'fo.҃ͿܗK¦YIf ROs0WmTD-<xz5tƷ[,.wPxkO^.}=+Y;@"ZC_vm_ѶAvpѦvhsvn`9qp~Fa$l$\#WbGOc(ZoWHھg(GYHXdW0=cHß>`-7@j颳GOOx{% Ყێ@j|{h#unwwѮ]seku5|lBg";40E2V0C~S%I~=-ԶIϗ 9=Ǝ7o7t _IPiʏ)l=' !H plZ&6--Y $BzΌSؙ$3\@l)ڢ4]\E;DzxwZKil l?\t{8?H(uA˽OF}v‰2 $XR>"H1g#aL;^Cwyr} UD  tO ;=ITJ֮fƴC 5 x5زf~N?#I QDI92iyPbYcY~4!ɶ'<^^M~R.+6cJlK8+#бd<IO*П9 a[ <^aӑ}Oa:N}+t-{lKsv1Ϩ@r#/jȐ_L eO&yJ> Awғj?;tӟsNvMWMX 5oͭܲ'RP!i ۇ}(m@p^քEtRBHio]$9BH ߕ TcyciIc8|5 !p uװ U;|v p7kAc}7㖻)sݜOϮ2Ѥ[O@qi{G\/vH)'$vV87fX\2o|~DnkQypm}_;a Y|>e(by@oY97/d}5f"Er4fd Jǣ40[8{ R g%|R3-HMgƦ:;,7HjDo3"&14>Yϋ=,667 CaflJr )2wHІpHu}>r$c(q>2@OH.eW S(nKi@jًRMV!֝X5I@U Q"[}6EMHܘcذ۳ѡ@L[ Fw|\6{ JTHsd,Xdv h8(!l% )dE)KNQ6@B lJoY!0Hc6#dknY&aF9!vOb+C -$9?iu$˵]tO9A'6/%jUHSD)㽔qzHɱyQb#[!‰:nSjIZT2Z}H1W:/0$RK,|RT=|5ɞTd0us3 plo{W .S N mtPNu=.dʅB ɊĤEt!k۪$ZK&oۑ)L! 
UzJ$RL"SAǼo|(VI3_:Lޚ6H>g0E,ְ'v.'DNe,{@JRןpǺ3瞈yw"h#ߝ3̓$M,E -*ͶҐ-hM1&lFH~XFUvJNk6a.iz8}xmiC-:5KKF !0R=YT猘vldm4.bvm7+nh|`x^#n?sn5V#{n'|)rM"̓'%I}r]$3>!}S4{;z̶^ aUmG#@$@j$;Ӽ]Is~_Z¹1ULKG'70ct+GnSeݷ>4|_oxi/ᜦHK^$HjhAϖ w%t+43wb?W9t:Q;n-ɾ0@p>{h OhaɒЩB!633z/FמS?xӾnh3^=[}lL!?aH`H0Y~jh@%c?Xt=l+ eBJѦؐDEro ۘ⣿p2v_{rMQғڼi-wܲ =BE1w ]*uNYg<򅱼G>?ɟjd=o%lMj_?`+m#7ۼGgY[IAjKl_ZG#fe!\ WYn y.:-6+o)0Ӯl7 #o{L6 k-5Q$5+bdؼ__U43CB5Yݲ(ca + 鸲u?du:>f$R $u{bb()X7X׿] /-$Ѷڣh[y#ӭYi3#Q(I°VfO6_5k[u(Ҟ6pEi !Ȑ V(nWdKRUHmj:602dQ;|VX 0ӇiBT&+Fz&H$¼>IrVl{$c;S==Z'.:e=P0°հ5uʼn0b'Hy7@U\>A0mG',6W mv=+ulnF !ng#-1S K8!4Addb.ڣ~oPHa_8e?eH=aQl dQ.`N=և[FH6g/5bP( ԔH J%u 9 p5Iر"R6Mv̼R{f3#ʦq3} C 'QQ{EÜ[ ~6߳9V@Yd@v{P%jdt|i$RQnZl}obZMq3;}  }P8Վ{ն'&D SM0Rs^V3H)6b˭T-bR~U%ڄh)t^yH6HVlT*>fpR2F$YmT"Gcag9O{bR{6@N̔[:Waa;$DLFYyA $c# v6&gWHT Cd= 3@jc^6.|z g!VSQ8V 6b䅚MNAjF{6@FϺy17W ~ؾ@~4-$+3+X-)L.vȊpVLVXD_H1;f,6{K ~Ė %g"lo3ks,@Z%?xa ,6 l2u5n҄,dgIو&cC-bۥoB !6kHH(.VU΢Y n{bѧv-@Z?yCⶊ( U#bu<I߅ [yұ$O pPYִR:q8ެȌ T gŪ~gu:_jm:5¿b$0H΢-z}Ў»_~/zH 階%(5M?]x I+xk$cjcbtP+* c0:&NIv%)rc0:&hG4w5~W1tuŰ;./1|511:~W1|511:~W~_ tLNa|5??)bl۝lW11:T`wRA}l˟ OameOl1?p@o{S^?Tw5%:l&:j??$6%c^1aB nubUjB%M@[ 1B V`jZQC rf8@,@SyeY79@o,5Kn {Pˡzk,P`{UҐ9le%ږS?Kn @$#l(y}` ϖ<lZnكà: oX͏5WF5|ZߗJP2Vf[//l.TozWЦıjcqߗzMv9@ٖ.kX־Ճ,<??W_מWAm{fO`rl͟ಎE#50Dj;}o¡5}S3J-تw hEfD,clFτ`s~`iUj&:Fyy h"^|{3 IɰN`g!5w,gE9^:/龱Nw x싉i=u-y̷_S^PfmXg'WFUwaE6t݅;9ϓyUly] _eorX޲n?ՍͲ .} 3 'w-i.,&XC#B8YVXˉ~lY64=KN{J ۻiLmb19BP7TM/gS0/ݢqd#n_Oe 5y|(;^klF? 5`xi;l;z7oTV~78\BTv7hr $ۺS4.e++ϐ]C7B4g0 BҼH,c!G#{n`\W/T5׺Jۢj|ܿ1+;g,m-kU\t{I{{oٷ,{3ҼyD4ή`[JOZ&oXp9t"uϞL:Y[~P7Ámv"N%#{H wo@5^h3&u#w{wT;9 6y3)o/]qS `3}^׍`\|!݊9ĢXp W7`.#ЎgE]8n[h0yq^}{C*?vlF@Vu[~~OI;\+-@۵Eېcq&m:iXmI؆{qZ )GzF&-OXb2Wn1oy XͶ=y ls gfC`ÑTs4'"㞡z"?a1vO5>=A!.ݝ+ﰜ2fCqB'! (VXS7i'FڳAv;Vm: G35M&K`1`clun{/U5&-j_1 [Y3ִ4f{h#pFL$&fMTp/e(gpZةj|KF^@)x}(& 6o#'O0 8-A]`ju ֜n$/meac5JqvtPPб8{Oېj|I3u0_*S}Fu-4 `Sqq&Oud/17)7dpcm{䡪|{T/:fv_ tU_-n]uҁ0r={xyR~lUU=-Uzذ#0RP%`)}7ӱ^rqnY>? zoI fnnw< @K3ZOZ\ϚXƟ9$zo.Jpt\{ڭb@7J9о.ɻA}bl^h ݞ7/lawl٧AՃ`lQ\j](Cz]2[TAmUclNgB@-S}l`L[(*G~VLe՝_(Uyl`( ݋'(VPyД7(uЋa(vG3@Ⴭ0>3@Ⴭ0>3@Ⴭ0F0@a}n!'wIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/terminology/img/direct_workflow.png0000644000175000017500000003435300000000000026204 0ustar00coreycorey00000000000000PNG  IHDR,7g 'iCCPiccxڝwTTϽwz0z.0. Qf Ml@DEHb!(`HPb0dFJ|yyǽgs{.$O./ 'z8WGбx0Y驾A@$/7z HeOOҬT_lN:K"N3"$F/JPrb[䥟}Qd[Sl1x{#bG\NoX3I[ql2$ 8xtrp/8 pCfq.Knjm͠{r28?.)ɩL^6g,qm"[Z[Z~Q7%" 3R`̊j[~: w!$E}kyhyRm333: }=#vʉe tqX)I)B>== <8Xȉ9yP:8p΍Lg kk Ѐ$t!0V87`ɀ2A. @JPA#h'@8 .: ``a!2D!UH 2 dA>P ECqB**Z:]B=h~L2  5pN:|ó@ QC !H,G6 H9R ]H/r Aw( Q(OTJCm@*QGQ-(j MF+ 6h/*t:].G7Зw7 Xa<1:L1s3bXyeb~19 vGĩp+5qy^ oó|= ?'Htv`Ba3BDxHxE$Չ"XAP44077&9$An0;T2421t.54ld+s;# V]=iY9FgM֚k&=%Ō:nc1gcbcfX.}lGv{c)LŖN퉛w/p+/<j$.$%&㒣OdxTԂԑ4i3|o~C:&S@L u[Uo3C3OfIgwdO|;W-wsz 17jl8c͉̈́3+{%lKWr[ $ llGmnacOkE&EEY׾2⫅;K,KhtiN=e²{^-_V^Oo§s]?TWީrjVQ=w}`嚢zԶiו8>k׍ E  [ly邟~_Y53rW򯎼^{7so}x>|쇊z>yzBybKGD̿ pHYs  ,!IDATx\[׹N=$/uii8m_f؉'={ ^fcc Ƙ f[-񀤭$t$!]{99`!pX8, pX8,Xel*)8,Piyh&Pka "$"H< DLz!Pi6`ZaKn:\J VgLX8,?sGU@nhvD-(_5([![cE6 "(%/8@{N P98,PXϻ@Y7(ϸ8,@'xay@%\ Tr8,pX8,P&@%LB0K bՂE&a?+;Ͻh8,Bn?}Ku#ڗ_X7qX\"hBk W&+8#_Fl}y$̺qXO;gnH#{>8@a-TE؃2 AYŊGY!"2 k\|aUaMsO2RUTPPuґNhZva!lyi>>م=d yVrC`W&XhUz|z\ BVq.[L&F/u䭗~޹8d,K2  puOwU>ު_?l#w.JU\~j*pZҲ!RNfXct{., Ţmb 󗟏 :5bes|ޛox"og!f6ˎzC"\ܿb{V-iJW\:Iovtt e{u92>;p;uSYs4)_[I᪵<r9T3m}nZ\,yX:_![݊" mvv^MsDmUФB-d4si.+ %07۝&]9GF0sQ -}]R6dqF[ܢCWQvGvhfXժK17)R Z g( (y+r;lkWkZcD ԡTބhQRn6j6V,e_"jB\{:tFf;ziVV^ uZ[@gɊmc윆YAAɵoS1jd=x pB6l : m ]x6əmj_^nt³,yz_Vy|ʂ! 
/lߞH +չ|#ZiPdF"2S.x<~[2HN>0rDwT>VkY,藊rB}H݆n勵iI ˦wU>Ho|H:ai_˪hIC;3l{~sVsƩLuX5-Yy?9Gb1윻n4b3[\Mѹ񆉭msXݜ\/]j;,˶zaEvִ|rZ;"}Pz_,<ņ7uH5Yqf9̙[}425@g-{g%)Sal 5Bk{3(ҥFx@4ryjy[a_b-XlP__7 zxŌWcXkp kKNUc2kuXȌpvP6MY6,_iJa^7ٱKX^ c{X1N]`n_ޫ)wDc'h6MXkꕢظ!ZZal\(R!`!CB0s/w`m"ֺ `Y [}1`A ֜!ziE1w`138L!ρti.Xk1ebzWS=}ΤkS[>">q++ɽkR,$,TUfFҥ& X|=AEy- W +k4A]lIȯFǘWt <ǀhˏ)~їn)}d"YXsb_xILٚwkh˹s) 5_("|MAu,Ŕw1꾮y΁ads:œ0{`MĎKKb:[uӽ3U&IIqt^A}v ^!` -\E*+xOju8`xj;{DxZiؤ>Ҫ-?CmG@oW-]*DVZ}J"'ƔJ>$",h&LZam!$ T&^Un&P|X 2$yEͳVkSݕ8jQ`Aca_IO[už Mms 1Bf/TM?m+ +EV?ܪ~-<,jV^V4k"l[gމUfXG18sɗDž?1%bAA}:<=5Pɯ#51mE5Z(/.JaUX%$'Ej΁찐vA>?T,F 0׮uSMsP[S;,ҕkwPTּ-Ȓ~=Y s'o +,s8ȃs窸zQF@Uu0;jV @l8xϑFG`W5joQ?kQ&cbX&U$ynb# |дjyaYet~Y)6deO4;*X)DDF.̎mnBmaUp8G _^3>i{";n9ruk9SIp%o?݅"mg( `źcGv*b Ѵp[͏sFTa1NHCB5+B!N?iq8@lqÝ_݂²]62u=Զ6w r91`e`HrUc-7c0#jC¶Ct|&VsCf ~G˂5tZETl/_@ψQCjV {5OԟŲ{PNyi*ҮcIz 9i%C\/?0l -,\IS)G0A,rBp%G m5_9B\ 1-I*qC;^%<4Bp,;QS|k~K_ȪzYg Qɢ@8bQ#zesz[Pͬ|XWq\n 5rh[Zq6R$g=)bi7b10ҶqDL%1{"#syћc E A+]1Ztu$}Ȃk7\MU^iXQ><~̙\;XtTn1J\o|ű+G>g}D aKaO?ɖz $tg]m_ EC =_|^]iYnijw{2ZM-RWwn z:kb, ZO~O'|AXK,U4 òZD&sF=uT4ӛ&Eqz@ ^ (Mhk`fs?~leQumsԺ8k)P~D 㰶e19Tb2/wZԘwt8,p`Q5 GlD GME9EP+X'GN7%|3_1b,k^ܲXv<(Ҫ\w _؆Kn_DZrf[6}E[d5q87+{3Kr]KW_B۵$ )Ri:+z #%Hk*˰! ‚n ;T݆ې$HHOq].id’ tn-2}Svb\&K{ZCYEQl#!iһIl-PߏJ,˜V 1XW7 j-N2R>!%K-ZEZ+V1B9FtzXR%Iphh=iA|GwIr/`ܥ-WiM`S$971*k4B3Gx2**'ٝdr4 me͌ŊhuBM0ښKvIךYmaf189\Q5I@vֲתGPJqlW_j[um[L_xXpTvPJrP #UK,NCUMY~_>屺uoZ{[9@; { (Bt!놸xkI`%*c8U=W)xj`ؓjh[.}:^DKo$K0 b;xl iB02@[$*H5>qYfQ H +)y^Oy_dT{%$7*{\ѕ[sTe:2Ӱо[. 9Pws)q(zcKb^ES+v"(ԨFvNNgF wbyh&' YP*5hnbq3? ӧh$A] >n yʞ_tᕉ9P(ur¡@; %\8I 9rb-Ek`AI֤^yJ#,tY'UbPWD +ތ+6{LJ ֘"H*~#t`!/m7_ }Bv rؘa\geJ01j\J,S l,js:*-꧚mx.TkK Vׂbi,kʪvҸb#ɅhRziKKCAͅ{c5\S=lY<;+AlS<րΑ_dO@IKkc&r$t^5<£m>}Ztk$6W|--]L/"+Apݝ̫mF2baI}Է0Ds'4Zl˪`-ȱ g,'$vR_ɣ4/V,gqB#G XYs!"X9j$؂a) $FĊ 2{i X# ղ%r2v /Hjh bzXthIi-(Ɔ/LOl; y|vDRCՐoDat'`1DKjhUg֑螀ŌH,{`]^A\I cQ!N G`A7g%+ی( ,Mk-{enfbdx|ءKc736EZ4SJ𞁅d䡚$'cKzN.{~'.P9\jrEOϪDwzbvpebuc"幫9ځFy ySU {[Mj%`͸щ}Jo;-BżkÈʧPkeVdbg{SJu8Ab=$![zހ䣙'J5׏gaBz= =:b v+I;,ӰmmFbHimYe <"P.Qt-zkTդ)8-WT}ҥ :ԐuHa\HY"F 4f"=Yq@?-5w\ e"F(Zv  *uD婾ftB=9Dxs8 r=|Y ME9_ʸW*a]?ij0͹Bm&Ua! du\^f+,eѡ )ʼnܺUQ4GU!8@nn<~- q-\)BX,-d&[1Z[LS}T ٺ.iz>)ŸCe[AhU3jQ&u +sX#Խuh˥cM < l,a /xJQdMOS ΎڅްPJ;wVHX EK/uWrG$ްJ nW2<lv4EQg}1ր~nRζUn9z;m-BbK!,F53e9ʵ8v=H=2:{Dyuj&7~)RUVB?o4;Am\mVR J7Œ)ໟ/ӼBPϱH64)!0Dafa c opT ; kƩJbroLJzh^p̞g1_3_*g}+3NB9bqXFGyhZ'nVν/NVqd<;NW5,.!Xb+Pz.OJSG*YXhˋdX?Hxv g ?G9¾@wYߗe3W݄hĨ%ю k}B&݅N by9Z:]1$☇pݥ9tN].Y](1RsqA`+6a"?z`wa! げ6t]X(=FľI^*[F`C@{XhHU$Z6D7FeZ":KY~\Dg%XºˎlB2Մr"U*~R`tS!\@e vI(]",e"LPa,tT/]E]-pT3;-ldXav2%*Z- 7H,3(,tBSb50u=YȆoƔM?GܜX &.ފ_->an72=WHMJش:AyhI?J҃W):}]buڦtBπ"%:[X,ɐXjYir-:ڳD7f1D|n{F{hucGw<,v)n@* LHQ~=0tr:i]ol7mlAl07KVX>huswzZ{:YO H*\BMQg*rl0XZe#RXP?O,r#a~110{[BSk1wC0)e ҾnʰJAJ_<Zu2K$,vǐL{\eS|{QiTR|GW w@ҪǦѢ;i;/ i|w=08am6ńy}{'~7]qX\ŒW_Etޯ^=i6k¥]Ef>apALr[z:#4}H _3+>\}GYx*'ܶ)Fp9q2W޾m5X8u5"v?qXH' HY_j]eyW4}T5ۃ )p%T Zx-['Pd/u\f~>'ð:m ?}WlI܆…;eqИEGuƧ^/P&,8/s !7,#!~-7,}OnX'|9 ~x{Kvaq_Pم%eocy,:LN4QyYXOV^ITva-V E?rEa1^p? 
B^u+- B_N`UQYp/!2 %i<g>EemR /A6k@*E5*X/HGwAKό :hFQ%u!_ʽ(76p9, )f'ތkC-^;y|a}+:t'bCACqX j.:yː"k;Pk;RZ="ĻQp]W^NٽRXr_}^ț{ZX;|׏=|]̞ݽ#)>hC>>y C4xGam*{UT 4܋Z 鸓=S8,@*x9 LO xpX G |0@xXa=cHa ,$~:y}f K,8Hؔ/8,.2Kάi$G-q'{V+<8la:dv,ha:@aJ~G PAh3  T sEpXR;|  T8,P%@%lB TA8,P$@%6J@e"apX8,\pX8,֮aIUtEXtcommentFile source: https://wiki.openstack.org/wiki/File:Mistral_direct_workflow.pngO%tEXtdate:create2014-09-24T17:27:30+00:00,'%tEXtdate:modify2014-09-24T17:27:30+00:00]wFtEXtsoftwareImageMagick 6.6.9-7 2014-03-06 Q16 http://www.imagemagick.orgӳtEXtThumb::Document::Pages1/tEXtThumb::Image::height485tEXtThumb::Image::Width343tEXtThumb::Mimetypeimage/png?VNtEXtThumb::MTime1411579650;tEXtThumb::Size29.5KBBTqHtEXtThumb::URIfile:///srv/mediawiki/images/5/5f/Mistral_direct_workflow.png-IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/terminology/img/reverse_workflow.png0000644000175000017500000003021700000000000026400 0ustar00coreycorey00000000000000PNG  IHDR^BT& 'iCCPiccxڝwTTϽwz0z.0. Qf Ml@DEHb!(`HPb0dFJ|yyǽgs{.$O./ 'z8WGбx0Y驾A@$/7z HeOOҬT_lN:K"N3"$F/JPrb[䥟}Qd[Sl1x{#bG\NoX3I[ql2$ 8xtrp/8 pCfq.Knjm͠{r28?.)ɩL^6g,qm"[Z[Z~Q7%" 3R`̊j[~: w!$E}kyhyRm333: }=#vʉe tqX)I)B>== <8Xȉ9yP:8p΍Lg kk Ѐ$t!0V87`ɀ2A. @JPA#h'@8 .: ``a!2D!UH 2 dA>P ECqB**Z:]B=h~L2  5pN:|ó@ QC !H,G6 H9R ]H/r Aw( Q(OTJCm@*QGQ-(j MF+ 6h/*t:].G7Зw7 Xa<1:L1s3bXyeb~19 vGĩp+5qy^ oó|= ?'Htv`Ba3BDxHxE$Չ"XAP44077&9$An0;T2421t.54ld+s;# V]=iY9FgM֚k&=%Ō:nc1gcbcfX.}lGv{c)LŖN퉛w/p+/<j$.$%&㒣OdxTԂԑ4i3|o~C:&S@L u[Uo3C3OfIgwdO|;W-wsz 17jl8c͉̈́3+{%lKWr[ $ llGmnacOkE&EEY׾2⫅;K,KhtiN=e²{^-_V^Oo§s]?TWީrjVQ=w}`嚢zԶiו8>k׍ E  [ly邟~_Y53rW򯎼^{7so}x>|쇊z>yzBybKGD̿ pHYs  #IDATxw\׎sc'O$8N]|#ǯ-ǯcVGF{_X:HD@B@ ] &.maaa{I3ۘQe`v<{~=@B" xI!xIxI: g$K2aʁ䒭o. VCP"i0kf 4_3 qkvIO iH&y@&TVph6DSD9jq*W\w0GeWwY+Ҩ9Zw݃I: ,wyUīZ|eH:I(RīL:BBXD.ʇ^~>!OFͦHEPE{0ċQ0i/ƅ_QJRT)7(pWvγ~dwQѷ ID"W@(dZ WNVVC[Iz_8J$:/XkTSjj+$^L"?9 0{=~wۆؑ.@W䕃J<?eQ_%O$^yxE0s H(KEٕ%~a(ɤ IgDِW2ӒNHs76$a\@ /O$%"cM #1W;Hja7O'Gf g˥@});rīl4ϯR^xя(*ilikLbxMzw޴ &>%;^fm_%sPf>u{6;ͬ3bcYhi[Ȕ O{L .74;P3&.[*-<qy7izXܗ87WM2߀m*ƀELA 5M/Ea̼o~7m>=0LiM w__ml(;YsqC'FOD sKߖhtWqމS놵C\|0Vyk% ‘ Pኳ Kv~o}jSʼ#\-JfEe+v#Wqp#S|2A]B̷[G҂91-條N* rQ/ I+:ēvIѱ65!5{Eg-^K)^;K b9g^: e J0\Y+79pkxV/Qz G=9<فN0b!PhHIo3"@W)mDzfT~붗h+g|ïȡhbd4P}YR͉E9e.i"X 3E6T›vrF3] 9\5NÌLjZ~[A ]yF\x7UjP(˖ZN'}'A d5Su:vo@Ӫ "lxKOMK~8N UmpcF,> /xTP^_7zkNL x'ߧP(?gbz[vtSk0/p|J[~_{ˊK%<E_Ļ!e8e+k?W|xx+vOly+gTlᒅͱ]S{B~={ a"N .,31ī!*^P [%7lxƌ0G??֭t=qL^'S*m^PsA'u2R≋UdvARd%H U7,]l<Rwuy"; :aR_}FJ־y_77^3h]OZD^Ѱ( /#QDT=Vxn܁cG\c, lOp9U=Աr)aʔ+V:ooYi6 =:.v#;zfS3,?j!.^c^N:K:ߡr73`wX!qf듍o?n:gB]mgmyr3{k_.>=(~C,;vbvz>]+0q&ca0f kX;A{SPe' ^qӘ?8 #.޹-~QnMr~֞Ee+GP^ y<]VQ/:}m,`)dX9^4}wz &^y[HJ,Lew8yz>S˒!‹a+-MeC%s& L k.w f;Ћ0Qsp[=eEeLDߐORj&0{+ Lh5,9V1>m3D,^DF܃LMr%\9$$^p+ UEZca HΌuM,ool<;pɼCYZdg:1z7hh3f̌};ɋZ#^ \nt}8q(>BMOԲNpM2@ ]Q Xvocxqq%*iãm(;^8zX?`X[&.IY;؊s6 b x[p̄U^&=ښn7.EvNj>Ns9Gߴ;+*vբ>1>O9t[q:tyufAf:w:[Aɶh%Á똽'g]CD4ފS Ƌ6b!x|wtx oEP^~0b,A^J[v2gx+E˸vY@=h'OZ}9xT pkt}[8̖\4wQc ȸJKt sz.$7 $,xBɨ\-UR-_Yw_^TayhKw s ǻzG$])^S}6-iJWD<~rzÿ|y\Q=pđ5zLW*w)T!Z-]ZIXl< B6}zܚ2q|oR`E87 ޹<-tggC睃+?G4fOR'I.IŶ_J<Y71's4AuCa=ҡ9Bwˬ;0L>}莋 Ϸϖ8zE$ږ<˰aF  t51ӫ'T&3ܚ'E}[ !4cCSc'.tV%x!h(,LhŇ0X& T>՜KQ.n1fdKKJz&:FMJHE2/X8l94VKMu\ wTGǴWb辮@6iZi^䂕 λSk*; _dء„/D Ӳh°7 ؍g탓+PŊF0]Rp}=*iEM^5|\{!}l]<܈TޕgyMe?RL*R,TG~)+h_%ǂwꚦ+s`de )ğ0 ѵ /꽥S@,Xb0ۥ`.'HPI0Y*W *a$v %r<_2hv j w8VBHhP(-R|KS I|orRElxer.Hu#/Q(-PJR$|լ8{WhL}ߡPsFEU-渚``ږj5'3)VpU庙ml) SF޶&DXgmM^yߝ(z]u9'ո߬}j!P-K֞  G+0z ^AWוK6GT+WJ}-c8{ֈ.x0dO7΋xhω^DؒM:`~n`t 8momu{ iFf[w9*cgGXX$[> VG/ht>RrP]9'^L| rPDt 
:tr}jmϝ9t*wE6a]f'!A[Dxs1a|G습.6gSL6:I#iL|-*h8qGIQL8/ Jx}~^O"ӟ?P5gs3m)%QxG;U(eq?A";ߣzp,Tjdr72N }$^,"eGnWrQ w$^Lyu]J%H)tI!x !BE$dWYuH,&4we$^hCUӯ9H:0gmt'ۆWkޚ:īL|([8Vww_c'+nHsD*kC{_s& НxWvޱbU,{AAk#Fg~ykWDf-4d$^|훅?}2 C ͂Ty% HxIxI$^RH$^/)$^/2;{eצN9UxQ z֮91q+wv+@52,Ll(Hriȗ2`Y"/*iՌm}/ \v߹+MG>4۾]:? +\/,j󢻜c4bwX =1n{fz$b>CnQT=E!]QԎߥܼlGm.z8-Sw]xR ] !hDӡ8ǜbD&W8$^x^mNj ֫ÖɫZ枵fN],co*Z^yB!B!B!B!B!B!*ήͳ]9")lKy:Uv¬ 2VRnҵ-Sc#Zbwv+~Vqwmo[uïiq@l2Soc 'vol7NҮK)韋Gvoukx}xJr, 7ߩ?oh㻋}/ }ٳƩ zϒrbU[t7[{iRJ9yr@*Ğ-OؽcrIc rӯ{6Sߜ)};Ifk)RJyddE"mepEtڿ]ҁ,{8][/Qm9f\abgMӿшrS=LsL޼앏4^)^KȻݪ8F'J)vU.rPJ)GVq6Ùt_J)iM>xw)˧J)ȱ;]G3[J[R x?Q} :hO`r WOTJ)GQ ( J)TD)ǐ{)|7hxIy^D J9rDͼXjU8g…cRSN9袋/Q(R* (D)DQA y1˿׮][,Z k֬)۷c|nݺko/\J9{۷rq|Ν[ous___Y &_{_R)^!|^lY 7Pqy_o 㔇8vDNW[V('N,?N(/bŅ^X~q6bp'jF(p[G//F (qW<ÇՕ"@Udɒ>򾧟~!aKRw?S˿MC &myܗD\G(DyHYNϜL+?R 2'"G΀p͙3$Ѥ\<7ٱcGY'*fpi]_|r6y .]R,鬘6@gh\LiCrs3;]\5ħ~b Q 38?3 nHY1 q`&J͛W>#C813oG5k.ԩ Dfp(BGcǀ#箮s&vM"Q1T(GX')TA "4΋$$f (*b3I7.]Lge2ꚯ FqD^Y &\Ut@Yqy!`"ҶiIQ(r1??Ę~#?D`3 3߱^STcKJC<88#Ž΋n1Јz)RMf P bHE4df=4"A̘-FTo:C&MQ6Ę]z饕ml`&G=^[Q|ACX"DQATbKֽ [a0rH]QLBftf1gQ4uLn[ҥ[CDSuڪ5DO&clUX*BAc$ތW `~W!HwdBG:16Ƅ@V>A$5}ҶP"6;ޓF5^%ʄЦώY5΋Bڠ{h:U(D_[aR!S1Ҧn4(΅{T%Vft̬|uwRw3[70@[/07s8s}ʴ/ ԙrHQ(r 18)DQdV2]TDv_^tcHfZRA W|/~6ʕ++˗}}}c:+A5\]jb b "͙3ذaC!Ļ[twwvlٲ1)~ _(G}dLjQMM7^De;F] mV>3LYѲMMqݺuQXB/WUi]vY9xMA|衇g&b4mSAlSA\zuiT7J_ei#lMA |G1 g?ӻD%b-c!ڦmTDK(BTDP * mSAT :BAڦ t(MQA4Qh h 6DѠ#Dm* AG(BTDP * mSAT :BAڦ t(MQA4Qh h 6DѠ#Dm* AG(BTDP * mSAT :BAڦ t`߾}ņ ~鹯zqw_׋~ضm[q1-> 㶹wަ+mVOo`7oVx4ߔe i&M*υYf3fwuueQml6A 'L0`_җJMۤo29^Sx, "3C[jUc&F[ R}k?, 3(?+BAhy "MzHyvrUiڵq_)/@O>Pqdq_n/9<*<*AtmT~_׳6Y6Hr_b)|_%p+  AR' 4A#9s攎3'6rNCSA$\\DZ .\%㬳*ϣ|?>G$x:O;#mb3aM.l3=\[RmMGH{R,ezpm>U3g<ā# 3q&Ig\5:87FRA 8I@1{HBgI9 3WAA 5tFɵ1 –c-M1lRJQ4zݺu&B4l<%\zF(@lZi8hp*ݻˀR5y:35xV."Cc ~<ꐦx> 0IpT6_A2Ұ|/b&b Z[cp~GyDy3Y~#27 e*A"f~!XZFPxꩧD{4ZC"䩛f<"Dr%kIi3sqt};\K׏әY.Sm Q 68L6?7vHb]Igy#ʛ+BAl k|p}3eip:'oU/35fA'Cl!1r|f `+2.| &X +C+bm0.֏z^*?fh DDLXQ*#QF1JuAGUiw j&M= boRd5gԩupl p<٧ m"{QF=. bl܉YhFFl5230ĒBBN7|Zh+ ":Fpzk#Ίs1fvԟgV60bD:PCZ 3P#(5zUf? ^y괙 28Ğ)ݢ1dua b"Oǀv q|gZpf` p\Ώ*醞4uڊ pSs#:)mj8v9E&.l;m Y4u.H2D&3Cꔾ{ K:(K7p>$Bys6t(BA=<èJ(o[H"/4ALLN[إGp({SĖG7S88lQ^%l 6D:` Bni&?f Dkرf}1_(C_g< Fo',>fnzE ק]0f=˟)g"3Ai4_xnwAjImobfAz}킵f=6h)* )D!DQA  QQAT((D Q(B(BA D Q Pm Q(BA6(D jBA PM Q(ڦP(DmS(BA )D Q Pm Q(BA6(D jBA PM Q(ڦPxG}T{TM1ڶ , jz8 {)F}QחϽ1 g.{=M>VXQƂ Tqǎ z"F66DiӦ.+FT#6E##b b FQoFSM1 ^4(BT46fDmSm* bL@RM1 * mSAT :BAڦ t(MQA4Qh h 6DѠ#Dm* AG(BTDP * mSAT :BAڦ t(MQA4Qh h 6DѠ#Dm* AG(BTDP * mSAT :BAڦ t(MQA4Qh h 6DѠ38ݻ_;cT˰f͚K_mݻwmk.bΜ9׿DA 1+Wgq!f̘Q̚5k'Ng*wuWqim[0aBYtuu7oެ X bƍŶmF?p1iҤkDmsms߾}=2H8+:,Q(5oUpVN+6l(YحZq)(փ`U#yWR F+m/_6zL[Nmb_zG+ w7S8 'xn  y$*l8,m8Fܹp"p2m%0u# bg FAwJ|{SA6E={ 8|/Y.q>裢*ڦh={W\qEi#> 9ٳgFh&aÆ6< )XbEi ,(}QAnڴJ)i*nd{G)nx衇FU MY.W\Y̝;+عsڜ7nX\s5IDXDMY7xccǎa7QA<$s (ۛO+TD) (RATR* RJQARJ *RJ)DQJ) J)T!AR W-q<7AR gW)w.T} B}n. VJ)G_=8C >gSo^q 8wim/~JL$T6}_26(oj  !B!B!B!B!B!_[oD !hk E?B!DQ! ! B ! B ! B ! B ! B ![՞?k =źOW3jυ?orycpZBAlA\?A?ZӒRAQ?s? ( q 9~sw&CE ! "ͬ8~^}Ԟs}M Zw\G:ٵg9wfMd9- "uz-9/%셵*Į^ڔ8Р~.^Ӓv~Z=ne`Rw?CAB(g7a̫vgs^MPڿ7\}y?no'L>&d=w7*AnGu9Tbska^?f)g%ǿ﫵%Y$;WAB(D\li5$xF؄5yƶZNggԂډ,kS׎-&{k٢ N~&-b_-:z z}7+kghoӓ/+ BA wA7+yd6E:~.8/fܝW*:CPŹ j]ߢ vtTM_[Ģ&)L bÂ:mq@AB(Ľu? ¹ nfw)ΝP)f}_dʌp Q>W[?AӦQ- b5orB }7+Tabm{P b XՀkU%ownQVVf?I!3y;c$ bD _ګ ";>I]fxKEg/]w|MAB(q[ ^ "s]cZ>~vy^̰HqjGC%I9fTEM[ĎZ[Ŭyw6kk&gծA gjgP b 5q@Ku.`Ϩxjߝ\⼟'HS, :w+ν㓝ot|3_{m$3jmV'd(PbmY@ֵk69uqf2Ú ́DBnwU')YӍ.ӒެIgR6#q{Z9 k+qz./ k'm !^|iluz=Y]{_M}6;PD|&A[Ĵ} Iy6&^=꼯^&Qgzo2F2 yjg!W53@mJ%i 1fgk۵YL6${k8^ΩaFvV׮{vI9kPo=ť{&SkӤj"@&SkZ+w[W-$c'u*W)NtN*xFmvV߳6Q)G!B[pB ! Bq ;B!ƍ ! B ! B ! B ! B ! B ! B ! B ! 
B !2,1vܫs}&^p|sex{Wľ.yӏs{1vs:?B8CB!D!BAB!D!BAB!D!BAB!D!BAB!D!BAB!D!BAB!D!BAB!D!BAB!D!BAB!D!BAB! \_18fBq㡚6wm&!:6a3 !81m"!킪G/yB "mq&m!ziӫl!5<>5*ܵZӯ{b*n]m?[fq?? ))ͧ5M o./e[Xϋ_tSR_v {暭ou\)v{vg~5}>?.ZSR/ݵ'ǵ vvBAqR5>8K]OIyd7 - AqRֈ Ձ))ϟD)D)'QJQJIARARRTԟD)D)'QJQJIARARRTԟD)D)'QJQJIARARRTԟD)D)'QJQJIARARRTԟD)D)'QJQJIARARRTmK)D)DRJQJQRARAԧTT))D)3۷رcGo(R{ҧWATG .,.!?bժU㮍N9o=N9+h ;vSGߎGyoOo}ĉy/.X4~~3O>bqQAT!7|s8zoo "#9sQ) xO?K"2҂dɒr0{_># Bs A|ˠ#ZQA<1D hoC} =Zti/B]~o/֌$͛7p===Pޗ^zi7}/X#8vl͚5sw]y晥z%sus&O\%~|= ^Xrw9̜>owùQʶ޹samDiȅ-[|n>}Ia׮];p /P9thڟ+m.zugLj4MrcghѢbϞ=G vuu;5CN:}VifϤYE||/~mB](3Zqv */Cʁų?X~az { "Ty1- -szPAyFRF7w}͹WӺlʐ)(R @cs0yAQip'B 3Fc4FpN__}"*W峹>5N Sqegy, 4HqT 0\w pH 6|NS#ݐ 19Y. N@#Pq(; I9ӑ+}K{FTiBʳh#8U.iʛםvly K*tc܆c1H 3^ڝvM8"BVmA xgʄPȵŰCzvp v3wRgb@`bs%b ~OHݨ vV/SVm/S7bϧ] +7v "L!}M{ͱTӺSFMxu|<3d4 bCittdc섎'Ni8{`C SD;A VaӺb8"ݐ [8Nύ1MIl3I31 2ӋAk04,ܟG@|}cb.LqsF45C>TL *S83 s c!)O;G^y pUv3k ;~.GuT(7v>H[6J37M9NitɳhC>D C([.ҍR1HTэq;qmİzt &E"_#S;N#rn+ <ʂG#<:,\i_; G=Rr>NI٪pa8xn7h5k#"ڜ8%ĠMgXA-6srU j bkv"ZIr09V1OUVӦR[L`we"HP4"AN{^nć1`9 !F8V pkʝfe%(K&ubfsgU3,9FuvrUF{RQ'cԋe 1=U_VOtNBmeL1ry~_HzKs!0F z1N4ꞏs 051,7DC`f''R1^FU~FA B`i|Ռ1`O#p[|*_׳xnb$ZKU/fd bK|hS-b 7ff[Zu' "1Y%96|AzOq)Qi@#XO/f`1R@G.|ϊ1#g߹CQHW3(wgEQaG<`Uc֜ήܰH9ŌO,-RݍvKoP`u]i Q&H2"zU”.5D~V{RfHw c12ifqq ,]Qog\NH:-B0t &y5*ALMyyfS.1i |W(/`*>y!0iZE\|@5(A4vӵ"QԴ{ B,APFvİsWXB% Ҿ/fi 3ىOPΰFf6Bdž4b34ʳ)s Nc"Ul36Ue'/!34DzgGŎb# aT.s6ͼ}>;}<Ӂr !'zb<3M6@,γc?H?1- Sf@"#T Jd#FdQMP8/ÿԿ꺪r0 eU~G!y6fiQ>S<3-3NEۅPο=/^ ZO4J_y))ο{/@g϶w.y~td@lɻşzs`$[:oYC?{ZG j7X6][/mO윿eP-cY&L7)ۉwm^3))ѧl>C!B!B!B!B!B!B!B!B!B!B!B!B!B!B!B!B!B!B!B!B5f\at߲Hv䫶OI9}j1kÄ)][?{ˇ,lKvm~_Ч>5ZޟS z?[ŭkߑ-Qr֏pbISW]S|j&OBxzoKv|i׽TjwڑԙKԧJQָ)`D9:ЧdsVul1{2;?_X nCo=碻vٵyĮ[(ʟkIYu%.ԧ<2~eqEG S{8)kdSPXrIARARRTԟD)D)'QJQJIARARRTԟD)D)'QJQJIARARRTԟD)D)'QJQJIARARRTԟD)D)'QJQJIARARRTԟD)D)'QJѶRARA-TT))D)D}JJQJQRAlC]xp7(VZU{R|xcx4駟.֯_?&71]n]q뭷w}wcQ=2r)ō78O>dߝ#jU{%K |y$橧Z,\PA<̎Gα.hTwuu?1w)>+&L(NQUJ)'3όȳ{R H_*pwҥI'T0,8m &-˩ytqF,1BsB=˹}Ârooo4A֬YS~?~H*|Q^ <ʐ /XU,M#}vpΝp,PL7J/\BڄzU9lcdy饗ϡQݢ_⬌=ܶĴhH-rMUVYgձ4b#8-~g4{ҙUe: ^x6 "5j+//MqLF'o-ڕPF_|!T?q"`N<8N8c0}1 c}褉'&UD4iP@j~zq 7 ?3ߔ7/BRA$pOAO&'Lg=rS4`{fQG}@=)cԯ8 ƨ~b΢>P_~yV4s9s"~#@)#eQԯl;5c>s9vM-ZTdz!/[쐁uܣ*e "7o^i\ {gbMlNſL/nX7ԉxmGD 0aD $f L ǁ4# #ΈA癌389Ǩc1LZ: pD`bbLk E"P)2$İzKG38l'i\IqN9DbH lFogSĤi*Qߪgc})Qo%UeaDD"_Wca10q+5fbFh, # эt\x{hBH{} 4|I8Έ*%`u$?"ȕcg"x|#A$ L/kԭ^l|fǠ[I>flD0= u *9\fcV`]a|q=d>*Wć_x"1#6/0CH#q4Ub6D_-1ۘtT)wdQAMUbT rx3)aT파FTm|SNM܋2QzMF?=O Q\HY 8(ڛ`Eܿd#AonTU6=OU!h Z\ ?!Ml(01JyJ1|jGrj<#>^Uxvr '>QOLLlKĆ4޿M5ily87ϰ3bPuVq"ʨ ǁtO+(ý1z #8ThӵfAqA^h>ubvᾈ!xԔ^X/s±yVkP/6]  bI#?  flcq2vNY {k$Qnl-ͼ|$RwU'Ae4"fg(wl\bA3Aln")GAA΀&Q#7{.<ϏqĚ(A?= QAdxV0vE\)]ʄPNXcIl~Igci#4f3l<7/,KQAWMA=AvbWpUb#α| . t]+ȿi4r4RTHc̿܃z (-6]n1Ή{HYcH{3PHwm@yl2 NϽWb jz:-W+;$KRx cƓnbj:ewHUA ۷U9qM/bXM_anWGOcAx(_cERlES> aBdzxvq{ ĿlH{#em1$&i<`x91cL^77[3Aqm= 8aFZyplۄZarJs 4 ^bwXT3__I'$+/5s=k)pn(iG)Oo^+uǽA #V=RAl at)adD{1cr=Ĕ_GOKk0,?XT V#E@G<}7u-fl~)D)u`}JJQJXRAR֧Tԁ))D)u`}JJQJXRAR֧Tԁ))D)u`}JJQJXRAR֧Tԁ))D)u`}JJQJXRARVTԁD)D)u`QJQJ J J)D)D;NJQJQRARARn;N? SRutt]8)kߗSoJ͝ ջp|{͠[Ү<|zo2Cs޶ΡWlw8JA<-gtw{/K3>L[Ó.ҽysрwL/+LqaǤzX`S?yLL]qM 7t0H|;bOv?嵢O:68Z@j{ˏk]yM1G"p䮭t8~泦tm e{q7Mӧ>:B!B!B!B!B!B!B!B!B!B!B!B! ?آȌF`IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/doc/source/user/terminology/index.rst0000644000175000017500000000023700000000000023351 0ustar00coreycorey00000000000000=================== Mistral Terminology =================== .. 
.. toctree::
   :maxdepth: 3

   workbooks
   workflows
   actions
   executions
   cron_triggers

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/doc/source/user/terminology/workbooks.rst0000644000175000017500000000477600000000000024272 0ustar00coreycorey00000000000000
Workbooks
=========

Using workbooks, users can combine multiple entities of any type (workflows
and actions) into one document and upload it to the Mistral service. When
uploading a workbook, Mistral will parse it and save its workflows and actions
as independent objects which will be accessible via their own API endpoints
(/workflows and /actions). Once that is done, the workbook itself is out of
the picture: a user can simply start workflows and reference workflows and
actions as if they had been uploaded without a workbook in the first place.
However, if these individual objects need to be modified, the user can modify
the same workbook definition and re-upload it to Mistral (or, of course,
modify each object independently).

**Namespacing**

One thing worth noting is that when using a workbook Mistral uses its name as
a prefix for generating the final names of the workflows and actions included
in the workbook. For example, a workflow declared as "local_workflow1" in a
workbook named "my_workbook" is registered as "my_workbook.local_workflow1".
To illustrate this principle let's take a look at the figure below:

.. image:: img/workbook_namespacing.png
    :align: center

So after a workbook has been uploaded its workflows and actions become
independent objects but with slightly different names.

YAML example
^^^^^^^^^^^^

::

    ---
    version: '2.0'

    name: my_workbook
    description: My set of workflows and ad-hoc actions

    workflows:
      local_workflow1:
        type: direct

        tasks:
          task1:
            action: local_action str1='Hi' str2=' Mistral!'
            on-complete:
              - task2

          task2:
            action: global_action

      local_workflow2:
        type: reverse

        tasks:
          task1:
            workflow: local_workflow1

          task2:
            workflow: global_workflow param1='val1' param2='val2'
            requires: [task1]

    actions:
      local_action:
        input:
          - str1
          - str2
        base: std.echo output="<% $.str1 %><% $.str2 %>"

**NOTE:** Even though the names of objects inside workbooks change upon
uploading, Mistral allows referencing between those objects using the local
names declared in the original workbook.

**Attributes**

* **name** - Workbook name. **Required.**
* **description** - Workbook description. *Optional*.
* **tags** - String with arbitrary comma-separated values. *Optional*.
* **workflows** - Dictionary containing workflow definitions. *Optional*.
* **actions** - Dictionary containing ad-hoc action definitions. *Optional*.

For more details about Mistral Workflow Language itself, please see
:doc:`Mistral Workflow Language specification `

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/doc/source/user/terminology/workflows.rst0000644000175000017500000001157300000000000024304 0ustar00coreycorey00000000000000
Mistral Workflows
=================

Workflow is the main building block of Mistral Workflow Language, the reason
why the project exists. A workflow represents a process that can be described
in various ways and that does some job of interest to the end user. Each
workflow consists of tasks (at least one) describing what exact steps should
be made during workflow execution.
YAML example
^^^^^^^^^^^^

::

    ---
    version: '2.0'

    create_vm:
      description: Simple workflow sample
      type: direct

      input: # Input parameter declarations
        - vm_name
        - image_ref
        - flavor_ref

      output: # Output definition
        vm_id: <% $.vm_id %>

      tasks:
        create_server:
          action: nova.servers_create name=<% $.vm_name %> image=<% $.image_ref %> flavor=<% $.flavor_ref %>
          publish:
            vm_id: <% task().result.id %>
          on-success:
            - wait_for_instance

        wait_for_instance:
          action: nova.servers_find id=<% $.vm_id %> status='ACTIVE'
          retry:
            delay: 5
            count: 15

Workflow types
^^^^^^^^^^^^^^

Mistral Workflow Language v2 introduces different workflow types and the
structure of each workflow type varies according to its semantics. Currently,
Mistral provides two workflow types:

- `Direct workflow <#direct-workflow>`__
- `Reverse workflow <#reverse-workflow>`__

See corresponding sections for details.

Direct workflow
^^^^^^^^^^^^^^^

Direct workflow consists of tasks combined in a graph where every next task
starts after another one depending on the produced result. So direct workflow
has a notion of transition. Direct workflow is considered to be completed if
there aren't any transitions left that could be used to jump to next tasks.

.. image:: img/direct_workflow.png

YAML example of direct workflow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

::

    ---
    version: '2.0'

    create_vm_and_send_email:
      type: direct

      input:
        - vm_name
        - image_id
        - flavor_id

      output:
        result: <% $.vm_id %>

      tasks:
        create_vm:
          action: nova.servers_create name=<% $.vm_name %> image=<% $.image_id %> flavor=<% $.flavor_id %>
          publish:
            vm_id: <% task().result.id %>
          on-error:
            - send_error_email
          on-success:
            - send_success_email

        send_error_email:
          action: send_email to='admin@mysite.org' body='Failed to create a VM'
          on-complete:
            - fail

        send_success_email:
          action: send_email to='admin@mysite.org' body='Vm is successfully created and its id is <% $.vm_id %>'

Reverse workflow
^^^^^^^^^^^^^^^^

In reverse workflow all relationships in the workflow task graph are
dependencies. In order to run this type of workflow we need to specify a task
that needs to be completed; it can be conventionally called the 'target task'.
When Mistral Engine starts a workflow it recursively identifies all the
dependencies that need to be completed first.

.. image:: img/reverse_workflow.png

The figure explains how reverse workflow works. In the example, task **T1** is
chosen as the target task. So when the workflow starts Mistral will run only
tasks **T7**, **T8**, **T5**, **T6**, **T2** and **T1** in the specified order
(starting from tasks that have no dependencies). Tasks **T3** and **T4** won't
be a part of this workflow because there's no route in the directed graph from
**T1** to **T3** or **T4**.
YAML example of reverse workflow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

::

    ---
    version: '2.0'

    create_vm_and_send_email:
      type: reverse

      input:
        - vm_name
        - image_id
        - flavor_id

      output:
        result: <% $.vm_id %>

      tasks:
        create_vm:
          action: nova.servers_create name=<% $.vm_name %> image=<% $.image_id %> flavor=<% $.flavor_id %>
          publish:
            vm_id: <% task().result.id %>

        search_for_ip:
          action: nova.floating_ips_findall instance_id=null
          publish:
            vm_ip: <% task().result[0].ip %>

        associate_ip:
          action: nova.servers_add_floating_ip server=<% $.vm_id %> address=<% $.vm_ip %>
          requires: [search_for_ip]

        send_email:
          action: send_email to='admin@mysite.org' body='Vm is created and id <% $.vm_id %> and ip address <% $.vm_ip %>'
          requires: [create_vm, associate_ip]

For more details about Mistral Workflow Language itself, please see
:doc:`Mistral Workflow Language specification `

././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.097567
mistral-10.0.0.0b3/doc/source/user/use_cases/0000755000175000017500000000000000000000000021110 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.097567
mistral-10.0.0.0b3/doc/source/user/use_cases/img/0000755000175000017500000000000000000000000021664 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/doc/source/user/use_cases/img/long_running_business_process.png0000644000175000017500000011516700000000000030545 0ustar00coreycorey00000000000000
[binary PNG data omitted: long_running_business_process.png]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/doc/source/user/use_cases/index.rst0000644000175000017500000000025300000000000022751 0ustar00coreycorey00000000000000
=========
Use Cases
=========

This part is a collection of articles describing use cases in more detail.
.. toctree::
   :maxdepth: 2

   long_running_business_process

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/doc/source/user/use_cases/long_running_business_process.rst0000644000175000017500000000643700000000000030020 0ustar00coreycorey00000000000000
=============================
Long-running Business Process
=============================

Introduction
============

The use case described below is not the most important driver in the current
development process in Mistral. However, at later stages, when OpenStack
itself becomes more mature, there will be a number of cloud users who will
want to build enterprise systems following the idea of this use case.

Problem Statement
=================

Looking back at the industry, say 20 years ago, it's fairly obvious that
things have evolved drastically. For example, instead of having one
information system for everything like accounting, financial planning and
reporting, enterprises tend to have multiple specialized systems in order to
address the performance problems caused by a constantly growing amount of
enterprise data. However, we may need to define a business process, or a
workflow, that spans several systems and, for some calculation steps, may even
require that people interact with the process by entering data manually. Those
people may be accountants entering primary information, office managers,
finance directors and others.

The formal challenge here is to define and maintain a sequence of operations
that need to be executed one after another and to be able to have some logic
(conditions) driving the execution of this sequence one way or another.

In order to make this workflow scalable (able to run some steps in parallel),
tolerant to failures and observable for external systems, there has to be some
component that plays the role of a coordinator. By "observable for external
systems" we mean that we should be able to see all the relevant information on
how the process has been going, what steps have already been processed and
what steps are left, and whether it has stopped with a failure or finished
with success. Maintaining a history of already finished processes would also
bring significant value.

Solution
========

Mistral is a perfect fit for being this kind of coordinator. In order to
illustrate everything described so far, let's consider an imaginary workflow
of calculating employees' salaries in an enterprise.

.. figure:: img/long_running_business_process.png
    :align: center

    Figure 1. Mistral maintains business workflows spanning multiple systems.

Given an employee's full name (or id), such a workflow may include the
following computation steps:

* Calculate the salary base using the accounting information system.
* Calculate the employee's bonus using a different bonus system.
* Request an approval from a manager for the calculated bonus.
* In case of any error at any stage, send an SMS to a system administrator.

In this scenario Mistral always knows the execution state of the entire
workflow and, in case of failures (network losses etc.), it can continue the
workflow from the point where it stopped, transparently to the user.
Additionally, Mistral can run the calculation of the salary base and the bonus
in parallel since these two tasks are independent. All these things can be
flexibly configured by a user. A rough sketch of how such a workflow could be
expressed in Mistral Workflow Language is shown below.
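The following is a minimal, purely illustrative sketch of the salary workflow
described above; it is not part of the original article. The action names
(``calc_salary_base``, ``calc_bonus``, ``request_approval``, ``send_sms``) are
hypothetical placeholders for integrations with the respective systems, not
actions that ship with Mistral:

::

    ---
    version: '2.0'

    calculate_salary:
      input:
        - employee_id

      tasks:
        calc_salary_base:
          # Hypothetical action talking to the accounting system.
          action: calc_salary_base employee_id=<% $.employee_id %>
          publish:
            base: <% task().result %>
          on-success:
            - request_approval
          on-error:
            - notify_admin

        calc_bonus:
          # Hypothetical action talking to the bonus system. It runs in
          # parallel with calc_salary_base since neither depends on the other.
          action: calc_bonus employee_id=<% $.employee_id %>
          publish:
            bonus: <% task().result %>
          on-success:
            - request_approval
          on-error:
            - notify_admin

        request_approval:
          # Waits for both branches before asking a manager for approval.
          join: all
          action: request_approval employee_id=<% $.employee_id %> amount=<% $.base + $.bonus %>
          on-error:
            - notify_admin

        notify_admin:
          # Hypothetical SMS integration used as the error handler.
          action: send_sms to='admin' body='Salary calculation failed'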
Notes
=====

The example above is just a simple illustration of what Mistral can offer in
regard to taking care of long-running business processes. In real life,
Mistral can take care of much more complicated processes spanning multiple
information systems and involving real people at certain points.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/doc/source/user/wf_lang_v2.rst0000644000175000017500000016733200000000000021724 0ustar00coreycorey00000000000000
Mistral Workflow Language (v2)
==============================

Introduction
------------

This document fully describes version 2 of the Mistral Workflow Language used
by the Mistral Workflow Service. Since version 1, issued in May 2014, the
Mistral team has completely reworked the language with the goal of making it
easier to understand while being more consistent and flexible.

Unlike Mistral Workflow Language v1, v2 assumes that all entities that Mistral
works with, like workflows and actions, are completely independent in terms of
how they're referenced and accessed through the API (and also the Python
Client API and CLI). Workbook, the entity that can combine workflows and
actions, still exists in the language but only for namespacing and convenience
purposes. See the `Workbooks section <#workbooks>`__ for more details.

**NOTE**: Mistral Workflow Language and API of version 1 have not been
supported since April 2015 and version 2 is now the only way to interact with
the Mistral service.

Mistral Workflow Language consists of the following main object (entity) types
that will be described in detail below:

- `Workflows <#workflows>`__
- `Actions <#actions>`__

Prerequisites
-------------

Mistral Workflow Language supports the YAQL and Jinja2 expression languages
(linked below) to reference workflow context variables and thereby implements
passing data between workflow tasks. This is also referred to as the Data Flow
mechanism. YAQL is a simple but powerful query language that allows extracting
needed information from JSON structured data. Although Jinja2 is primarily a
templating technology, Mistral also uses it for evaluating expressions, so
users have a choice between YAQL and Jinja2. It's also possible to combine
both expression languages within one workflow definition. The only limitation
is that it's impossible to use both types of expressions within one line. As
long as YAQL and Jinja2 expressions are on different lines of the workflow
definition text, it is valid.

It is allowed to use YAQL/Jinja2 in the following sections of Mistral Workflow
Language:

- Workflow `'output' attribute <#common-workflow-attributes>`__
- Workflow `'task-defaults' attribute <#common-workflow-attributes>`__
- `Direct workflow <#direct-workflow>`__ transitions
- Task `'publish' attribute <#common-task-attributes>`__
- Task `'input' attribute <#common-task-attributes>`__
- Task `'with-items' attribute <#common-task-attributes>`__
- Task `'target' attribute <#common-task-attributes>`__
- Any attribute of `task policies <#policies>`__
- Action `'base-input' attribute <#attributes>`__
- Action `'output' attribute <#attributes>`__

Mistral Workflow Language is fully based on YAML, and knowledge of YAML is a
plus for better understanding of the material in this specification. It also
takes advantage of the supported query languages to define expressions in
workflow and action definitions.

- Yet Another Markup Language (YAML): http://yaml.org
- Yet Another Query Language (YAQL): https://pypi.org/project/yaql/1.0.0
- Jinja 2: http://jinja.pocoo.org/docs/dev/
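As a small illustration (not part of the original specification), assume a
context variable ``servers`` holding a list of objects with ``name`` and
``status`` fields. The same kind of query can be written in either language,
as long as each expression stays on its own line:

::

    output:
      # YAQL: names of all active servers.
      active_servers: <% $.servers.where($.status = 'ACTIVE').select($.name) %>
      # Jinja2: names of all servers.
      all_servers: "{{ _.servers | map(attribute='name') | list }}"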
Workflows
---------

Workflow is the main building block of Mistral Workflow Language, the reason
why the project exists. A workflow represents a process that can be described
in various ways and that does some job of interest to the end user. Each
workflow consists of tasks (at least one) describing what exact steps should
be made during workflow execution.

You should use '<% $.x %>' in YAQL or '{{ _.x }}' in Jinja expressions to get
access to the variable 'x' in the data context of a workflow execution.

YAML example
^^^^^^^^^^^^

::

    ---
    version: '2.0'

    create_vm:
      description: Simple workflow example
      input:
        - vm_name
        - image_ref
        - flavor_ref

      output:
        vm_id: "{{ _.vm_id }}"
        vm_status: <% $.vm_status %>

      tasks:
        create_server:
          action: nova.servers_create name=<% $.vm_name %> image=<% $.image_ref %> flavor=<% $.flavor_ref %>
          publish:
            vm_id: <% task().result.id %>
          on-success:
            - wait_for_instance

        wait_for_instance:
          action: nova.servers_find id={{ _.vm_id }} status='ACTIVE'
          retry:
            delay: 5
            count: 15
          publish:
            vm_status: "{{ task().result.status }}"

This example workflow simply sends a command to the OpenStack Compute service
Nova to start creating a virtual machine and waits till it's created, using a
special "retry" policy.

Workflow types
^^^^^^^^^^^^^^

Mistral Workflow Language v2 introduces different workflow types and the
structure of each workflow type varies according to its semantics. Basically,
a workflow type encapsulates workflow processing logic, a set of meta rules
defining how all workflows of this type should work. Currently, Mistral
provides two workflow types:

- `Direct workflow <#direct-workflow>`__
- `Reverse workflow <#reverse-workflow>`__

See corresponding sections for details.

Common workflow attributes
^^^^^^^^^^^^^^^^^^^^^^^^^^

- **type** - Workflow type. Either 'direct' or 'reverse'. *Optional*. 'direct'
  by default.
- **description** - Arbitrary text containing workflow description.
  *Optional*.
- **input** - List defining required input parameter names and optionally
  their default values in the form "my_param: 123" (see the sketch after this
  list). *Optional*.
- **output** - Any data structure arbitrarily containing YAQL/Jinja2
  expressions that defines workflow output. May be nested. *Optional*.
- **output-on-error** - Any data structure arbitrarily containing YAQL/Jinja2
  expressions that defines the output of a workflow to be returned if it goes
  into the error state. May be nested. *Optional*.
- **task-defaults** - Default settings for some of the task attributes,
  defined at workflow level. *Optional*. A corresponding attribute defined for
  a specific task always takes precedence. Specific task attributes that can
  be defined in **task-defaults** are the following:

  - **on-error** - List of tasks which will run after the task has completed
    with an error. For `direct workflow <#direct-workflow>`__ only.
    *Optional*.
  - **on-success** - List of tasks which will run after the task has completed
    successfully. For `direct workflow <#direct-workflow>`__ only. *Optional*.
  - **on-complete** - List of tasks which will run after the task has
    completed regardless of whether it is successful or not. For `direct
    workflow <#direct-workflow>`__ only. *Optional*.
  - **requires** - List of tasks that a task depends on. For `reverse workflow
    <#Reverse_Workflow>`__ only. *Optional*.
  - **pause-before** - Configures pause-before policy. *Optional*.
  - **wait-before** - Configures wait-before policy. *Optional*.
  - **wait-after** - Configures wait-after policy. *Optional*.
  - **fail-on** - Configures fail-on policy. *Optional*.
  - **timeout** - Configures timeout policy. *Optional*.
  - **retry** - Configures retry policy. *Optional*.
  - **concurrency** - Configures concurrency policy. *Optional*.
  - **safe-rerun** - Configures safe-rerun policy. *Optional*.

- **tasks** - Dictionary containing workflow tasks. See below for more
  details. *Required*.
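For instance, this minimal sketch (not from the original document) declares
``vm_name`` as a required input, while ``flavor_ref`` gets a default value and
therefore becomes optional:

::

    my_workflow:
      input:
        - vm_name                  # required, must be passed by the caller
        - flavor_ref: "m1.small"   # optional, defaults to "m1.small"

      tasks:
        ...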
Tasks
^^^^^

Task is what a workflow consists of. It defines a specific computational step
in the workflow. When the workflow engine processes the entities described in
the workflow text written in YAML, it schedules tasks for execution.
Scheduling a task means that it's now eligible for execution and will be run
some time later. When exactly it will run depends on the system load and
configuration. Each task can optionally take input data and produce output.

In Mistral Workflow Language v2, a task can be associated with an action or a
workflow. In the example below there are two tasks of different types:

::

    action_based_task:
      action: std.http url='openstack.org'

    workflow_based_task:
      workflow: backup_vm_workflow vm_id=<% $.vm_id %>

Actions will be explained below in an individual paragraph but, looking ahead,
it's worth saying that Mistral provides a lot of actions out of the box
(including actions for most of the core OpenStack services) and it's also easy
to plug new actions into Mistral.

Common task attributes
''''''''''''''''''''''

All Mistral tasks, regardless of workflow type, have the following common
attributes:

- **name** - Task name must not equal *noop*, *fail*, *succeed* or *pause*.
  The max length is 255 symbols. For tasks with *join* control flow this
  restriction is 208 symbols.
- **description** - Arbitrary text containing task description. *Optional*.
- **action** - Name of the action associated with the task. Can be a static
  value or an expression (for example, "{{ _.action_name }}"). *Mutually
  exclusive with* **workflow**. If neither action nor workflow is provided
  then the action 'std.noop', which does nothing, will be used.
- **workflow** - Name of the workflow associated with the task. Can be a
  static value or an expression (for example, "{{ _.subworkflow_name }}").
  *Mutually exclusive with* **action**.
- **input** - Actual input parameter values of the task's action or workflow.
  *Optional*. The value of each parameter is a JSON-compliant type such as a
  number, string etc., dictionary or list. It can also be a YAQL/Jinja2
  expression to retrieve a value from the task context, or any of the
  mentioned types containing inline expressions (for example, the string
  "<% $.movie_name %> is a cool movie!"). Can be an expression that evaluates
  to a JSON object.
- **publish** - Dictionary of variables to publish to the workflow context.
  Any JSON-compatible data structure optionally containing expressions to
  select precisely what needs to be published. Published variables will be
  accessible to downstream tasks via expressions. *Optional*.

  **NOTE!** Mistral saves variables into a storage (context) which is
  associated only with a branch. For example, the expression "$.my_var" in the
  declaration of A1 below will always evaluate to 1, and for B1 it will always
  evaluate to 2. This doesn't depend on the order in which A and B run. This
  is because there are two branches (A -> A1 and B -> B1), and the variable
  "my_var" has its own different version in each of them.

  ::

      version: '2.0'

      wf:
        tasks:
          A:
            action: std.noop
            publish:
              my_var: 1
            on-success: A1

          A1:
            action: my_action param1=<% $.my_var %>

          B:
            action: std.noop
            publish:
              my_var: 2
            on-success: B1

          B1:
            action: my_action param1=<% $.my_var %>

- **publish-on-error** - Same as **publish** but evaluated in case of task
  execution failures (see the sketch below). *Optional*.
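A minimal sketch (not part of the original specification) combining the two:
on success the task publishes its result, and on failure it publishes the
error information instead. ``my_action`` is a placeholder name, and the
content of ``task().result`` on failure is an assumption here:

::

    my_task:
      action: my_action
      publish:
        result: <% task().result %>
      publish-on-error:
        # On failure, task().result is assumed to contain the error output.
        error_info: <% task().result %>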
- **with-items** - If configured, allows to run the action or workflow
  associated with a task multiple times on a provided list of items. See
  `Processing collections using 'with-items' <#processing-collections>`__ for
  details. *Optional*.
- **keep-result** - Boolean value allowing to not store action results after
  task completion (e.g. if they are large and not needed afterwards).
  *Optional*. By default is 'true'.
- **target** - String parameter. It defines an executor to which the task
  action should be sent. Target here physically means the name of the
  executor. The name of the executor can be defined with the "host" property
  in the Mistral configuration file. If more than one executor has the same
  name then the task action will be sent to only one of them. *Optional*.
- **pause-before** - Configures pause-before policy. *Optional*.
- **wait-before** - Configures wait-before policy. *Optional*.
- **wait-after** - Configures wait-after policy. *Optional*.
- **fail-on** - Configures fail-on policy. *Optional*.
- **timeout** - Configures timeout policy. *Optional*.
- **retry** - Configures retry policy. *Optional*.
- **concurrency** - Configures concurrency policy. *Optional*.
- **safe-rerun** - Boolean value allowing to rerun the task if the executor
  dies during action execution. If set to 'true' the task may be run twice.
  *Optional*. By default set to 'false'.

workflow
''''''''

If a task has the attribute 'workflow' it synchronously starts a sub-workflow
with the given name.

Example of a static sub-workflow name:

::

    my_task:
      workflow: name_of_my_workflow

Example of a dynamic sub-workflow name:

::

    ---
    version: '2.0'

    framework:
      input:
        - magic_workflow_name: show_weather

      tasks:
        weather_data:
          action: std.echo
          input:
            output:
              location: wherever
              temperature: "22C"
          publish:
            weather_data: <% task().result %>
          on-success:
            - do_magic

        do_magic:
          # Reference workflow by parameter.
          workflow: <% $.magic_workflow_name %>
          # Expand dictionary to input parameters.
          input: <% $.weather_data %>

    show_weather:
      input:
        - location
        - temperature

      tasks:
        write_data:
          action: std.echo
          input:
            output: "<% $.location %>: <% $.temperature %>"

In this example, we defined two workflows in one YAML snippet, and the
workflow 'framework' may call the workflow 'show_weather' if 'framework'
receives the corresponding workflow name through the input parameter
'magic_workflow_name'. In this case it is set by default, so a user doesn't
need to pass anything explicitly.

Note: A typical use for dynamic sub-workflow selection is when parts of a
workflow can be customized, e.g. collect some weather data and then execute
some custom workflow on it.

Policies
''''''''

Any Mistral task, regardless of its workflow type, can optionally have
configured policies.

YAML example

::

    my_task:
      action: my_action
      pause-before: true
      wait-before: 2
      wait-after: 4
      fail-on: <% $.some_value < 4 %>
      timeout: 30
      retry:
        count: 10
        delay: 20
        break-on: <% $.my_var = true %>
        continue-on: <% $.my_var = false %>

**pause-before**

Defines whether Mistral Engine should put the workflow on hold or not before
starting a task.

**wait-before**

Defines a delay in seconds that Mistral Engine should wait before starting a
task.

**wait-after**

Defines a delay in seconds that Mistral Engine should wait after a task has
completed before starting the next tasks defined in *on-success*, *on-error*
or *on-complete*.

**fail-on**

Defines a condition under which the task will fail, even if the action
completed successfully.

**timeout**

Defines a period of time in seconds after which a task will be failed
automatically by the engine if it hasn't completed.

**concurrency**

Defines the maximum number of actions running simultaneously in a task.
*Applicable* only for tasks that have *with-items*. If the *concurrency* task
property is not set then the actions (or workflows in case of nested
workflows) of the task will be scheduled for execution all at once.
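For example, the following sketch (not from the original document; it assumes
an input list ``vm_names``) creates the servers two at a time instead of all
at once:

::

    create_servers:
      with-items: vm_name in <% $.vm_names %>
      action: nova.servers_create name=<% $.vm_name %>
      # At most two nova.servers_create actions run at the same time.
      concurrency: 2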
**retry**

Defines a pattern for how the task should be repeated in case of an error.

- **count** - Defines a maximum number of times that a task can be repeated.
- **delay** - Defines a delay in seconds between subsequent task iterations.
- **break-on** - Defines an expression that will break the iteration loop if
  it evaluates to 'true'. If it fires then the task is considered to have
  failed.
- **continue-on** - Defines an expression that will continue the iteration
  loop if it evaluates to 'true'. If it fires then the task is considered
  successful. If it evaluates to 'false' then the policy will break the
  iteration.

A retry policy can also be configured on a single line, as:

::

    task1:
      action: my_action
      retry: count=10 delay=5 break-on=<% $.foo = 'bar' %>

All parameter values for any policy can be defined as YAQL/Jinja2 expressions.

**NOTE:** It would be rare to use both break-on and continue-on in the same
retry block. *break-on* should be used when one expects the action to be in an
ERROR state for some number of tries but it may eventually go to a SUCCESS
state, thereby stopping the loop. But if *break-on* is *'true'* then the
retries will stop and the task will be in ERROR. *continue-on* should be used
if the action will usually return *SUCCESS*, but the action has other results
that can be used to signal whether to continue the loop or not.

**NOTE**: The retry task policy doesn't work after the timeout policy is
triggered. You should use *on-error* in case of a direct workflow, or a task
rerun, to re-execute a task.

Input syntax
''''''''''''

When describing a workflow task it's possible to specify its input parameters
in two ways:

Full syntax:

::

    my_task:
      action: std.http
      input:
        url: http://mywebsite.org
        method: GET

Simplified syntax:

::

    my_task:
      action: std.http url="http://mywebsite.org" method="GET"

Syntax with a dynamic input parameter map:

::

    ---
    version: '2.0'

    example_workflow:
      input:
        - http_request_parameters:
            url: http://mywebsite.org
            method: GET

      tasks:
        setup_task:
          action: std.http
          input: <% $.http_request_parameters %>

The same rules apply to tasks associated with workflows.

Full syntax:

::

    my_task:
      workflow: some_nested_workflow
      input:
        param1: val1
        param2: val2

Simplified syntax:

::

    my_task:
      workflow: some_nested_workflow param1='val1' param2='val2'

Syntax with a dynamic input parameter map:

::

    ---
    version: '2.0'

    example_workflow:
      input:
        - nested_params: {"param1": "val1", "param2": "val2"}

      tasks:
        setup_task:
          workflow: some_nested_workflow
          input: <% $.nested_params %>

**NOTE**: It's also possible to merge these two approaches and specify a part
of the parameters using the simplified key-value pair syntax and the rest
using the keyword *input*. In this case all the parameters will be effectively
merged. If the same parameter is specified in both ways then the one under the
*input* keyword takes precedence, as illustrated below.
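A contrived sketch (not from the original document) illustrating the
precedence rule: ``method`` is given both inline and under ``input``, and the
value under ``input`` wins, so the request is sent with method POST:

::

    my_task:
      action: std.http url="http://mywebsite.org" method="GET"
      input:
        # Overrides the inline value, so the effective method is POST.
        method: POST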
Direct workflow
^^^^^^^^^^^^^^^

Direct workflow consists of tasks combined in a graph where every next task
starts after another one depending on the produced result. So direct workflow
has a notion of transition. Direct workflow is considered to be completed if
there aren't any transitions left that could be used to jump to next tasks.

.. image:: /user/terminology/img/direct_workflow.png

Figure 1. Mistral Direct Workflow.

YAML example
''''''''''''

::

    ---
    version: '2.0'

    create_vm_and_send_email:
      type: direct

      input:
        - vm_name
        - image_id
        - flavor_id

      output:
        result: <% $.vm_id %>

      tasks:
        create_vm:
          action: nova.servers_create name=<% $.vm_name %> image=<% $.image_id %> flavor=<% $.flavor_id %>
          publish:
            vm_id: <% task(create_vm).result.id %>
          on-error:
            - send_error_email
          on-success:
            - send_success_email

        send_error_email:
          action: send_email to_addrs=['admin@mysite.org'] body='Failed to create a VM'
          on-complete:
            - fail

        send_success_email:
          action: send_email to_addrs=['admin@mysite.org'] body='Vm is successfully created and its id <% $.vm_id %>'

Direct workflow task attributes
'''''''''''''''''''''''''''''''

Mistral supports the following task transitions:

- **on-success** - List of tasks which will run after the task has completed
  successfully. *Optional*.
- **on-error** - List of tasks which will run after the task has completed
  with an error. *Optional*.
- **on-complete** - List of tasks which will run after the task has completed
  regardless of whether it is successful or not. *Optional*.

You can define the task transitions in two ways. The first is just a list of
tasks, as in the example workflow above. The second way is:

::

    *transition*:
      publish:
        global:
          some_global_variable: some_value
        branch:
          some_branch_variable: some_value
      next:
        - *next_task*

The publish defined under *transitions* can optionally define scopes to be
able to publish into different scopes: 'branch' and 'global'. Specifying
variables under 'branch' will make Mistral publish into a branch workflow
context just like 'publish' and 'publish-on-error'. Specifying variables under
'global' will make Mistral publish into the global workflow context. You can
use "$." in YAQL and "_." in Jinja to access a global variable, but branch
variables can shadow them if they are published in the current branch. To
prevent this, you may use the YAQL/Jinja function "global()" to explicitly
access variables in the workflow global context.

If 'publish' is defined in 'on-complete' and also in 'on-success' and/or
'on-error' then the result of publishing will be a merge of what 'on-complete'
publishes with what 'on-success' or 'on-error' publishes, depending on the
task status. If 'on-complete' publishes variables that are also published by
'on-success' or 'on-error' then the latter take precedence. In other words,
'on-complete' in this case is considered a default which can be overridden by
the more specific 'on-XXX' clause.

The keyword 'next' defined under *transitions* optionally contains the list of
tasks which will run after the current task has finished, as in the concrete
sketch below.
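A concrete sketch of this second form (not from the original document; the
task, action and variable names are illustrative only): on success, ``task1``
publishes into both scopes and then transitions to ``task2``:

::

    task1:
      action: my_action
      on-success:
        publish:
          branch:
            my_var: <% task().result %>
          global:
            global_flag: true
        next:
          - task2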
Example of writing and reading global variables
'''''''''''''''''''''''''''''''''''''''''''''''

::

    ---
    version: '2.0'

    wf:
      tasks:
        A:
          action: std.noop
          on-success:
            publish:
              branch:
                my_var: "branch value"
              global:
                my_var: "global value"
            next: A1

        A1:
          # $.my_var will always evaluate to "branch value" because A1 belongs
          # to the same branch as A and runs after A. When using "$" to access
          # context variables branch values have higher priority.
          # In order to access global context reliably we need to use the
          # YAQL/Jinja function 'global'. So global(my_var) will always
          # evaluate to 'global value'.
          action: my_action1 param1=<% $.my_var %> param2=<% global(my_var) %>

        B:
          # $.my_var will evaluate to "global value" if task A completes
          # before task B and "null", if not. It's because A and B are
          # parallel and 'publish' in A doesn't apply to B, only
          # 'publish-global' does. In this example global(my_var) has the same
          # meaning as $.my_var because there's no ambiguity about what
          # context we should take the variable 'my_var' from.
          action: my_action2 param1=<% $.my_var %> param2=<% global(my_var) %>

**NOTE!** It's important to note that this is an unprotected way of modifying
data because race conditions are possible when writing different values for
the same variables in the global context from parallel branches. In other
words, if we have branches A and B, and there are tasks in these branches that
first read global variable X, then increment it and write the new value,
Mistral won't provide any guarantee that the resulting value after finishing
tasks A and B will be X + 2. In some cases it can be X + 1 because the
following may happen: task A reads X, task B reads X, task B increments X,
task B writes X + 1, task A increments X (the old one, not the one incremented
by B), task A writes X + 1.

Note: All of the above clauses cannot contain task names evaluated as
YAQL/Jinja expressions. They have to be static values. However, task
transitions can be conditional, based on expressions. See `Transitions with
expressions <#transitions-with-expressions>`__ for more details.

It is important to understand the semantics of **on-success**, **on-error**
and **on-complete** around handling action errors. If a task action returns an
error, **on-success** and **on-complete** won't prevent the entire workflow
execution from failing. Only **on-error** will. The closest analogy is the
*try-catch-finally* blocks in regular programming languages. **on-error** is
similar to *catch* and serves as an exception handler for possible errors
expected by design, whereas **on-complete** is like *finally*: it will run in
any case, but it won't stop the exception from bubbling up to an upper layer.
So **on-complete** should only be understood as a language construction that
allows to define some clean-up actions.

Having said that, it's important to know the order in which these clauses are
processed by Mistral.

::

    taskA:
      action: my_action
      on-success:
        - taskB
        - taskC
      on-complete:
        - taskD
        - taskE

In this example, if the task action ('my_action') completes successfully then
Mistral will first process the 'on-success' clause and schedule tasks 'taskB'
and 'taskC', and then process the 'on-complete' clause and schedule 'taskD'
and 'taskE'. In most cases this processing order is not so important, but
there are situations when it matters, especially when both the 'on-success'
and 'on-complete' lists have `engine commands <#engine-commands>`__ that are
explained later in this document.

If 'on-success' and 'on-error' are both defined in the task definition, they
never clash because they are mutually exclusive, which means that only one of
them can be processed depending on whether the task action failed or
succeeded.
Transitions with expressions
''''''''''''''''''''''''''''

Task transitions can be determined by the success/error/completeness of the
previous tasks and also by additional guard expressions that can access any
data produced by upstream tasks as well as the workflow input. So in the
example above, task 'create_vm' could also have a YAQL expression on the
transition to task 'send_success_email' as follows:

::

    create_vm:
      ...
      on-success:
        - send_success_email: <% $.vm_id != null %>

And this would tell Mistral to run the 'send_success_email' task only if the
'vm_id' variable published by task 'create_vm' is not empty. Expressions can
also be applied to 'on-error' and 'on-complete', as sketched below.
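For instance, an illustrative sketch (not from the original document; it
assumes a counter variable ``attempt`` published by an upstream task):

::

    create_vm:
      ...
      on-error:
        - retry_create: <% $.attempt < 3 %>
        - send_error_email: <% $.attempt >= 3 %>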
Given the order in which Mistral processes 'on-success' (or 'on-error') and
'on-complete' clauses, it's important to understand what will happen if both
clauses have engine commands listed in them.

::

    taskA:
      action: my_action
      on-error:
        - taskB
        - fail
        - taskC
      on-complete:
        - taskD
        - pause
        - taskE

As explained above, 'on-complete' is always processed after 'on-success' (or
'on-error') because it plays a role similar to 'finally' in most
general-purpose programming languages. Let's consider the two scenarios that
can happen in the example above when 'taskA' runs, i.e. when its action
'my_action' runs.

- If 'my_action' fails, Mistral will schedule 'taskB' because it's listed in
  the 'on-error' clause, which is processed before 'on-complete', and then
  it will set the state of the workflow to ERROR. This prevents any new
  tasks from being scheduled, so none of 'taskC', 'taskD' and 'taskE' will
  ever be scheduled. In other words, the whole 'on-complete' clause will
  never be processed because the 'fail' command in the 'on-error' clause
  sets the workflow state to ERROR.
- If 'my_action' succeeds, the 'on-error' clause will be ignored, and since
  'on-success' is not defined, Mistral will process the 'on-complete'
  clause. While doing that, it will schedule 'taskD' first and then pause
  the workflow because of the 'pause' command. 'taskE' will be scheduled if
  this workflow is resumed manually at some later time through the API.

This illustrates that, while designing a workflow, it's important to know
precisely how Mistral processes 'on-success', 'on-error', 'on-complete' and
engine commands.

Engine commands and tasks
'''''''''''''''''''''''''

The **on-*** clauses in direct workflows can refer both to tasks and to
engine commands, as demonstrated earlier. It is also possible to use the
engine command names as task names. For example, one can create a task named
`noop` or `fail`. Such tasks override the engine commands, that is, the
action defined in these tasks will be executed instead of the engine
command. This is a way to succinctly extend the default behavior of the
Mistral engine or to provide side-effect free workflow examples.

Task names are resolved in the following order:

1. a task with the given name is searched for;
2. otherwise, the engine command with the given name is selected.

The first option that matches is executed.

Fork
''''

There are situations when we need to run more than one task after some task
has completed.

::

    create_vm:
      ...
      on-success:
        - register_vm_in_load_balancer
        - register_vm_in_dns

In this case Mistral will run both "register_xxx" tasks simultaneously,
which leads to multiple independent workflow routes being processed in
parallel.

Join
''''

Join flow control makes it possible to synchronize multiple parallel
workflow branches and aggregate their data.

Full Join (join: all)

::

    register_vm_in_load_balancer:
      ...
      on-success:
        - wait_for_all_registrations

    register_vm_in_dns:
      ...
      on-success:
        - wait_for_all_registrations

    try_to_do_something_without_registration:
      ...
      on-error:
        - wait_for_all_registrations

    wait_for_all_registrations:
      join: all
      action: send_email

When a task has the property "join" set to "all", the task will run only if
all upstream tasks (the ones that lead to this task) have completed and the
corresponding conditions have triggered. Task A is considered an upstream
task of task B if task A has task B mentioned in any of its "on-success",
"on-error" and "on-complete" clauses, regardless of guard expressions.

Partial Join (join: 2)

::

    register_vm_in_load_balancer:
      ...
      on-success:
        - wait_for_two_registrations

    register_vm_in_dns:
      ...
      on-success:
        - wait_for_two_registrations

    register_vm_in_zabbix:
      ...
      on-success:
        - wait_for_two_registrations

    wait_for_two_registrations:
      join: 2
      action: send_email

When a task has the property "join" set to a numeric value, the task will
run when at least this number of upstream tasks have completed and the
corresponding conditions have triggered. In the example above, the task
"wait_for_two_registrations" will run if any two of the "register_vm_xxx"
tasks complete.

Discriminator (join: one)

Discriminator is a special case of Partial Join where the "join" property
has the value 1. It means Mistral will wait for any one completed upstream
task. In this case, instead of 1 it is possible to specify the special
string value "one", which is introduced for symmetry with "all". It's up to
the user whether to use "1" or "one". A minimal sketch is shown below.
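Here the task and action names are purely illustrative; the pattern mirrors
the partial join example above, with ``join: one``:

::

    check_mirror_1:
      ...
      on-success:
        - proceed_with_first_mirror

    check_mirror_2:
      ...
      on-success:
        - proceed_with_first_mirror

    proceed_with_first_mirror:
      join: one
      action: send_email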
Reverse workflow
^^^^^^^^^^^^^^^^

In a reverse workflow, all relationships in the workflow task graph are
dependencies. In order to run this type of workflow we need to specify a
task that needs to be completed; it is conventionally called the 'target
task'. When Mistral Engine starts a workflow, it recursively identifies all
the dependencies that need to be completed first.

.. image:: /user/terminology/img/reverse_workflow.png

Figure 2 explains how a reverse workflow works. In the example, task **T1**
is chosen as the target task. So when the workflow starts, Mistral will run
only tasks **T7**, **T8**, **T5**, **T6**, **T2** and **T1** in the
specified order (starting from tasks that have no dependencies). Tasks
**T3** and **T4** won't be part of this workflow because there's no route in
the directed graph from **T1** to **T3** or **T4**.

YAML example
''''''''''''

::

    ---
    version: '2.0'

    create_vm_and_send_email:
      type: reverse
      input:
        - vm_name
        - image_id
        - flavor_id
      output:
        result: <% $.vm_id %>
      tasks:
        create_vm:
          action: nova.servers_create name=<% $.vm_name %> image=<% $.image_id %> flavor=<% $.flavor_id %>
          publish:
            vm_id: <% task(create_vm).result.id %>
        search_for_ip:
          action: nova.floating_ips_findall instance_id=null
          publish:
            vm_ip: <% task(search_for_ip).result[0].ip %>
        associate_ip:
          action: nova.servers_add_floating_ip server=<% $.vm_id %> address=<% $.vm_ip %>
          requires: [search_for_ip]
        send_email:
          action: send_email to='admin@mysite.org' body='Vm is created and id <% $.vm_id %> and ip address <% $.vm_ip %>'
          requires: [create_vm, associate_ip]

Reverse workflow task attributes
''''''''''''''''''''''''''''''''

- **requires** - List of tasks which should be executed before this task.
  *Optional*.

Processing collections
^^^^^^^^^^^^^^^^^^^^^^

YAML example
''''''''''''

::

    ---
    version: '2.0'

    create_vms:
      description: Creating multiple virtual servers using "with-items".
      input:
        - vm_names
        - image_ref
        - flavor_ref
      output:
        vm_ids: <% $.vm_ids %>
      tasks:
        create_servers:
          with-items: vm_name in <% $.vm_names %>
          action: nova.servers_create name=<% $.vm_name %> image=<% $.image_ref %> flavor=<% $.flavor_ref %>
          publish:
            vm_ids: <% task(create_servers).result.id %>
          on-success:
            - wait_for_servers
        wait_for_servers:
          with-items: vm_id in <% $.vm_ids %>
          action: nova.servers_find id=<% $.vm_id %> status='ACTIVE'
          retry:
            delay: 5
            count: <% $.vm_names.len() * 10 %>

The workflow "create_vms" in this example creates as many virtual servers as
we provide in the "vm_names" input parameter. E.g., if we specify
vm_names=["vm1", "vm2"], it will create servers with these names based on
the same image and flavor. This is possible thanks to the "with-items"
keyword, which makes an action or a workflow associated with a task run
multiple times.

The value of the "with-items" task property is an expression in the form:
'my_var' in <% YAQL_expression %>. Similarly, for a Jinja2 expression:
'my_var' in {{ Jinja2_expression }}.

The most common form is:

::

    with-items:
      - var1 in <% YAQL_expression_1 %> # or: var1 in {{ Jinja2_expression_1 }}
      - var2 in <% YAQL_expression_2 %> # or: var2 in {{ Jinja2_expression_2 }}
      ...
      - varN in <% YAQL_expression_N %> # or: varN in {{ Jinja2_expression_N }}

where the collections expressed as YAQL_expression_1, YAQL_expression_2, ...
YAQL_expression_N must be of equal size. When a task starts, Mistral will
iterate over all the collections in parallel, i.e. the number of iterations
will be equal to the length of the collections.
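To make the multi-collection form concrete, here is a minimal illustrative
sketch (the action and variable names are hypothetical) that pairs up two
equally sized collections on each iteration:

::

    create_volumes:
      with-items:
        - name in <% $.volume_names %>
        - size in <% $.volume_sizes %>
      action: cinder.volumes_create name=<% $.name %> size=<% $.size %>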
Note that when "with-items" is used, the task result (accessible in the
workflow context as <% task(task_name).result %>) will be a list containing
the results of the corresponding action/workflow calls. If at least one
action/workflow call fails, the whole task will go into the ERROR state.
It's also possible to apply a retry policy to tasks with the "with-items"
property. In this case the retry policy will relaunch all the
action/workflow calls according to the "with-items" configuration. Other
policies can also be used the same way as with regular, non "with-items"
tasks.

.. _actions-dsl:

Actions
-------

An action defines what exactly needs to be done when a task starts. An
action is similar to a regular function in a general-purpose programming
language like Python. It has a name and parameters. Mistral distinguishes
'system actions' and 'ad-hoc actions'.

System actions
^^^^^^^^^^^^^^

System actions are provided by Mistral out of the box and can be used by
anyone. It is also possible to add system actions to a specific Mistral
installation via a special plugin mechanism. Currently, the built-in system
actions are:

std.fail
''''''''

This action always fails. It can be used to manually fail a workflow task.

::

    wf:
      tasks:
        manual_fail:
          action: std.fail

The action can be passed the `error_data` parameter. This data will be used
as the action return value.

::

    wf:
      tasks:
        manual_fail:
          action: std.fail
          input:
            error_data: {x: 1, y: 2}

std.http
''''''''

Sends an HTTP request.

Input parameters:

- **url** - URL for the HTTP request. *Required*.
- **method** - Method for the HTTP request. *Optional*. Default is 'GET'.
- **params** - Dictionary or bytes to be sent in the query string for the
  HTTP request. *Optional*.
- **body** - Dictionary, bytes, or file-like object to send in the body of
  the HTTP request. *Optional*.
- **headers** - Dictionary of HTTP headers to send with the HTTP request.
  *Optional*.
- **cookies** - Dictionary of HTTP cookies to send with the HTTP request.
  *Optional*.
- **auth** - Auth to enable Basic/Digest/Custom HTTP Auth. *Optional*.
- **timeout** - Float describing the timeout of the request in seconds.
  *Optional*.
- **allow_redirects** - Boolean. Set to True if POST/PUT/DELETE redirect
  following is allowed. *Optional*.
- **proxies** - Dictionary mapping protocol to the URL of the proxy.
  *Optional*.
- **verify** - Either a boolean, in which case it controls whether we verify
  the server's TLS certificate, or a string, in which case it must be a path
  to a CA bundle to use. *Optional*. Default is 'True'.

Example:

::

    http_task:
      action: std.http url='google.com'
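A slightly fuller, illustrative sketch using some of the optional parameters
and publishing the response status code (the URL is hypothetical; the
'status' field is part of the structured result that the bundled action
implementation returns alongside 'content', 'headers', etc.):

::

    check_service:
      action: std.http
      input:
        url: 'https://example.org/api/health'
        method: 'GET'
        headers: {'Accept': 'application/json'}
        timeout: 10
      publish:
        http_status: <% task(check_service).result.status %>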
std.mistral_http
''''''''''''''''

This action works just like 'std.http' with one exception: when sending a
request, it inserts the following HTTP headers:

- **Mistral-Workflow-Name** - Name of the workflow that the current action
  execution is associated with.
- **Mistral-Workflow-Execution-Id** - Identifier of the workflow execution
  this action is associated with.
- **Mistral-Task-Id** - Identifier of the task execution this action
  execution is associated with.
- **Mistral-Action-Execution-Id** - Identifier of the current action
  execution.

Using this action makes it possible to do any work in an asynchronous manner
triggered via the HTTP protocol. That means that Mistral can send a request
using 'std.mistral_http', and then, at any time later, the system that
received this request can notify Mistral back (using its public API) with
the result of this action. The header **Mistral-Action-Execution-Id** is
required for this operation because it is used as a key to find the
corresponding action execution in Mistral to attach the result to.

std.email
'''''''''

Sends an email message via the SMTP protocol.

- **to_addrs** - Comma separated list of recipients. *Required*.
- **cc_addrs** - Comma separated list of CC recipients. *Optional*.
- **bcc_addrs** - Comma separated list of BCC recipients. *Optional*.
- **reply_to** - Comma separated list of email addresses. *Optional*.
- **subject** - Subject of the message. *Optional*.
- **body** - Text containing the message body. *Optional*.
- **html_body** - Text containing the message in HTML format. *Optional*.
- **from_addr** - Sender email address. *Required*.
- **smtp_server** - SMTP server host name. *Required*.
- **smtp_password** - SMTP server password. *Optional*.

Example:

::

    send_email_task:
      action: std.email
      input:
        to_addrs: [admin@mywebsite.org]
        subject: Hello from Mistral :)
        body: |
          Cheers! (:_:)

          -- Thanks, Mistral Team.
        from_addr: mistral@openstack.org
        smtp_server: smtp.google.com
        smtp_password: SECRET

The syntax of the 'std.email' action is pretty verbose. However, it can be
significantly simplified using ad-hoc actions. More about them
`below <#ad-hoc-actions>`__.

std.ssh
'''''''

Runs a Secure Shell command.

Input parameters:

- **cmd** - String containing a shell command that needs to be executed.
  *Required*.
- **host** - Host name that the command needs to be executed on. *Required*.
- **username** - User name to authenticate on the host. *Required*.
- **password** - User password to authenticate on the host. *Optional*.
- **private_key_filename** - Private key file name which will be used for
  authentication on the remote host. All private keys should be in the
  **/.ssh** directory on the executor host, or an absolute path to the key
  should be provided. The file needs to be accessible to the user account
  running the executor. *Optional*.

**NOTE**: Authentication using key pairs is supported; the key should be on
the Mistral executor server machine.

std.echo
''''''''

A simple action, mostly needed for testing purposes, that returns a
predefined result.

Input parameters:

- **output** - Value of any type that needs to be returned as a result of
  the action. *Required*.
- **delay** - Float value that defines with what delay (in seconds) the
  result should be returned. *Optional*.

std.javascript
''''''''''''''

Evaluates the given JavaScript code.

**NOTE**: std.js is an alias for std.javascript, i.e. std.js can be used in
place of std.javascript.

Input parameters:

- **script** - The text of the JavaScript snippet that needs to be executed.
  *Required*.
- **context** - This object will be assigned to the *$* JavaScript variable.
  The default value is None.

To use std.javascript, you need to install the ``py_mini_racer`` library and
set the *js_implementation* parameter to *py_mini_racer* in *mistral.conf*:

.. code-block:: bash

    pip install py_mini_racer
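The corresponding *mistral.conf* change might look like the following
sketch (assuming the option lives in the default section of the config
file):

.. code-block:: ini

    [DEFAULT]
    # Assumed location of the option; adjust to your installation.
    js_implementation = py_mini_racer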
Other available implementations:

- ``pyv8``
- ``v8eval``

Example with *context*:

::

    ---
    version: '2.0'

    generate_uuid:
      description: Generates a Universal Unique ID
      input:
        - radix: 16
      output:
        uuid: <% $.generated_uuid %>
      tasks:
        generate_uuid_task:
          action: std.js
          input:
            context: <% $ %>
            script: |
              return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
                var r = Math.random() * 16 | 0, v = c == 'x' ? r : (r & 0x3 | 0x8);
                return v.toString($.radix);
              });
          publish:
            generated_uuid: <% task().result %>

Another example for getting the current date and time:

::

    ---
    version: '2.0'

    get_date_workflow:
      description: Get the current date
      output:
        current_date: <% $.current_date %>
      tasks:
        get_date_task:
          action: std.js
          input:
            script: |
              var date = new Date();
              return date; // returns "2015-07-12T10:32:12.460000" or use date.toLocaleDateString() for "Sunday, July 12, 2015"
          publish:
            current_date: <% task().result %>

Ad-hoc actions
^^^^^^^^^^^^^^

An ad-hoc action is a special type of action that can be created by a user.
An ad-hoc action is always created as a wrapper around an existing system
action, and its main goal is to simplify using the same action many times
with a similar pattern.

YAML example
''''''''''''

::

    ---
    version: '2.0'

    error_email:
      input:
        - execution_id
      base: std.email
      base-input:
        to_addrs: ['admin@mywebsite.org']
        subject: 'Something went wrong with your Mistral workflow :('
        body: |
          Please take a look at Mistral Dashboard to find out what's wrong
          with your workflow execution <% $.execution_id %>.

          Everything's going to be alright!

          -- Sincerely, Mistral Team.
        from_addr: 'mistral@openstack.org'
        smtp_server: 'smtp.google.com'
        smtp_password: 'SECRET'

Once this action is uploaded to Mistral, any workflow will be able to use it
as follows:

::

    my_workflow:
      tasks:
        ...
        send_error_email:
          action: error_email execution_id=<% execution().id %>

Attributes
''''''''''

- **base** - Name of the base action that this action is built on top of.
  *Required*.
- **base-input** - Actual input parameters provided to the base action. See
  the example above. *Optional*.
- **input** - List of declared action parameters which should be specified
  as the corresponding task input. This attribute is optional and is used
  only for documentation purposes. Mistral does not enforce that actual
  input parameters exactly correspond to this list. Base parameters are
  calculated from the provided actual parameters using expressions, so
  what's used in expressions implicitly defines the real input parameters.
  The dictionary of actual input parameters (the expression context) is
  referenced as '$.' in YAQL and as '_.' in Jinja. Redundant parameters are
  simply ignored.
- **output** - Any data structure defining how to calculate the output of
  this action based on the output of the base action. It can optionally
  contain expressions that access properties of the base action output
  through the expression context.

Workbooks
---------

As mentioned before, workbooks still exist in Mistral Workflow Language
version 2, but purely for convenience. Using workbooks, users can combine
multiple entities of any type (workflows, actions and triggers) into one
document and upload it to the Mistral service. When uploading a workbook,
Mistral will parse it and save its workflows, actions and triggers as
independent objects which will be accessible via their own API endpoints
(/workflows, /actions and /triggers/). Once that's done, the workbook itself
no longer plays a role: users can start workflows and reference
workflows/actions/triggers as if they had been uploaded without a workbook
in the first place. However, if we want to modify these individual objects,
we can modify the same workbook definition and re-upload it to Mistral (or,
of course, modify them independently).
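For example, assuming the python-mistralclient CLI is available, uploading a
workbook and re-uploading it after a change might look like this (the file
name is illustrative):

.. code-block:: console

    $ mistral workbook-create my_workbook.yaml
    $ mistral workbook-update my_workbook.yaml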
Namespacing
^^^^^^^^^^^

One thing worth noting is that when using a workbook, Mistral uses its name
as a prefix for generating the final names of the workflows, actions and
triggers included in the workbook. To illustrate this principle, let's take
a look at the figure below.

.. image:: /user/terminology/img/workbook_namespacing.png

So after a workbook has been uploaded, its workflows and actions become
independent objects but with slightly different names.

YAML example
''''''''''''

::

    ---
    version: '2.0'

    name: my_workbook
    description: My set of workflows and ad-hoc actions

    workflows:
      local_workflow1:
        type: direct

        tasks:
          task1:
            action: local_action str1='Hi' str2=' Mistral!'
            on-complete:
              - task2

          task2:
            action: global_action
            ...

      local_workflow2:
        type: reverse

        tasks:
          task1:
            workflow: local_workflow1

          task2:
            workflow: global_workflow param1='val1' param2='val2'
            requires: [task1]
            ...

    actions:
      local_action:
        input:
          - str1
          - str2
        base: std.echo output="<% $.str1 %><% $.str2 %>"

**NOTE**: Even though the names of objects inside workbooks change upon
uploading, Mistral allows referencing between those objects using the local
names declared in the original workbook.

Attributes
^^^^^^^^^^

- **name** - Workbook name. *Required*.
- **description** - Workbook description. *Optional*.
- **tags** - String with arbitrary comma-separated values. *Optional*.
- **workflows** - Dictionary containing workflow definitions. *Optional*.
- **actions** - Dictionary containing ad-hoc action definitions. *Optional*.

Predefined values/Functions in execution data context
-----------------------------------------------------

Using expressions, it is possible to use some predefined values in Mistral
Workflow Language.

- **OpenStack context**
- **Task result**
- **Execution info**
- **Environment**

OpenStack context
^^^^^^^^^^^^^^^^^

The OpenStack context is available as **$.openstack**. It contains the
**auth_token**, **project_id**, **user_id**, **service_catalog**,
**user_name**, **project_name**, **roles** and **is_admin** properties.
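For example, a task could reference one of these properties directly in an
expression (an illustrative sketch):

::

    show_project:
      action: std.echo output=<% $.openstack.project_id %>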
Builtin functions in expressions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In addition to the current context (i.e. $ in YAQL and _ in Jinja2),
expressions have access to a set of predefined functions. The expression
languages come with their own individual built-in functions and operations;
this section describes the functions added by Mistral, which are available
in all the supported languages.

Tasks function
''''''''''''''

Signature: **tasks(workflow_execution_id=null, recursive=false, state=null,
flat=false)**

Description: This function allows users to filter all tasks by workflow
execution id and/or state. In addition, it is possible to get task
executions recursively and to flatten the task executions list.

Parameters:

#. **workflow_execution_id** - If provided, the tasks function will return
   task executions for a specific workflow execution (either the current
   execution or a different one). Otherwise it will return all task
   executions that match the other parameters. *Optional.*
#. **recursive** - A boolean value. If true, all task executions within
   nested workflow executions will be returned. This is usually used in
   combination with a specific workflow_execution_id where you still want to
   see the nested workflows' task executions. *Optional.* False by default.
#. **state** - If provided, the task executions will be filtered by their
   current state. If state isn't provided, all task executions that match
   the other parameters will be returned. *Optional.*
#. **flat** - If true, only list the task executions that match at least one
   of the following conditions:

   * task executions of type action
   * task executions of type workflow that have a different state from the
     workflow execution they triggered.

   For example, if used with a specific workflow_execution_id and the state
   ERROR, it will return tasks that erred despite the workflow succeeding.
   This can mean that there was an error in the task itself, like an invalid
   expression in publish. *Optional.* False by default.

Example:

Workflow definition:

::

    ---
    version: "v2.0"

    wf:
      tasks:
        task:
          action: std.noop
          publish:
            all_tasks_in_this_wf_yaql: <% tasks(execution().id) %>
            all_tasks_in_this_wf_jinja: "{{ tasks(execution().id) }}"

            all_tasks_in_error_yaql: <% tasks(null, false, ERROR) %>
            all_tasks_in_error_jinja: "{{ tasks(None, false, 'ERROR') }}"
            all_tasks_in_error_yaql_with_kw: <% tasks(state => ERROR) %>
            all_tasks_in_error_jinja_with_kw: "{{ tasks(state='ERROR') }}"

            all_tasks_yaql_option1: <% tasks() %>
            all_tasks_yaql_option2: <% tasks(null, false, null, false) %>
            all_tasks_jinja_option1: "{{ tasks() }}"
            all_tasks_jinja_option2: "{{ tasks(None, false, None, false) }}"

Task publish result (partial, to keep the documentation short):

.. code-block:: json

    {
        "all_tasks_in_error_yaql": [
            {
                "id": "3d363d4b-8c19-48fa-a9a0-8721dc5469f2",
                "name": "fail_task",
                "type": "ACTION",
                "workflow_execution_id": "c0a4d2ff-0127-4826-8370-0570ef8cad80",
                "state": "ERROR",
                "state_info": "Failed to run action [action_ex_id=bcb04b28-6d50-458e-9b7e-a45a5ff1ca01, action_cls='', attributes='{}', params='{}']\n Fail action expected exception.",
                "result": "Failed to run action [action_ex_id=bcb04b28-6d50-458e-9b7e-a45a5ff1ca01, action_cls='', attributes='{}', params='{}']\n Fail action expected exception.",
                "published": {},
                "spec": {
                    "action": "std.fail",
                    "version": "2.0",
                    "type": "direct",
                    "name": "fail_task"
                }
            }
        ],
        "all_tasks_in_this_wf_jinja": [
            {
                "id": "83a34bfe-268c-46f5-9e5c-c16900540084",
                "name": "task",
                "type": "ACTION",
                "workflow_execution_id": "899a3318-b5c0-4860-82b4-a5bd147a4643",
                "state": "SUCCESS",
                "state_info": null,
                "result": null,
                "published": {},
                "spec": {
                    "action": "std.noop",
                    "version": "2.0",
                    "type": "direct",
                    "name": "task",
                    "publish": {
                        "all_tasks_in_error_yaql": "<% tasks(null, false, ERROR) %>",
                        "all_tasks_in_error_jinja": "{{ tasks(None, false, 'ERROR') }}",
                        "all_tasks_yaql_option2": "<% tasks(null, false, false, false) %>",
                        "all_tasks_yaql_option1": "<% tasks() %>",
                        "all_tasks_jinja_option1": "{{ tasks() }}",
                        "all_tasks_in_error_jinja_with_kw": "{{ tasks(state='ERROR') }}",
                        "all_tasks_jinja_option2": "{{ tasks(None, false, None, false) }}",
                        "all_tasks_in_this_wf_jinja": "{{ tasks(execution().id) }}",
                        "all_tasks_in_this_wf_yaql": "<% tasks(execution().id) %>"
                    }
                }
            }
        ],
        "_comment": "other fields were dropped to keep docs short"
    }

Task result
'''''''''''

The task result is available as **task().result**. It contains the task
result and directly depends on the action output structure. Note that the
*task()* function itself returns more than only the task result. It returns
the following fields of task executions:

* **id** - task execution UUID.
* **name** - task execution name.
* **spec** - task execution spec dict (loaded from Mistral Workflow
  Language).
* **state** - task execution state.
* **state_info** - task execution state info.
* **result** - task execution result. For a non 'with-items' task, it's
  simply the result of the task's action/sub-workflow execution. For a
  'with-items' task, it will be a list of results of the corresponding
  action/sub-workflow executions.
* **published** - task execution published variables.

Execution info
^^^^^^^^^^^^^^

Execution info is available as **execution()**. It contains information
about the execution itself, such as **id**, **wf_spec**, **input**,
**start_params** and **root_execution_id**.

Executions function
'''''''''''''''''''

Signature: **executions(id=null, root_execution_id=null, state=null,
from_time=null, to_time=null)**

Description: This function allows users to filter all executions by
execution id, root_execution_id, state and/or created_at time.

Parameters:

#. **id** - If provided, will return a list of executions with that id.
   Otherwise it will return all executions that match the other parameters.
   *Optional.*
#. **root_execution_id** - Similar to id above: if provided, will return a
   list of executions with that root_execution_id. Otherwise it will return
   all executions that match the other parameters. *Optional.*
#. **state** - If provided, the executions will be filtered by their current
   state. If state isn't provided, all executions that match the other
   parameters will be returned. *Optional.*
#. **from_time** - If provided, the executions will be filtered by their
   created_at time being greater than or equal to the from_time parameter.
   If from_time isn't provided, all executions that match the other
   parameters will be returned. The from_time parameter can be provided in
   the format *YYYY-MM-DD hh:mm:ss*. *Optional.*
#. **to_time** - If provided, the executions will be filtered by their
   created_at time being strictly less than the to_time parameter (strictly
   less than, unlike from_time, which is inclusive). If to_time isn't
   provided, all executions that match the other parameters will be
   returned. The to_time parameter can be provided in the format
   *YYYY-MM-DD hh:mm:ss*. *Optional.*

Example:

Workflow definition:

::

    ---
    version: "v2.0"

    wf:
      tasks:
        task:
          action: std.noop
          publish:
            all_executions_yaql: <% executions() %>
            all_child_executions_of_this_execution: "{{ executions(root_execution_id=execution().id) }}"

            all_executions_in_error_yaql: <% executions(null, null, ERROR) %>
            all_executions_in_error_jinja: "{{ executions(None, None, 'ERROR') }}"
            all_executions_in_error_yaql_with_kw: <% executions(state => ERROR) %>
            all_executions_in_error_jinja_with_kw: "{{ executions(state='ERROR') }}"

            all_executions_filtered_date_jinja: "{{ executions(to_time='2016-12-01 15:01:00') }}"

Environment
^^^^^^^^^^^

Environment info is available as **env()**. It is passed when the user
submits a workflow execution and contains the variables specified by the
user.

Global
^^^^^^

Global variables are available via **global(variable_name)**. If the
variable doesn't exist, then None is returned.
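As an illustrative sketch (the variable names are hypothetical), both
functions can be used inside task expressions:

::

    notify_admin:
      action: std.echo output=<% env().admin_email %>
      publish:
        last_vm: <% global(last_created_vm) %>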
mistral-10.0.0.0b3/doc/source/user/wf_namespaces.rst

Workflow namespaces
===================

General
-------

Mistral allows creating workflows within a namespace, so it is possible to
create many workflows with the same name as long as they are in different
namespaces. This is useful when a user already has many workflows that are
connected to each other (some are sub-workflows of others), one of the
workflow names is already in use, and the user does not want to edit that
workflow and all the ones referencing it, or combine them into a workbook.

This is possible because the namespace is not a part of the Mistral workflow
language. If you want to use a namespace, you need to provide an additional
parameter to the corresponding operation run via the REST API or CLI. If
it's not provided, Mistral operates within the default namespace.

REST API parameters
-------------------

In order to use namespaces, a number of Mistral REST API methods have the
optional **namespace** parameter:

* create a workflow definition within a namespace::

    POST /v2/workflows?namespace=<namespace>

* delete a workflow definition within a namespace::

    DELETE /v2/workflows/<workflow_identifier>?namespace=<namespace>

* get a workflow definition within a namespace::

    GET /v2/workflows/<workflow_identifier>?namespace=<namespace>

* get all workflow definitions within a given namespace::

    GET /v2/workflows?namespace=<namespace>

* update a workflow definition within a given namespace::

    PUT /v2/workflows?namespace=<namespace>

* create an execution of a workflow that belongs to a non-default
  namespace::

    POST /v2/executions

    {
        "workflow_name": "<workflow_name>",
        "workflow_namespace": "<namespace>",
        ...
    }

Resolving a workflow definition
-------------------------------

It's important to understand how Mistral resolves a workflow definition,
taking namespaces into account, when running workflows, and how namespaces
work in the case of workflow hierarchies. The rules are the following:

* If a user launches a workflow via the API (or CLI), the workflow name and
  the corresponding namespace are provided explicitly, so Mistral will look
  for a workflow definition with the given name under the provided
  namespace. If a namespace is not specified, the default namespace (an
  empty namespace value) is used. If Mistral doesn't find a workflow
  definition with the given name and namespace, it returns an error
  response.
* If a workflow is launched as a sub-workflow, i.e. it has a parent task in
  a different workflow, then Mistral uses the namespace of the parent
  workflow to resolve the workflow definition. In other words, Mistral
  propagates the namespace to its child workflows. However, **if a workflow
  definition does not exist in the namespace of the parent workflow, then
  Mistral will try to find it in the default namespace.** This is different
  from the previous case when a workflow is launched via the API: there,
  Mistral would return an error instead of trying to find a workflow
  definition in the default namespace.
* Workflows declared as part of workbooks are always located in the default
  namespace.

To illustrate how this all works, let's look at the following workflow
definitions:

::

    ---
    version: '2.0'

    wf1:
      tasks:
        t1:
          workflow: wf2

::

    ---
    version: '2.0'

    wf2:
      tasks:
        t2:
          workflow: wf3

::

    ---
    version: '2.0'

    wf3:
      tasks:
        t3:
          action: std.noop

::

    ---
    version: '2.0'

    wf3:
      tasks:
        should_not_run:
          action: std.fail

So the call chain looks like this:

.. code-block:: console

    wf1 -> wf2 -> wf3

However, notice that we have two workflows with the name "wf3".
Let's assume that these workflow definitions are uploaded to Mistral under these namespaces: +----+---------------------+-----------+ | ID | name | namespace | +----+---------------------+-----------+ | 1 | wf1 | abc | +----+---------------------+-----------+ | 2 | wf2 | | +----+---------------------+-----------+ | 3 | wf3 | abc | +----+---------------------+-----------+ | 4 | wf3 | | +----+---------------------+-----------+ And we create a workflow execution like this via API: .. code-block:: console POST /v2/executions { "workflow_name": "wf1", "workflow_namespace": "abc" } In this case, Mistral will: * Find "wf1" in the namespace "abc" (it doesn't exist in the default namespace anyway) * Try to find "wf2" in the namespace "abc" and since it doesn't exist there Mistral will find it in the default namespace * Find "wf3" in the namespace "abc" because it is propagated from "wf1" However, if we launch a workflow like this: .. code-block:: console POST /v2/executions { "workflow_name": "wf2" } We'll get the call chain .. code-block:: console wf2 -> wf3 And both workflow definitions will be taken from the default namespace because a non-default namespace wasn't provided to the endpoint. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.097567 mistral-10.0.0.0b3/etc/0000755000175000017500000000000000000000000014666 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/etc/README.mistral.conf0000644000175000017500000000024700000000000020147 0ustar00coreycorey00000000000000The mistral.conf sample file is no longer generated and maintained in Trunk. To generate your own version of mistral.conf, use the following command: tox -egenconfig././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/etc/event_definitions.yml.sample0000644000175000017500000000027200000000000022406 0ustar00coreycorey00000000000000- event_types: - compute.instance.create.* properties: resource_id: <% $.payload.instance_id %> project_id: <% $.context.project_id %> user_id: <% $.context.user_id %> ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/etc/logging.conf.sample0000644000175000017500000000111700000000000020443 0ustar00coreycorey00000000000000[loggers] keys=root [handlers] keys=consoleHandler, fileHandler [formatters] keys=verboseFormatter, simpleFormatter [logger_root] level=DEBUG handlers=consoleHandler, fileHandler [handler_consoleHandler] class=StreamHandler level=INFO formatter=simpleFormatter args=(sys.stdout,) [handler_fileHandler] class=FileHandler level=INFO formatter=verboseFormatter args=("/var/log/mistral.log",) [formatter_verboseFormatter] format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s datefmt= [formatter_simpleFormatter] format=%(asctime)s %(levelname)s [-] %(message)s datefmt= ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/etc/logging.conf.sample.rotating0000644000175000017500000000117100000000000022271 0ustar00coreycorey00000000000000[loggers] keys=root [handlers] keys=consoleHandler, fileHandler [formatters] keys=verboseFormatter, simpleFormatter [logger_root] level=DEBUG handlers=consoleHandler, fileHandler [handler_consoleHandler] class=StreamHandler level=INFO formatter=simpleFormatter 
args=(sys.stdout,) [handler_fileHandler] class=logging.handlers.RotatingFileHandler level=INFO formatter=verboseFormatter args=("/var/log/mistral.log", "a", 10485760, 5) [formatter_verboseFormatter] format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s datefmt= [formatter_simpleFormatter] format=%(asctime)s %(levelname)s [-] %(message)s datefmt= ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/etc/policy.json0000644000175000017500000000000300000000000017051 0ustar00coreycorey00000000000000{} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/etc/wf_trace_logging.conf.sample0000644000175000017500000000234300000000000022317 0ustar00coreycorey00000000000000[loggers] keys=workflow_trace,profiler_trace,root [handlers] keys=consoleHandler, wfTraceFileHandler, profilerFileHandler, fileHandler [formatters] keys=wfFormatter, profilerFormatter, simpleFormatter, verboseFormatter [logger_workflow_trace] level=INFO handlers=consoleHandler, wfTraceFileHandler qualname=workflow_trace [logger_profiler_trace] level=INFO handlers=profilerFileHandler qualname=profiler_trace [logger_root] level=INFO handlers=fileHandler [handler_fileHandler] class=FileHandler level=INFO formatter=verboseFormatter args=("/var/log/mistral.log",) [handler_consoleHandler] class=StreamHandler level=INFO formatter=simpleFormatter args=(sys.stdout,) [handler_wfTraceFileHandler] class=FileHandler level=INFO formatter=wfFormatter args=("/var/log/mistral_wf_trace.log",) [handler_profilerFileHandler] class=FileHandler level=INFO formatter=profilerFormatter args=("/var/log/mistral_osprofile.log",) [formatter_verboseFormatter] format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s datefmt= [formatter_simpleFormatter] format=%(asctime)s %(levelname)s [-] %(message)s datefmt= [formatter_wfFormatter] format=%(asctime)s WF [-] %(message)s datefmt= [formatter_profilerFormatter] format=%(message)s datefmt= ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/etc/wf_trace_logging.conf.sample.rotating0000644000175000017500000000254100000000000024145 0ustar00coreycorey00000000000000[loggers] keys=workflow_trace,profiler_trace,root [handlers] keys=consoleHandler, wfTraceFileHandler, profilerFileHandler, fileHandler [formatters] keys=wfFormatter, profilerFormatter, simpleFormatter, verboseFormatter [logger_workflow_trace] level=INFO handlers=consoleHandler, wfTraceFileHandler qualname=workflow_trace [logger_profiler_trace] level=INFO handlers=profilerFileHandler qualname=profiler_trace [logger_root] level=INFO handlers=fileHandler [handler_fileHandler] class=logging.handlers.RotatingFileHandler level=INFO formatter=verboseFormatter args=("/var/log/mistral.log", "a", 10485760, 5) [handler_consoleHandler] class=StreamHandler level=INFO formatter=simpleFormatter args=(sys.stdout,) [handler_wfTraceFileHandler] class=logging.handlers.RotatingFileHandler level=INFO formatter=wfFormatter args=("/var/log/mistral_wf_trace.log", "a", 10485760, 5) [handler_profilerFileHandler] class=logging.handlers.RotatingFileHandler level=INFO formatter=profilerFormatter args=("/var/log/mistral_osprofile.log", "a", 10485760, 5) [formatter_verboseFormatter] format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s datefmt= [formatter_simpleFormatter] format=%(asctime)s %(levelname)s [-] %(message)s 
datefmt= [formatter_wfFormatter] format=%(asctime)s WF [-] %(message)s datefmt= [formatter_profilerFormatter] format=%(message)s datefmt= ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/lower-constraints.txt0000644000175000017500000000206300000000000020352 0ustar00coreycorey00000000000000alembic==0.9.6 Babel==2.3.4 cachetools==2.0.0 coverage==4.0 croniter==0.3.4 doc8==0.6.0 dogpile.cache==0.6.2 eventlet==0.20.0 fixtures==3.0.0 Jinja2==2.10 jsonschema==2.6.0 keystonemiddleware==4.18.0 kombu==4.6.1 mistral-lib==1.4.0 mock==2.0.0 networkx==2.3;python_version>='3.4' networkx==1.10;python_version<'3.0' nose==1.3.7 oslo.concurrency==3.26.0 oslo.config==5.2.0 oslo.context==2.20.0 oslo.db==4.40.0 oslo.i18n==3.15.3 oslo.log==3.36.0 oslo.messaging==5.29.0 oslo.middleware==3.31.0 oslo.policy==1.30.0 oslo.serialization==2.21.1 oslo.service==1.24.0 oslo.utils==3.37.0 oslosphinx==4.7.0 oslotest==3.2.0 osprofiler==1.4.0 paramiko==2.4.1 pbr==2.0.0 pecan==1.2.1 Pygments==2.2.0 PyJWT==1.5 PyYAML==5.1 reno==2.5.0 requests-mock==1.2.0 requests==2.14.2 six==1.10.0 Sphinx==1.8.0 sphinxcontrib-httpdomain==1.3.0 sphinxcontrib-pecanwsme==0.10.0 sphinxcontrib-websupport==1.0.1 SQLAlchemy==1.2.5 stestr==2.0.0 stevedore==1.20.0 tempest==17.1.0 tenacity==5.0.1 testtools==2.2.0 tooz==1.58.0 unittest2==1.1.0 WebOb==1.7.1 WebTest==2.0.27 WSME==0.8.0 yaql==1.1.3 zake==0.1.6 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.101567 mistral-10.0.0.0b3/mistral/0000755000175000017500000000000000000000000015566 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/__init__.py0000644000175000017500000000000000000000000017665 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/_i18n.py0000644000175000017500000000150500000000000017057 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. 
See https://docs.openstack.org/oslo.i18n/latest/user/usage.html """ import oslo_i18n DOMAIN = 'mistral' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.101567 mistral-10.0.0.0b3/mistral/actions/0000755000175000017500000000000000000000000017226 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/actions/__init__.py0000644000175000017500000000000000000000000021325 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/actions/action_factory.py0000644000175000017500000000170200000000000022604 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import importutils def construct_action_class(action_class_str, attributes): # Rebuild action class and restore attributes. action_class = importutils.import_class(action_class_str) unique_action_class = type( action_class.__name__, (action_class,), attributes ) return unique_action_class ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/actions/std_actions.py0000644000175000017500000004246400000000000022124 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2014 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from email import header from email.mime import multipart from email.mime import text import smtplib import time from oslo_log import log as logging import requests import six from mistral import exceptions as exc from mistral import utils from mistral.utils import javascript from mistral.utils import ssh_utils from mistral_lib import actions LOG = logging.getLogger(__name__) class EchoAction(actions.Action): """Echo action. This action just returns a configured value as a result without doing anything else. The value of such action implementation is that it can be used in development (for testing), demonstration and designing of workflows themselves where echo action can play the role of temporary stub. 
""" def __init__(self, output, delay=0): super(EchoAction, self).__init__() self.output = output try: self._delay = float(delay) self._delay = 0 if self._delay < 0 else self._delay except ValueError: self._delay = 0 def run(self, context): LOG.info( 'Running echo action [output=%s, delay=%s]', self.output, self._delay ) time.sleep(self._delay) return self.output def test(self, context): return 'Echo' class NoOpAction(actions.Action): """No-operation action. This action does nothing. It can be mostly useful for testing and debugging purposes. """ def run(self, context): LOG.info('Running no-op action') return None def test(self, context): return None class AsyncNoOpAction(NoOpAction): """Asynchronous no-operation action.""" def is_sync(self): return False class FailAction(actions.Action): """'Always fail' action. If you pass the `error_data` parameter, this action will be failed and return this data as error data. Otherwise, the action just throws an instance of ActionException. This behavior is useful in a number of cases, especially if we need to test a scenario where some of workflow tasks fail. :param error_data: Action will be failed with this data """ def __init__(self, error_data=None): super(FailAction, self).__init__() self.error_data = error_data def run(self, context): LOG.info('Running fail action.') if self.error_data: return actions.Result(error=self.error_data) raise exc.ActionException('Fail action expected exception.') def test(self, context): if self.error_data: return actions.Result(error=self.error_data) raise exc.ActionException('Fail action expected exception.') class HTTPAction(actions.Action): """HTTP action. :param url: URL for the new HTTP request. :param method: (optional, 'GET' by default) method for the new HTTP request. :param params: (optional) Dictionary or bytes to be sent in the query string for the HTTP request. :param body: (optional) Dictionary, bytes, or file-like object to send in the body of the HTTP request. :param json: (optional) A JSON serializable Python object to send in the body of the HTTP request. :param headers: (optional) Dictionary of HTTP Headers to send with the HTTP request. :param cookies: (optional) Dict or CookieJar object to send with the HTTP request. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) Float describing the timeout of the request in seconds. :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. 
""" def __init__(self, url, method="GET", params=None, body=None, json=None, headers=None, cookies=None, auth=None, timeout=None, allow_redirects=None, proxies=None, verify=None): super(HTTPAction, self).__init__() if auth and len(auth.split(':')) == 2: self.auth = (auth.split(':')[0], auth.split(':')[1]) else: self.auth = auth if isinstance(headers, dict): for key, val in headers.items(): if isinstance(val, (six.integer_types, float)): headers[key] = str(val) if body and json: raise exc.ActionException( "Only one of the parameters 'json' and 'body' can be passed" ) self.url = url self.method = method self.params = params self.body = utils.to_json_str(body) if isinstance(body, dict) else body self.json = json self.headers = headers self.cookies = cookies self.timeout = timeout self.allow_redirects = allow_redirects self.proxies = proxies self.verify = verify def run(self, context): LOG.info( "Running HTTP action " "[url=%s, method=%s, params=%s, body=%s, json=%s," " headers=%s, cookies=%s, auth=%s, timeout=%s," " allow_redirects=%s, proxies=%s, verify=%s]", self.url, self.method, self.params, self.body, self.json, self.headers, self.cookies, self.auth, self.timeout, self.allow_redirects, self.proxies, self.verify ) try: url_data = six.moves.urllib.parse.urlsplit(self.url) if 'https' == url_data.scheme: action_verify = self.verify else: action_verify = None resp = requests.request( self.method, self.url, params=self.params, data=self.body, json=self.json, headers=self.headers, cookies=self.cookies, auth=self.auth, timeout=self.timeout, allow_redirects=self.allow_redirects, proxies=self.proxies, verify=action_verify ) except Exception as e: LOG.exception( "Failed to send HTTP request for action execution: %s", context.execution.action_execution_id ) raise exc.ActionException("Failed to send HTTP request: %s" % e) LOG.info( "HTTP action response:\n%s\n%s", resp.status_code, resp.content ) # Represent important resp data as a dictionary. try: content = resp.json(encoding=resp.encoding) except Exception: LOG.debug("HTTP action response is not json.") content = resp.content if content and resp.encoding not in (None, 'utf-8'): content = content.decode(resp.encoding).encode('utf-8') _result = { 'content': content, 'status': resp.status_code, 'headers': dict(resp.headers.items()), 'url': resp.url, 'history': resp.history, 'encoding': resp.encoding, 'reason': resp.reason, 'cookies': dict(resp.cookies.items()), 'elapsed': resp.elapsed.total_seconds() } if resp.status_code not in range(200, 307): return actions.Result(error=_result) return _result def test(self, context): # TODO(rakhmerov): Implement. return None class MistralHTTPAction(HTTPAction): def run(self, context): self.headers = self.headers or {} exec_ctx = context.execution self.headers.update({ 'Mistral-Workflow-Name': exec_ctx.workflow_name, 'Mistral-Workflow-Execution-Id': exec_ctx.workflow_execution_id, 'Mistral-Task-Id': exec_ctx.task_execution_id, 'Mistral-Action-Execution-Id': exec_ctx.action_execution_id, 'Mistral-Callback-URL': exec_ctx.callback_url, }) return super(MistralHTTPAction, self).run(context) def is_sync(self): return False def test(self, context): return None class SendEmailAction(actions.Action): def __init__(self, from_addr, to_addrs, smtp_server, reply_to=None, cc_addrs=None, bcc_addrs=None, smtp_password=None, subject=None, body=None, html_body=None): super(SendEmailAction, self).__init__() # TODO(dzimine): validate parameters # Task invocation parameters. 
self.to = to_addrs self.cc = cc_addrs or [] self.bcc = bcc_addrs or [] self.reply_to = reply_to or [] self.subject = subject or "" self.body = body or "" self.html_body = html_body # Action provider settings. self.smtp_server = smtp_server self.sender = from_addr self.password = smtp_password def run(self, context): LOG.info( "Sending email message " "[from=%s, to=%s, reply_to=%s, cc=%s, bcc=%s, subject=%s, " "using smtp=%s, body=%s...]", self.sender, self.to, self.reply_to, self.cc, self.bcc, self.subject, self.smtp_server, self.body[:128] ) if not self.html_body: message = text.MIMEText(self.body, _charset='utf-8') else: message = multipart.MIMEMultipart('alternative') message.attach(text.MIMEText(self.body, 'plain', _charset='utf-8')) message.attach(text.MIMEText(self.html_body, 'html', _charset='utf-8')) message['Subject'] = header.Header(self.subject, 'utf-8') message['From'] = self.sender message['Reply-To'] = header.Header(', '.join(self.reply_to)) message['To'] = ', '.join(self.to) if self.cc: message['cc'] = ', '.join(self.cc) rcpt = self.cc + self.bcc + self.to try: s = smtplib.SMTP(self.smtp_server) if self.password is not None: # Sequence to request TLS connection and log in (RFC-2487). s.ehlo() s.starttls() s.ehlo() s.login(self.sender, self.password) s.sendmail(from_addr=self.sender, to_addrs=rcpt, msg=message.as_string()) except (smtplib.SMTPException, IOError) as e: raise exc.ActionException("Failed to send an email message: %s" % e) def test(self, context): # Just logging the operation since this action is not supposed # to return a result. LOG.info( "Sending email message " "[from=%s, to=%s, reply_to=%s, cc=%s, bcc=%s, subject=%s, " " using smtp=%s, body=%s...]", self.sender, self.to, self.reply_to, self.cc, self.bcc, self.subject, self.smtp_server, self.body[:128] ) class SSHAction(actions.Action): """Runs Secure Shell (SSH) command on provided single or multiple hosts. It is allowed to provide either a single host or a list of hosts in action parameter 'host'. In case of a single host the action result will be a single value, otherwise a list of results provided in the same order as provided hosts. 
""" @property def _execute_cmd_method(self): return ssh_utils.execute_command def __init__(self, cmd, host, username, password="", private_key_filename=None, private_key=None): super(SSHAction, self).__init__() self.cmd = cmd self.host = host self.username = username self.password = password self.private_key_filename = private_key_filename self.private_key = private_key self.params = { 'cmd': self.cmd, 'host': self.host, 'username': self.username, 'password': self.password, 'private_key_filename': self.private_key_filename, 'private_key': self.private_key } def run(self, context): def raise_exc(parent_exc=None): message = ("Failed to execute ssh cmd " "'%s' on %s" % (self.cmd, self.host)) # We suppress the actual parent error messages in favor of # more generic ones as we might be leaking information to the CLI if parent_exc: # The full error message needs to be logged regardless LOG.exception(message + " Exception: %s", str(parent_exc)) raise exc.ActionException(message) try: results = [] if not isinstance(self.host, list): self.host = [self.host] for host_name in self.host: self.params['host'] = host_name status_code, result = self._execute_cmd_method(**self.params) if status_code > 0: return raise_exc() else: results.append(result) if len(results) > 1: return results return result except Exception as e: return raise_exc(parent_exc=e) def test(self, context): return utils.to_json_str(self.params) class SSHProxiedAction(SSHAction): @property def _execute_cmd_method(self): return ssh_utils.execute_command_via_gateway def __init__(self, cmd, host, username, private_key_filename, gateway_host, gateway_username=None, password=None, proxy_command=None, private_key=None): super(SSHProxiedAction, self).__init__( cmd, host, username, password, private_key_filename, private_key ) self.gateway_host = gateway_host self.gateway_username = gateway_username self.params.update( { 'gateway_host': gateway_host, 'gateway_username': gateway_username, 'proxy_command': proxy_command } ) class JavaScriptAction(actions.Action): """Evaluates given JavaScript. """ def __init__(self, script, context=None): """Context here refers to a javasctript context Not the usual mistral context. That is passed during the run method """ super(JavaScriptAction, self).__init__() self.script = script self.js_context = context def run(self, context): try: script = """function f() { %s } f() """ % self.script return javascript.evaluate(script, self.js_context) except Exception as e: raise exc.ActionException("JavaScriptAction failed: %s" % str(e)) def test(self, context): return self.script class SleepAction(actions.Action): """Sleep action. This action sleeps for given amount of seconds. It can be mostly useful for testing and debugging purposes. 
""" def __init__(self, seconds=1): super(SleepAction, self).__init__() try: self._seconds = int(seconds) self._seconds = 0 if self._seconds < 0 else self._seconds except ValueError: self._seconds = 0 def run(self, context): LOG.info('Running sleep action [seconds=%s]', self._seconds) time.sleep(self._seconds) return None def test(self, context): time.sleep(1) return None class TestDictAction(actions.Action): """Generates test dict.""" def __init__(self, size=0, key_prefix='', val=''): super(TestDictAction, self).__init__() self.size = size self.key_prefix = key_prefix self.val = val def run(self, context): LOG.info( 'Running test_dict action [size=%s, key_prefix=%s, val=%s]', self.size, self.key_prefix, self.val ) res = {} for i in range(self.size): res['%s%s' % (self.key_prefix, i)] = self.val return res def test(self, context): return {} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.101567 mistral-10.0.0.0b3/mistral/api/0000755000175000017500000000000000000000000016337 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/__init__.py0000644000175000017500000000000000000000000020436 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/access_control.py0000644000175000017500000001055300000000000021716 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Access Control API server.""" from keystonemiddleware import auth_token from oslo_config import cfg from oslo_policy import policy from mistral import exceptions as exc from mistral import policies CONF = cfg.CONF _ENFORCER = None def setup(app): if cfg.CONF.pecan.auth_enable and cfg.CONF.auth_type == 'keystone': conf = dict(cfg.CONF.keystone_authtoken) # Change auth decisions of requests to the app itself. conf.update({'delay_auth_decision': True}) # NOTE(rakhmerov): Policy enforcement works only if Keystone # authentication is enabled. No support for other authentication # types at this point. _ensure_enforcer_initialization() return auth_token.AuthProtocol(app, conf) else: return app def enforce(action, context, target=None, do_raise=True, exc=exc.NotAllowedException): """Verifies that the action is valid on the target in this context. :param action: String, representing the action to be checked. This should be colon separated for clarity. i.e. ``workflows:create`` :param context: Mistral context. :param target: Dictionary, representing the object of the action. For object creation, this should be a dictionary representing the location of the object. e.g. ``{'project_id': context.project_id}`` :param do_raise: if True (the default), raises specified exception. :param exc: Exception to be raised if not authorized. Default is mistral.exceptions.NotAllowedException. 
:return: returns True if authorized and False if not authorized and do_raise is False. """ target_obj = { 'project_id': context.project_id, 'user_id': context.user_id, } target_obj.update(target or {}) policy_context = context.to_policy_values() # Because policy.json or policy.yaml example in Mistral repo still uses # the rule 'is_admin: True', we insert 'is_admin' key to the default # policy values. policy_context['is_admin'] = context.is_admin _ensure_enforcer_initialization() return _ENFORCER.authorize( action, target_obj, policy_context, do_raise=do_raise, exc=exc ) def get_enforcer(): """Entrypoint that must return the raw oslo.policy enforcer obj. This is utilized by the command-line policy tools. :returns: :class:`oslo_policy.policy.Enforcer` """ # Here we pass an empty list of arguments because there aren't any # arguments that oslo.config or oslo.policy shouldn't already understand # from the CONF object. This makes things easier here because we don't have # to parse arguments passed in from the command line and remove unexpected # arguments before building a Config object. CONF([], project='mistral') enforcer = policy.Enforcer(CONF) enforcer.register_defaults(policies.list_rules()) enforcer.load_rules() return enforcer def _ensure_enforcer_initialization(): global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(cfg.CONF) _ENFORCER.register_defaults(policies.list_rules()) _ENFORCER.load_rules() def get_limited_to(headers): """Return the user and project the request should be limited to. :param headers: HTTP headers dictionary :return: A tuple of (user, project), set to None if there's no limit on one of these. """ return headers.get('X-User-Id'), headers.get('X-Project-Id') def get_limited_to_project(headers): """Return the project the request should be limited to. :param headers: HTTP headers dictionary :return: A project, or None if there's no limit on it. """ return get_limited_to(headers)[1] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/app.py0000644000175000017500000000576000000000000017501 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import oslo_middleware.cors as cors_middleware import oslo_middleware.http_proxy_to_wsgi as http_proxy_to_wsgi_middleware import osprofiler.web import pecan from mistral.api import access_control from mistral import config as m_config from mistral import context as ctx from mistral.db.v2 import api as db_api_v2 from mistral.rpc import base as rpc from mistral.service import coordination from mistral.services import periodic def get_pecan_config(): # Set up the pecan configuration. 
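# Example (illustrative, not part of the original module): a minimal sketch
# of how the enforce() helper above is typically called from an API
# controller. The policy action name 'workflows:create' is an assumption
# used only for illustration; the call pattern mirrors the v2 controllers
# later in this tree.
#
#     from mistral.api import access_control as acl
#     from mistral import context
#
#     ctx = context.ctx()
#
#     # Raises mistral.exceptions.NotAllowedException unless the policy
#     # rule 'workflows:create' authorizes this user/project.
#     acl.enforce(
#         'workflows:create',
#         ctx,
#         target={'project_id': ctx.project_id}
#     )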
    opts = cfg.CONF.pecan

    cfg_dict = {
        "app": {
            "root": opts.root,
            "modules": opts.modules,
            "debug": opts.debug,
            "auth_enable": opts.auth_enable
        }
    }

    return pecan.configuration.conf_from_dict(cfg_dict)


def setup_app(config=None):
    if not config:
        config = get_pecan_config()

    m_config.set_config_defaults()

    app_conf = dict(config.app)

    db_api_v2.setup_db()

    # TODO(rakhmerov): Why do we run cron triggers in the API layer?
    # Should we move it to the engine?
    if cfg.CONF.cron_trigger.enabled:
        periodic.setup()

    coordination.Service('api_group').register_membership()

    app = pecan.make_app(
        app_conf.pop('root'),
        hooks=lambda: [ctx.AuthHook(), ctx.ContextHook()],
        logging=getattr(config, 'logging', {}),
        **app_conf
    )

    # Set up access control.
    app = access_control.setup(app)

    # TODO(rakhmerov): need to get rid of this call.
    # Set up RPC related flags in config.
    rpc.get_transport()

    # Set up profiler.
    if cfg.CONF.profiler.enabled:
        app = osprofiler.web.WsgiMiddleware(
            app,
            hmac_keys=cfg.CONF.profiler.hmac_keys,
            enabled=cfg.CONF.profiler.enabled
        )

    # Create HTTPProxyToWSGI wrapper.
    app = http_proxy_to_wsgi_middleware.HTTPProxyToWSGI(app, cfg.CONF)

    # Create a CORS wrapper and attach mistral-specific defaults that must be
    # included in all CORS responses.
    return cors_middleware.CORS(app, cfg.CONF)


def init_wsgi():
    # By default, oslo.config parses the CLI args if no args are provided.
    # As a result, invoking this WSGI script from gunicorn leads to an error,
    # with argparse complaining that the CLI options have already been parsed.
    m_config.parse_args(args=[])

    return setup_app()

# ---- mistral-10.0.0.0b3/mistral/api/controllers/__init__.py (empty) ----

# ---- mistral-10.0.0.0b3/mistral/api/controllers/resource.py ----

# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

from wsme import types as wtypes

from mistral_lib import utils


class Resource(wtypes.Base):
    """REST API Resource."""

    _wsme_attributes = []

    def to_dict(self):
        d = {}

        for attr in self._wsme_attributes:
            attr_val = getattr(self, attr.name)

            if not isinstance(attr_val, wtypes.UnsetType):
                d[attr.name] = attr_val

        return d

    @classmethod
    def from_tuples(cls, tuple_iterator):
        obj = cls()

        for col_name, col_val in tuple_iterator:
            if hasattr(obj, col_name):
                # Convert all datetime values to strings.
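# Example (illustrative, not part of the original module): a minimal sketch
# of serving the API defined in mistral/api/app.py above under gunicorn
# through init_wsgi(). The wrapper module name 'wsgi.py', the bind address
# and the worker count are assumptions for illustration.
#
#     # wsgi.py
#     from mistral.api import app
#
#     application = app.init_wsgi()
#
# and then, hypothetically:
#
#     $ gunicorn --bind 0.0.0.0:8989 --workers 4 wsgi:application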
setattr(obj, col_name, utils.datetime_to_str(col_val)) return obj @classmethod def from_dict(cls, d): return cls.from_tuples(d.items()) @classmethod def from_db_model(cls, db_model): return cls.from_tuples(db_model.iter_columns()) def __str__(self): """WSME based implementation of __str__.""" res = "%s [" % type(self).__name__ first = True for attr in self._wsme_attributes: if not first: res += ', ' else: first = False res += "%s='%s'" % (attr.name, getattr(self, attr.name)) return res + "]" def to_json(self): return json.dumps(self.to_dict()) @classmethod def get_fields(cls): obj = cls() return [attr.name for attr in obj._wsme_attributes] class ResourceList(Resource): """Resource containing the list of other resources.""" next = wtypes.text """A link to retrieve the next subset of the resource list""" @property def collection(self): return getattr(self, self._type) @classmethod def convert_with_links(cls, resources, limit, url=None, fields=None, **kwargs): resource_list = cls() setattr(resource_list, resource_list._type, resources) resource_list.next = resource_list.get_next( limit, url=url, fields=fields, **kwargs ) return resource_list def has_next(self, limit): """Return whether resources has more items.""" return len(self.collection) and len(self.collection) == limit def get_next(self, limit, url=None, fields=None, **kwargs): """Return a link to the next subset of the resources.""" if not self.has_next(limit): return wtypes.Unset q_args = '' for key, value in kwargs.items(): if isinstance(value, dict): q_args += '%s=%s:%s&' % \ (key, list(value.keys())[0], list(value.values())[0]) else: q_args += '%s=%s&' % (key, value) resource_args = ( '?%(args)slimit=%(limit)d&marker=%(marker)s' % { 'args': q_args, 'limit': limit, 'marker': self.collection[-1].id } ) # Fields is handled specially here, we can move it above when it's # supported by all resources query. if fields: resource_args += '&fields=%s' % fields next_link = "%(host_url)s/v2/%(resource)s%(args)s" % { 'host_url': url, 'resource': self._type, 'args': resource_args } return next_link def to_dict(self): d = {} for attr in self._wsme_attributes: attr_val = getattr(self, attr.name) if isinstance(attr_val, list): if isinstance(attr_val[0], Resource): d[attr.name] = [v.to_dict() for v in attr_val] elif not isinstance(attr_val, wtypes.UnsetType): d[attr.name] = attr_val return d class Link(Resource): """Web link.""" href = wtypes.text target = wtypes.text rel = wtypes.text @classmethod def sample(cls): return cls(href='http://example.com/here', target='here', rel='self') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/root.py0000644000175000017500000000436300000000000022250 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
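# Example (illustrative, not part of the original module): a minimal sketch
# of how the Resource base class above is meant to be used. The 'Workbook'
# subclass and its fields are assumptions for illustration only.
#
#     from wsme import types as wtypes
#
#     class Workbook(Resource):
#         id = wtypes.text
#         name = wtypes.text
#
#     wb = Workbook.from_dict({'id': '123', 'name': 'my_workbook'})
#
#     wb.to_dict()   # -> {'id': '123', 'name': 'my_workbook'}
#     wb.to_json()   # -> '{"id": "123", "name": "my_workbook"}'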
from oslo_log import log as logging import pecan from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api.controllers import resource from mistral.api.controllers.v2 import root as v2_root LOG = logging.getLogger(__name__) API_STATUS = wtypes.Enum(str, 'SUPPORTED', 'CURRENT', 'DEPRECATED') class APIVersion(resource.Resource): """An API Version.""" id = wtypes.text "The version identifier." status = API_STATUS "The status of the API (SUPPORTED, CURRENT or DEPRECATED)." links = wtypes.ArrayType(resource.Link) "The link to the versioned API." @classmethod def sample(cls): return cls( id='v1.0', status='CURRENT', links=[ resource.Link(target_name='v1', rel="self", href='http://example.com:9777/v1') ] ) class APIVersions(resource.Resource): """API Versions.""" versions = wtypes.ArrayType(APIVersion) @classmethod def sample(cls): v2 = APIVersion(id='v2.0', status='CURRENT', rel="self", href='http://example.com:9777/v2') return cls(versions=[v2]) class RootController(object): v2 = v2_root.Controller() @wsme_pecan.wsexpose(APIVersions) def index(self): LOG.debug("Fetching API versions.") host_url_v2 = '%s/%s' % (pecan.request.application_url, 'v2') api_v2 = APIVersion( id='v2.0', status='CURRENT', links=[resource.Link(href=host_url_v2, target='v2', rel="self",)] ) return APIVersions(versions=[api_v2]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1055672 mistral-10.0.0.0b3/mistral/api/controllers/v2/0000755000175000017500000000000000000000000021234 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/__init__.py0000644000175000017500000000000000000000000023333 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/action.py0000644000175000017500000002356300000000000023074 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 Huawei Technologies Co., Ltd. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
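# Example (illustrative, not part of the original module): the version
# discovery document produced by RootController.index() above. The host
# name is an assumption; port 9777 mirrors the sample() methods above.
#
#     $ curl http://example.com:9777/
#     {
#         "versions": [
#             {
#                 "id": "v2.0",
#                 "status": "CURRENT",
#                 "links": [
#                     {"href": "http://example.com:9777/v2",
#                      "target": "v2", "rel": "self"}
#                 ]
#             }
#         ]
#     }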
from oslo_log import log as logging import pecan from pecan import hooks from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import types from mistral.api.controllers.v2 import validation from mistral.api.hooks import content_type as ct_hook from mistral import context from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.lang import parser as spec_parser from mistral.services import actions from mistral.utils import filter_utils from mistral.utils import rest_utils LOG = logging.getLogger(__name__) class ActionsController(rest.RestController, hooks.HookController): # TODO(nmakhotkin): Have a discussion with pecan/WSME folks in order # to have requests and response of different content types. Then # delete ContentTypeHook. __hooks__ = [ct_hook.ContentTypeHook("application/json", ['POST', 'PUT'])] validate = validation.SpecValidationController( spec_parser.get_action_list_spec_from_yaml) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Action, wtypes.text, wtypes.text) def get(self, identifier, namespace=''): """Return the named action. :param identifier: ID or name of the Action to get. :param namespace: The namespace of the action. """ acl.enforce('actions:get', context.ctx()) LOG.debug("Fetch action [identifier=%s]", identifier) # Use retries to prevent possible failures. db_model = rest_utils.rest_retry_on_db_error( db_api.get_action_definition )(identifier, namespace=namespace) return resources.Action.from_db_model(db_model) @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") def put(self, identifier=None, namespace=''): """Update one or more actions. :param identifier: Optional. If provided, it's UUID or name of an action. Only one action can be updated with identifier param. :param namespace: Optional. If provided, it's the namespace that the action is under. NOTE: This text is allowed to have definitions of multiple actions. In this case they all will be updated. """ acl.enforce('actions:update', context.ctx()) definition = pecan.request.text LOG.debug("Update action(s) [definition=%s]", definition) namespace = namespace or '' scope = pecan.request.GET.get('scope', 'private') resources.Action.validate_scope(scope) if scope == 'public': acl.enforce('actions:publicize', context.ctx()) @rest_utils.rest_retry_on_db_error def _update_actions(): with db_api.transaction(): return actions.update_actions( definition, scope=scope, identifier=identifier, namespace=namespace ) db_acts = _update_actions() action_list = [ resources.Action.from_db_model(db_act) for db_act in db_acts ] return resources.Actions(actions=action_list).to_json() @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") def post(self, namespace=''): """Create a new action. :param namespace: Optional. The namespace to create the ad-hoc action in. actions with the same name can be added to a given project if they are in two different namespaces. (default namespace is '') NOTE: This text is allowed to have definitions of multiple actions. In this case they all will be created. 
""" acl.enforce('actions:create', context.ctx()) namespace = namespace or '' definition = pecan.request.text scope = pecan.request.GET.get('scope', 'private') pecan.response.status = 201 resources.Action.validate_scope(scope) if scope == 'public': acl.enforce('actions:publicize', context.ctx()) LOG.debug("Create action(s) [definition=%s]", definition) @rest_utils.rest_retry_on_db_error def _create_action_definitions(): with db_api.transaction(): return actions.create_actions(definition, scope=scope, namespace=namespace) db_acts = _create_action_definitions() action_list = [ resources.Action.from_db_model(db_act) for db_act in db_acts ] return resources.Actions(actions=action_list).to_json() @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204) def delete(self, identifier, namespace=''): """Delete the named action. :param identifier: Name or UUID of the action to delete. :param namespace: The namespace of which the action is in. """ acl.enforce('actions:delete', context.ctx()) LOG.debug("Delete action [identifier=%s]", identifier) @rest_utils.rest_retry_on_db_error def _delete_action_definition(): with db_api.transaction(): db_model = db_api.get_action_definition(identifier, namespace=namespace) if db_model.is_system: msg = "Attempt to delete a system action: %s" % identifier raise exc.DataAccessException(msg) db_api.delete_action_definition(identifier, namespace=namespace) _delete_action_definition() @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Actions, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, wtypes.text, resources.SCOPE_TYPES, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_keys='name', sort_dirs='asc', fields='', created_at=None, name=None, scope=None, tags=None, updated_at=None, description=None, definition=None, is_system=None, input=None, namespace=''): """Return all actions. :param marker: Optional. Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: name. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: asc. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param name: Optional. Keep only resources with a specific name. :param scope: Optional. Keep only resources with a specific scope. :param definition: Optional. Keep only resources with a specific definition. :param is_system: Optional. Keep only system actions or ad-hoc actions (if False). :param input: Optional. Keep only resources with a specific input. :param description: Optional. Keep only resources with a specific description. :param tags: Optional. Keep only resources containing specific tags. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. :param namespace: Optional. The namespace of the action. 
""" acl.enforce('actions:list', context.ctx()) filters = filter_utils.create_filters_from_request_params( created_at=created_at, name=name, scope=scope, tags=tags, updated_at=updated_at, description=description, definition=definition, is_system=is_system, input=input, namespace=namespace ) LOG.debug("Fetch actions. marker=%s, limit=%s, sort_keys=%s, " "sort_dirs=%s, filters=%s", marker, limit, sort_keys, sort_dirs, filters) return rest_utils.get_all( resources.Actions, resources.Action, db_api.get_action_definitions, db_api.get_action_definition_by_id, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, **filters ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/action_execution.py0000644000175000017500000004427300000000000025160 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import types from mistral import context from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.rpc import clients as rpc from mistral.utils import filter_utils from mistral.utils import rest_utils from mistral.workflow import states from mistral_lib import actions as ml_actions LOG = logging.getLogger(__name__) SUPPORTED_TRANSITION_STATES = [ states.SUCCESS, states.ERROR, states.CANCELLED, states.PAUSED, states.RUNNING ] def _load_deferred_output_field(action_ex): # We need to refer to this lazy-load field explicitly in # order to make sure that it is correctly loaded. hasattr(action_ex, 'output') # Use retries to prevent possible failures. @rest_utils.rest_retry_on_db_error def _get_action_execution(id): with db_api.transaction(): return _get_action_execution_resource(db_api.get_action_execution(id)) def _get_action_execution_resource(action_ex): _load_deferred_output_field(action_ex) return _get_action_execution_resource_for_list(action_ex) def _get_action_execution_resource_for_list(action_ex): # TODO(nmakhotkin): Get rid of using dicts for constructing resources. # TODO(nmakhotkin): Use db_model for this instead. res = resources.ActionExecution.from_db_model(action_ex) task_name = (action_ex.task_execution.name if action_ex.task_execution else None) setattr(res, 'task_name', task_name) return res def _get_action_executions(task_execution_id=None, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', include_output=False, **filters): """Return all action executions. Where project_id is the same as the requester or project_id is different but the scope is public. :param marker: Optional. 
        Pagination marker for large data sets.
    :param limit: Optional. Maximum number of resources to return in
        a single result. Default value is None for backward compatibility.
    :param sort_keys: Optional. Columns to sort results by.
        Default: created_at, which is backward compatible.
    :param sort_dirs: Optional. Directions to sort corresponding to
        sort_keys, "asc" or "desc" can be chosen.
        Default: asc. The length of sort_dirs can be equal or less
        than that of sort_keys.
    :param fields: Optional. A specified list of fields of the resource
        to be returned. 'id' will be included automatically in fields
        if it's provided, since it will be used when constructing 'next'
        link.
    :param filters: Optional. A list of filters to apply to the result.
    """
    if task_execution_id:
        filters['task_execution_id'] = task_execution_id

    if include_output:
        resource_function = _get_action_execution_resource
    else:
        resource_function = _get_action_execution_resource_for_list

    return rest_utils.get_all(
        resources.ActionExecutions,
        resources.ActionExecution,
        db_api.get_action_executions,
        db_api.get_action_execution,
        resource_function=resource_function,
        marker=marker,
        limit=limit,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        fields=fields,
        **filters
    )


class ActionExecutionsController(rest.RestController):

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.ActionExecution, wtypes.text)
    def get(self, id):
        """Return the specified action_execution.

        :param id: UUID of action execution to retrieve.
        """
        acl.enforce('action_executions:get', context.ctx())

        LOG.debug("Fetch action_execution [id=%s]", id)

        return _get_action_execution(id)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.ActionExecution,
                         body=resources.ActionExecution, status_code=201)
    def post(self, action_ex):
        """Create new action_execution.

        :param action_ex: Action to execute.
        """
        acl.enforce('action_executions:create', context.ctx())

        LOG.debug(
            "Create action_execution [action_execution=%s]",
            action_ex
        )

        name = action_ex.name
        description = action_ex.description or None
        action_input = action_ex.input or {}
        params = action_ex.params or {}
        namespace = action_ex.workflow_namespace or ''

        if not name:
            raise exc.InputException(
                "Please provide at least the action name to run an action."
            )

        values = rpc.get_engine_client().start_action(
            name,
            action_input,
            description=description,
            namespace=namespace,
            **params
        )

        return resources.ActionExecution.from_dict(values)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(
        resources.ActionExecution,
        wtypes.text,
        body=resources.ActionExecution
    )
    def put(self, id, action_ex):
        """Update the specified action_execution.

        :param id: UUID of action execution to update.
        :param action_ex: Action execution for update.
        """
        acl.enforce('action_executions:update', context.ctx())

        LOG.debug(
            "Update action_execution [id=%s, action_execution=%s]",
            id,
            action_ex
        )

        if action_ex.state not in SUPPORTED_TRANSITION_STATES:
            raise exc.InvalidResultException(
                "Error.
Expected one of %s, actual: %s" % ( SUPPORTED_TRANSITION_STATES, action_ex.state ) ) if states.is_completed(action_ex.state): output = action_ex.output if action_ex.state == states.SUCCESS: result = ml_actions.Result(data=output) elif action_ex.state == states.ERROR: if not output: output = 'Unknown error' result = ml_actions.Result(error=output) elif action_ex.state == states.CANCELLED: result = ml_actions.Result(cancel=True) values = rpc.get_engine_client().on_action_complete(id, result) if action_ex.state in [states.PAUSED, states.RUNNING]: state = action_ex.state values = rpc.get_engine_client().on_action_update(id, state) return resources.ActionExecution.from_dict(values) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.ActionExecutions, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, types.uuid, wtypes.text, wtypes.text, bool, types.jsontype, types.jsontype, types.jsontype, wtypes.text, bool) def get_all(self, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', created_at=None, name=None, tags=None, updated_at=None, workflow_name=None, task_name=None, task_execution_id=None, state=None, state_info=None, accepted=None, input=None, output=None, params=None, description=None, include_output=False): """Return all tasks within the execution. Where project_id is the same as the requester or project_id is different but the scope is public. :param marker: Optional. Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: created_at, which is backward compatible. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: desc. The length of sort_dirs can be equal or less than that of sort_keys. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param name: Optional. Keep only resources with a specific name. :param workflow_name: Optional. Keep only resources with a specific workflow name. :param task_name: Optional. Keep only resources with a specific task name. :param task_execution_id: Optional. Keep only resources within a specific task execution. :param state: Optional. Keep only resources with a specific state. :param state_info: Optional. Keep only resources with specific state information. :param accepted: Optional. Keep only resources which have been accepted or not. :param input: Optional. Keep only resources with a specific input. :param output: Optional. Keep only resources with a specific output. :param params: Optional. Keep only resources with specific parameters. :param description: Optional. Keep only resources with a specific description. :param tags: Optional. Keep only resources containing specific tags. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. :param include_output: Optional. 
Include the output for all executions in the list """ acl.enforce('action_executions:list', context.ctx()) filters = filter_utils.create_filters_from_request_params( created_at=created_at, name=name, tags=tags, updated_at=updated_at, workflow_name=workflow_name, task_name=task_name, task_execution_id=task_execution_id, state=state, state_info=state_info, accepted=accepted, input=input, output=output, params=params, description=description ) LOG.debug( "Fetch action_executions. marker=%s, limit=%s, " "sort_keys=%s, sort_dirs=%s, filters=%s", marker, limit, sort_keys, sort_dirs, filters ) return _get_action_executions( marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, include_output=include_output, **filters ) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, id): """Delete the specified action_execution. :param id: UUID of action execution to delete """ acl.enforce('action_executions:delete', context.ctx()) LOG.debug("Delete action_execution [id=%s]", id) if not cfg.CONF.api.allow_action_execution_deletion: raise exc.NotAllowedException("Action execution deletion is not " "allowed.") with db_api.transaction(): action_ex = db_api.get_action_execution(id) if action_ex.task_execution_id: raise exc.NotAllowedException( "Only ad-hoc action execution can be deleted." ) if not states.is_completed(action_ex.state): raise exc.NotAllowedException( "Only completed action execution can be deleted." ) return db_api.delete_action_execution(id) class TasksActionExecutionController(rest.RestController): @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.ActionExecutions, types.uuid, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, types.uniquelist, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, wtypes.text, bool, types.jsontype, types.jsontype, types.jsontype, wtypes.text, bool) def get_all(self, task_execution_id, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', created_at=None, name=None, tags=None, updated_at=None, workflow_name=None, task_name=None, state=None, state_info=None, accepted=None, input=None, output=None, params=None, description=None, include_output=None): """Return all tasks within the execution. Where project_id is the same as the requester or project_id is different but the scope is public. :param task_execution_id: Keep only resources within a specific task execution. :param marker: Optional. Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: created_at, which is backward compatible. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: desc. The length of sort_dirs can be equal or less than that of sort_keys. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param name: Optional. Keep only resources with a specific name. :param workflow_name: Optional. Keep only resources with a specific workflow name. :param task_name: Optional. Keep only resources with a specific task name. :param state: Optional. Keep only resources with a specific state. :param state_info: Optional. 
Keep only resources with specific state information. :param accepted: Optional. Keep only resources which have been accepted or not. :param input: Optional. Keep only resources with a specific input. :param output: Optional. Keep only resources with a specific output. :param params: Optional. Keep only resources with specific parameters. :param description: Optional. Keep only resources with a specific description. :param tags: Optional. Keep only resources containing specific tags. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. :param include_output: Optional. Include the output for all executions in the list """ acl.enforce('action_executions:list', context.ctx()) filters = filter_utils.create_filters_from_request_params( created_at=created_at, name=name, tags=tags, updated_at=updated_at, workflow_name=workflow_name, task_name=task_name, task_execution_id=task_execution_id, state=state, state_info=state_info, accepted=accepted, input=input, output=output, params=params, description=description ) LOG.debug( "Fetch action_executions. marker=%s, limit=%s, " "sort_keys=%s, sort_dirs=%s, filters=%s", marker, limit, sort_keys, sort_dirs, filters ) return _get_action_executions( marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, include_output=include_output, **filters ) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.ActionExecution, wtypes.text, wtypes.text) def get(self, task_execution_id, action_ex_id): """Return the specified action_execution. :param task_execution_id: Task execution UUID :param action_ex_id: Action execution UUID """ acl.enforce('action_executions:get', context.ctx()) LOG.debug("Fetch action_execution [id=%s]", action_ex_id) return _get_action_execution(action_ex_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/cron_trigger.py0000644000175000017500000002012700000000000024274 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import types from mistral import context from mistral.db.v2 import api as db_api from mistral.services import triggers from mistral.utils import filter_utils from mistral.utils import rest_utils LOG = logging.getLogger(__name__) class CronTriggersController(rest.RestController): @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.CronTrigger, wtypes.text) def get(self, identifier): """Returns the named cron_trigger. 
:param identifier: Id or name of cron trigger to retrieve """ acl.enforce('cron_triggers:get', context.ctx()) LOG.debug('Fetch cron trigger [identifier=%s]', identifier) # Use retries to prevent possible failures. db_model = rest_utils.rest_retry_on_db_error( db_api.get_cron_trigger )(identifier) return resources.CronTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose( resources.CronTrigger, body=resources.CronTrigger, status_code=201 ) def post(self, cron_trigger): """Creates a new cron trigger. :param cron_trigger: Required. Cron trigger structure. """ acl.enforce('cron_triggers:create', context.ctx()) LOG.debug('Create cron trigger: %s', cron_trigger) values = cron_trigger.to_dict() db_model = rest_utils.rest_retry_on_db_error( triggers.create_cron_trigger )( name=values['name'], workflow_name=values.get('workflow_name'), workflow_input=values.get('workflow_input'), workflow_params=values.get('workflow_params'), pattern=values.get('pattern'), first_time=values.get('first_execution_time'), count=values.get('remaining_executions'), workflow_id=values.get('workflow_id') ) return resources.CronTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, identifier): """Delete cron trigger. :param identifier: Id or name of cron trigger to delete """ acl.enforce('cron_triggers:delete', context.ctx()) LOG.debug("Delete cron trigger [identifier=%s]", identifier) rest_utils.rest_retry_on_db_error( triggers.delete_cron_trigger )(identifier) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.CronTriggers, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, wtypes.text, types.uuid, types.jsontype, types.jsontype, resources.SCOPE_TYPES, wtypes.text, wtypes.IntegerType(minimum=1), wtypes.text, wtypes.text, wtypes.text, wtypes.text, types.uuid, bool) def get_all(self, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', name=None, workflow_name=None, workflow_id=None, workflow_input=None, workflow_params=None, scope=None, pattern=None, remaining_executions=None, first_execution_time=None, next_execution_time=None, created_at=None, updated_at=None, project_id=None, all_projects=False): """Return all cron triggers. :param marker: Optional. Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: created_at, which is backward compatible. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: desc. The length of sort_dirs can be equal or less than that of sort_keys. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param name: Optional. Keep only resources with a specific name. :param workflow_name: Optional. Keep only resources with a specific workflow name. :param workflow_id: Optional. Keep only resources with a specific workflow ID. :param workflow_input: Optional. Keep only resources with a specific workflow input. :param workflow_params: Optional. Keep only resources with specific workflow parameters. :param scope: Optional. Keep only resources with a specific scope. :param pattern: Optional. 
Keep only resources with a specific pattern. :param remaining_executions: Optional. Keep only resources with a specific number of remaining executions. :param project_id: Optional. Keep only resources with the specific project id. :param first_execution_time: Optional. Keep only resources with a specific time and date of first execution. :param next_execution_time: Optional. Keep only resources with a specific time and date of next execution. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. :param all_projects: Optional. Get resources of all projects. """ acl.enforce('cron_triggers:list', context.ctx()) if all_projects: acl.enforce('cron_triggers:list:all_projects', context.ctx()) filters = filter_utils.create_filters_from_request_params( created_at=created_at, name=name, updated_at=updated_at, workflow_name=workflow_name, workflow_id=workflow_id, workflow_input=workflow_input, workflow_params=workflow_params, scope=scope, pattern=pattern, remaining_executions=remaining_executions, first_execution_time=first_execution_time, next_execution_time=next_execution_time, project_id=project_id, ) LOG.debug( "Fetch cron triggers. marker=%s, limit=%s, sort_keys=%s, " "sort_dirs=%s, filters=%s, all_projects=%s", marker, limit, sort_keys, sort_dirs, filters, all_projects ) return rest_utils.get_all( resources.CronTriggers, resources.CronTrigger, db_api.get_cron_triggers, db_api.get_cron_trigger, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, all_projects=all_projects, **filters ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/environment.py0000644000175000017500000001637100000000000024162 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from oslo_log import log as logging from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import types from mistral import context from mistral.db.v2 import api as db_api from mistral import exceptions from mistral.utils import filter_utils from mistral.utils import rest_utils from mistral_lib.utils import cut LOG = logging.getLogger(__name__) class EnvironmentController(rest.RestController): @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Environments, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, wtypes.text, types.jsontype, resources.SCOPE_TYPES, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', name=None, description=None, variables=None, scope=None, created_at=None, updated_at=None): """Return all environments. 
Where project_id is the same as the requester or project_id is different but the scope is public. :param marker: Optional. Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: created_at, which is backward compatible. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: desc. The length of sort_dirs can be equal or less than that of sort_keys. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param name: Optional. Keep only resources with a specific name. :param description: Optional. Keep only resources with a specific description. :param variables: Optional. Keep only resources with specific variables. :param scope: Optional. Keep only resources with a specific scope. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. """ acl.enforce('environments:list', context.ctx()) filters = filter_utils.create_filters_from_request_params( created_at=created_at, name=name, updated_at=updated_at, description=description, variables=variables, scope=scope ) LOG.debug("Fetch environments. marker=%s, limit=%s, sort_keys=%s, " "sort_dirs=%s, filters=%s", marker, limit, sort_keys, sort_dirs, filters) return rest_utils.get_all( resources.Environments, resources.Environment, db_api.get_environments, db_api.get_environment, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, **filters ) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Environment, wtypes.text) def get(self, name): """Return the named environment. :param name: Name of environment to retrieve """ acl.enforce('environments:get', context.ctx()) LOG.debug("Fetch environment [name=%s]", name) # Use retries to prevent possible failures. r = rest_utils.create_db_retry_object() db_model = r.call(db_api.get_environment, name) return resources.Environment.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose( resources.Environment, body=resources.Environment, status_code=201 ) def post(self, env): """Create a new environment. :param env: Required. Environment structure to create """ acl.enforce('environments:create', context.ctx()) LOG.debug("Create environment [env=%s]", cut(env)) self._validate_environment( json.loads(wsme_pecan.pecan.request.body.decode()), ['name', 'description', 'variables'] ) db_model = rest_utils.rest_retry_on_db_error( db_api.create_environment )(env.to_dict()) return resources.Environment.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Environment, body=resources.Environment) def put(self, env): """Update an environment. :param env: Required. Environment structure to update """ acl.enforce('environments:update', context.ctx()) if not env.name: raise exceptions.InputException( 'Name of the environment is not provided.' 
) LOG.debug("Update environment [name=%s, env=%s]", env.name, cut(env)) definition = json.loads(wsme_pecan.pecan.request.body.decode()) definition.pop('name') self._validate_environment( definition, ['description', 'variables', 'scope'] ) db_model = rest_utils.rest_retry_on_db_error( db_api.update_environment )(env.name, env.to_dict()) return resources.Environment.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, name): """Delete the named environment. :param name: Name of environment to delete """ acl.enforce('environments:delete', context.ctx()) LOG.debug("Delete environment [name=%s]", name) rest_utils.rest_retry_on_db_error(db_api.delete_environment)(name) @staticmethod def _validate_environment(env_dict, legal_keys): if env_dict is None: return if set(env_dict) - set(legal_keys): raise exceptions.InputException( "Please, check your environment definition. Only: " "%s are allowed as definition keys." % legal_keys ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/event_trigger.py0000644000175000017500000001414400000000000024456 0ustar00coreycorey00000000000000# Copyright 2016 - IBM Corp. # Copyright 2016 Catalyst IT Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from pecan import rest import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import types from mistral import context as auth_ctx from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.services import triggers from mistral.utils import rest_utils LOG = logging.getLogger(__name__) UPDATE_NOT_ALLOWED = ['exchange', 'topic', 'event'] CREATE_MANDATORY = set(['exchange', 'topic', 'event', 'workflow_id']) class EventTriggersController(rest.RestController): @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.EventTrigger, types.uuid) def get(self, id): """Returns the specified event_trigger.""" acl.enforce('event_triggers:get', auth_ctx.ctx()) LOG.debug('Fetch event trigger [id=%s]', id) # Use retries to prevent possible failures. r = rest_utils.create_db_retry_object() db_model = r.call(db_api.get_event_trigger, id) return resources.EventTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.EventTrigger, body=resources.EventTrigger, status_code=201) def post(self, event_trigger): """Creates a new event trigger.""" acl.enforce('event_triggers:create', auth_ctx.ctx()) values = event_trigger.to_dict() input_keys = [k for k in values if values[k]] if CREATE_MANDATORY - set(input_keys): raise exc.EventTriggerException( "Params %s must be provided for creating event trigger." 
% CREATE_MANDATORY ) if values.get('scope') == 'public': acl.enforce('event_triggers:create:public', auth_ctx.ctx()) LOG.debug('Create event trigger: %s', values) db_model = rest_utils.rest_retry_on_db_error( triggers.create_event_trigger )( name=values.get('name', ''), exchange=values.get('exchange'), topic=values.get('topic'), event=values.get('event'), workflow_id=values.get('workflow_id'), scope=values.get('scope'), workflow_input=values.get('workflow_input'), workflow_params=values.get('workflow_params'), ) return resources.EventTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.EventTrigger, types.uuid, body=resources.EventTrigger) def put(self, id, event_trigger): """Updates an existing event trigger. The exchange, topic and event can not be updated. The right way to change them is to delete the event trigger first, then create a new event trigger with new params. """ acl.enforce('event_triggers:update', auth_ctx.ctx()) values = event_trigger.to_dict() for field in UPDATE_NOT_ALLOWED: if values.get(field): raise exc.EventTriggerException( "Can not update fields %s of event trigger." % UPDATE_NOT_ALLOWED ) LOG.debug('Update event trigger: [id=%s, values=%s]', id, values) @rest_utils.rest_retry_on_db_error def _update_event_trigger(): with db_api.transaction(): # ensure that event trigger exists db_api.get_event_trigger(id) return triggers.update_event_trigger(id, values) db_model = _update_event_trigger() return resources.EventTrigger.from_db_model(db_model) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, types.uuid, status_code=204) def delete(self, id): """Delete event trigger.""" acl.enforce('event_triggers:delete', auth_ctx.ctx()) LOG.debug("Delete event trigger [id=%s]", id) @rest_utils.rest_retry_on_db_error def _delete_event_trigger(): with db_api.transaction(): event_trigger = db_api.get_event_trigger(id) triggers.delete_event_trigger(event_trigger.to_dict()) _delete_event_trigger() @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.EventTriggers, types.uuid, int, types.uniquelist, types.list, types.uniquelist, bool, types.jsontype) def get_all(self, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', all_projects=False, **filters): """Return all event triggers.""" acl.enforce('event_triggers:list', auth_ctx.ctx()) if all_projects: acl.enforce('event_triggers:list:all_projects', auth_ctx.ctx()) LOG.debug( "Fetch event triggers. marker=%s, limit=%s, sort_keys=%s, " "sort_dirs=%s, fields=%s, all_projects=%s, filters=%s", marker, limit, sort_keys, sort_dirs, fields, all_projects, filters ) return rest_utils.get_all( resources.EventTriggers, resources.EventTrigger, db_api.get_event_triggers, db_api.get_event_trigger, resource_function=None, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, all_projects=all_projects, **filters ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/execution.py0000644000175000017500000004143500000000000023620 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2015 Huawei Technologies Co., Ltd. # Copyright 2016 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. # Copyright 2019 - NetCracker Technology Corp. 
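# Example (illustrative, not part of the original module): a request body
# that satisfies the CREATE_MANDATORY check in EventTriggersController.post()
# above. All concrete values are assumptions for illustration.
#
#     POST /v2/event_triggers
#     {
#         "name": "on_instance_create",
#         "exchange": "nova",
#         "topic": "notifications",
#         "event": "compute.instance.create.end",
#         "workflow_id": "<workflow-uuid>"
#     }
#
# Per the put() docstring above, exchange, topic and event cannot be
# changed later; delete and re-create the trigger instead.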
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from oslo_utils import uuidutils from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import execution_report from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import sub_execution from mistral.api.controllers.v2 import task from mistral.api.controllers.v2 import types from mistral import context from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models as db_models from mistral import exceptions as exc from mistral.rpc import clients as rpc from mistral.services import workflows as wf_service from mistral.utils import filter_utils from mistral.utils import rest_utils from mistral.workflow import data_flow from mistral.workflow import states from mistral_lib.utils import merge_dicts LOG = logging.getLogger(__name__) STATE_TYPES = wtypes.Enum( str, states.IDLE, states.RUNNING, states.SUCCESS, states.ERROR, states.PAUSED, states.CANCELLED ) def _get_workflow_execution_resource_with_output(wf_ex): rest_utils.load_deferred_fields(wf_ex, ['params', 'input', 'output']) return resources.Execution.from_db_model(wf_ex) def _get_workflow_execution_resource(wf_ex): rest_utils.load_deferred_fields(wf_ex, ['params', 'input']) return resources.Execution.from_db_model(wf_ex) # Use retries to prevent possible failures. @rest_utils.rest_retry_on_db_error def _get_workflow_execution(id, must_exist=True): with db_api.transaction(): if must_exist: wf_ex = db_api.get_workflow_execution(id) else: wf_ex = db_api.load_workflow_execution(id) return rest_utils.load_deferred_fields( wf_ex, ['params', 'input', 'output', 'context', 'spec'] ) # TODO(rakhmerov): Make sure to make all needed renaming on public API. class ExecutionsController(rest.RestController): tasks = task.ExecutionTasksController() report = execution_report.ExecutionReportController() executions = sub_execution.SubExecutionsController() @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Execution, wtypes.text) def get(self, id): """Return the specified Execution. :param id: UUID of execution to retrieve. """ acl.enforce("executions:get", context.ctx()) LOG.debug("Fetch execution [id=%s]", id) wf_ex = _get_workflow_execution(id) resource = resources.Execution.from_db_model(wf_ex) resource.published_global = ( data_flow.get_workflow_execution_published_global(wf_ex) ) return resource @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose( resources.Execution, wtypes.text, body=resources.Execution ) def put(self, id, wf_ex): """Update the specified workflow execution. :param id: UUID of execution to update. :param wf_ex: Execution object. 
""" acl.enforce('executions:update', context.ctx()) LOG.debug('Update execution [id=%s, execution=%s]', id, wf_ex) @rest_utils.rest_retry_on_db_error def _compute_delta(wf_ex): with db_api.transaction(): # ensure that workflow execution exists db_api.get_workflow_execution( id, fields=(db_models.WorkflowExecution.id,) ) delta = {} if wf_ex.state: delta['state'] = wf_ex.state if wf_ex.description: delta['description'] = wf_ex.description if wf_ex.params and wf_ex.params.get('env'): delta['env'] = wf_ex.params.get('env') # Currently we can change only state, description, or env. if len(delta.values()) <= 0: raise exc.InputException( 'The property state, description, or env ' 'is not provided for update.' ) # Description cannot be updated together with state. if delta.get('description') and delta.get('state'): raise exc.InputException( 'The property description must be updated ' 'separately from state.' ) # If state change, environment cannot be updated # if not RUNNING. if (delta.get('env') and delta.get('state') and delta['state'] != states.RUNNING): raise exc.InputException( 'The property env can only be updated when workflow ' 'execution is not running or on resume from pause.' ) if delta.get('description'): wf_ex = db_api.update_workflow_execution( id, {'description': delta['description']} ) if not delta.get('state') and delta.get('env'): wf_ex = db_api.get_workflow_execution(id) wf_ex = wf_service.update_workflow_execution_env( wf_ex, delta.get('env') ) return delta, wf_ex delta, wf_ex = _compute_delta(wf_ex) if delta.get('state'): if states.is_paused(delta.get('state')): wf_ex = rpc.get_engine_client().pause_workflow(id) elif delta.get('state') == states.RUNNING: wf_ex = rpc.get_engine_client().resume_workflow( id, env=delta.get('env') ) elif states.is_completed(delta.get('state')): msg = wf_ex.state_info if wf_ex.state_info else None wf_ex = rpc.get_engine_client().stop_workflow( id, delta.get('state'), msg ) else: # To prevent changing state in other cases throw a message. raise exc.InputException( "Cannot change state to %s. Allowed states are: '%s" % ( wf_ex.state, ', '.join([ states.RUNNING, states.PAUSED, states.SUCCESS, states.ERROR, states.CANCELLED ]) ) ) return resources.Execution.from_dict( wf_ex if isinstance(wf_ex, dict) else wf_ex.to_dict() ) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose( resources.Execution, body=resources.Execution, status_code=201 ) def post(self, wf_ex): """Create a new Execution. :param wf_ex: Execution object with input content. """ acl.enforce('executions:create', context.ctx()) LOG.debug("Create execution [execution=%s]", wf_ex) exec_dict = wf_ex.to_dict() exec_id = exec_dict.get('id') if not exec_id: exec_id = uuidutils.generate_uuid() LOG.debug("Generated execution id [exec_id=%s]", exec_id) exec_dict.update({'id': exec_id}) wf_ex = None else: # If ID is present we need to check if such execution exists. # If yes, the method just returns the object. If not, the ID # will be used to create a new execution. wf_ex = _get_workflow_execution(exec_id, must_exist=False) if wf_ex: return resources.Execution.from_db_model(wf_ex) source_execution_id = exec_dict.get('source_execution_id') source_exec_dict = None if source_execution_id: # If source execution is present we will perform a lookup for # previous workflow execution model and the information to start # a new workflow based on that information. 
            source_exec_dict = db_api.get_workflow_execution(
                source_execution_id).to_dict()

            exec_dict['description'] = "{} Based on the execution '{}'".format(
                exec_dict['description'],
                source_execution_id
            )

            exec_dict['description'] = exec_dict['description'].strip()

        result_exec_dict = merge_dicts(source_exec_dict, exec_dict)

        if not (result_exec_dict.get('workflow_id')
                or result_exec_dict.get('workflow_name')):
            raise exc.WorkflowException(
                "Workflow ID or workflow name must be provided. Workflow ID"
                " is recommended."
            )

        engine = rpc.get_engine_client()

        result = engine.start_workflow(
            result_exec_dict.get(
                'workflow_id',
                result_exec_dict.get('workflow_name')
            ),
            result_exec_dict.get('workflow_namespace', ''),
            result_exec_dict.get('id'),
            result_exec_dict.get('input'),
            description=result_exec_dict.get('description', ''),
            **result_exec_dict.get('params') or {}
        )

        return resources.Execution.from_dict(result)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(None, wtypes.text, bool, status_code=204)
    def delete(self, id, force=False):
        """Delete the specified Execution.

        :param id: UUID of execution to delete.
        :param force: Optional. Force the deletion of unfinished executions.
            Default: false. Note that while the API remains backward
            compatible, the behaviour is not the same: the new default is
            the safer option and refuses to delete executions that have
            not completed.
        """
        acl.enforce('executions:delete', context.ctx())

        LOG.debug("Delete execution [id=%s]", id)

        if not force:
            state = db_api.get_workflow_execution(
                id,
                fields=(db_models.WorkflowExecution.state,)
            )[0]

            if not states.is_completed(state):
                raise exc.NotAllowedException(
                    "Only completed executions can be deleted. "
                    "Use --force to override this. "
                    "Execution {} is in {} state.".format(id, state)
                )

        return rest_utils.rest_retry_on_db_error(
            db_api.delete_workflow_execution
        )(id)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Executions, types.uuid, int,
                         types.uniquelist, types.list, types.uniquelist,
                         wtypes.text, types.uuid, wtypes.text,
                         types.uniquelist, types.jsontype, types.uuid,
                         types.uuid, STATE_TYPES, wtypes.text,
                         types.jsontype, types.jsontype, wtypes.text,
                         wtypes.text, bool, types.uuid, bool, types.list)
    def get_all(self, marker=None, limit=None, sort_keys='created_at',
                sort_dirs='asc', fields='', workflow_name=None,
                workflow_id=None, description=None, tags=None, params=None,
                task_execution_id=None, root_execution_id=None, state=None,
                state_info=None, input=None, output=None, created_at=None,
                updated_at=None, include_output=None, project_id=None,
                all_projects=False, nulls=''):
        """Return all Executions.

        :param marker: Optional. Pagination marker for large data sets.
        :param limit: Optional. Maximum number of resources to return in a
            single result. Default value is None for backward compatibility.
        :param sort_keys: Optional. Columns to sort results by.
            Default: created_at, which is backward compatible.
        :param sort_dirs: Optional. Directions to sort corresponding to
            sort_keys, "asc" or "desc" can be chosen. Default: asc. The
            length of sort_dirs can be equal or less than that of sort_keys.
        :param fields: Optional. A specified list of fields of the resource
            to be returned. 'id' will be included automatically in fields
            if it's provided, since it will be used when constructing the
            'next' link.
        :param workflow_name: Optional. Keep only resources with a specific
            workflow name.
        :param workflow_id: Optional. Keep only resources with a specific
            workflow ID.
        :param description: Optional. Keep only resources with a specific
            description.
        :param tags: Optional.
            Keep only resources containing specific tags.
        :param params: Optional. Keep only resources with specific
            parameters.
        :param task_execution_id: Optional. Keep only resources with a
            specific task execution ID.
        :param root_execution_id: Optional. Keep only resources with a
            specific root execution ID.
        :param state: Optional. Keep only resources with a specific state.
        :param state_info: Optional. Keep only resources with specific
            state information.
        :param input: Optional. Keep only resources with a specific input.
        :param output: Optional. Keep only resources with a specific output.
        :param created_at: Optional. Keep only resources created at a
            specific time and date.
        :param updated_at: Optional. Keep only resources with specific
            latest update time and date.
        :param include_output: Optional. Include the output for all
            executions in the list.
        :param project_id: Optional. Keep only executions belonging to the
            given project. Admin required.
        :param all_projects: Optional. Get resources of all projects.
            Admin required.
        :param nulls: Optional. The names of the columns that must have a
            null value in the query.
        """
        acl.enforce('executions:list', context.ctx())

        db_models.WorkflowExecution.check_allowed_none_values(nulls)

        if all_projects or project_id:
            acl.enforce('executions:list:all_projects', context.ctx())

        filters = filter_utils.create_filters_from_request_params(
            none_values=nulls,
            created_at=created_at,
            workflow_name=workflow_name,
            workflow_id=workflow_id,
            tags=tags,
            params=params,
            task_execution_id=task_execution_id,
            state=state,
            state_info=state_info,
            input=input,
            output=output,
            updated_at=updated_at,
            description=description,
            project_id=project_id,
            root_execution_id=root_execution_id,
        )

        LOG.debug(
            "Fetch executions. marker=%s, limit=%s, sort_keys=%s, "
            "sort_dirs=%s, filters=%s, all_projects=%s",
            marker, limit, sort_keys, sort_dirs, filters, all_projects
        )

        if include_output:
            resource_function = _get_workflow_execution_resource_with_output
        else:
            resource_function = _get_workflow_execution_resource

        return rest_utils.get_all(
            resources.Executions,
            resources.Execution,
            db_api.get_workflow_executions,
            db_api.get_workflow_execution,
            resource_function=resource_function,
            marker=marker,
            limit=limit,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            fields=fields,
            all_projects=all_projects,
            **filters
        )

mistral-10.0.0.0b3/mistral/api/controllers/v2/execution_report.py

# Copyright 2019 - Nokia Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
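"""Workflow execution report endpoint.

A report aggregates a whole execution tree (workflow -> tasks ->
action executions / sub-workflow executions) together with per-state
task statistics. A hypothetical request, assuming the controller is
mounted under the executions resource as in execution.py:

    GET /v2/executions/<execution-id>/report?errors_only=true&max_depth=1
"""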
from oslo_log import log as logging from pecan import rest import wsmeext.pecan as wsme_pecan from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import types from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models as db_models from mistral.utils import rest_utils from mistral.workflow import states LOG = logging.getLogger(__name__) def create_workflow_execution_entry(wf_ex): return resources.WorkflowExecutionReportEntry.from_db_model(wf_ex) def create_task_execution_entry(task_ex): return resources.TaskExecutionReportEntry.from_db_model(task_ex) def create_action_execution_entry(action_ex): return resources.ActionExecutionReportEntry.from_db_model(action_ex) def update_statistics_with_task(stat, task_ex): if task_ex.state == states.RUNNING: stat.increment_running() elif task_ex.state == states.SUCCESS: stat.increment_success() elif task_ex.state == states.ERROR: stat.increment_error() elif task_ex.state == states.IDLE: stat.increment_idle() elif task_ex.state == states.PAUSED: stat.increment_paused() def analyse_task_execution(task_ex_id, stat, filters, cur_depth): with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex_id) if filters['errors_only'] and task_ex.state != states.ERROR: return None update_statistics_with_task(stat, task_ex) entry = create_task_execution_entry(task_ex) child_executions = task_ex.executions if 'retry_task_policy' in task_ex.runtime_context: retry_ctx = task_ex.runtime_context['retry_task_policy'] entry.retry_count = retry_ctx['retry_no'] entry.action_executions = [] entry.workflow_executions = [] for c_ex in child_executions: if isinstance(c_ex, db_models.ActionExecution): entry.action_executions.append( create_action_execution_entry(c_ex) ) else: entry.workflow_executions.append( analyse_workflow_execution(c_ex.id, stat, filters, cur_depth) ) return entry def analyse_workflow_execution(wf_ex_id, stat, filters, cur_depth): with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex_id) entry = create_workflow_execution_entry(wf_ex) max_depth = filters['max_depth'] # Don't get deeper into the workflow task executions if # maximum depth is defined and the current depth exceeds it. if 0 <= max_depth < cur_depth: return entry task_execs = wf_ex.task_executions entry.task_executions = [] for t_ex in task_execs: task_exec_entry = analyse_task_execution( t_ex.id, stat, filters, cur_depth + 1 ) if task_exec_entry: entry.task_executions.append(task_exec_entry) return entry def build_report(wf_ex_id, filters): report = resources.ExecutionReport() stat = resources.ExecutionReportStatistics() report.statistics = stat report.root_workflow_execution = analyse_workflow_execution( wf_ex_id, stat, filters, 0 ) return report class ExecutionReportController(rest.RestController): @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.ExecutionReport, types.uuid, bool, int) def get(self, workflow_execution_id, errors_only=False, max_depth=-1): """Return workflow execution report. :param workflow_execution_id: The ID of the workflow execution to generate a report for. :param errors_only: Optional. If True, only error paths of the execution tree are included into the report. The root execution (with the specified id) is always included, but its tasks may or may not be included depending on this flag's value. :param max_depth: Optional. Limits the depth of recursion while obtaining the execution tree. That is, subworkflows of what maximum depth will be included into the report. 
If a value of the flag is a negative number then no limit is set. The root execution has depth 0 so if the flag is 0 then only the root execution, its tasks and their actions will be included. If some of the tasks in turn run workflows then these subworkflows will be also included but without their tasks. The algorithm will fully analyse their tasks only if max_depth is greater than zero. """ LOG.info( "Fetch execution report [workflow_execution_id=%s]", workflow_execution_id ) filters = { 'errors_only': errors_only, 'max_depth': max_depth } return build_report(workflow_execution_id, filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/member.py0000644000175000017500000001402400000000000023056 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from oslo_config import cfg from oslo_log import log as logging from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import resources from mistral import context from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.utils import rest_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF def auth_enable_check(func): @functools.wraps(func) def wrapped(*args, **kwargs): if not CONF.pecan.auth_enable: msg = ("Resource sharing feature can only be supported with " "authentication enabled.") raise exc.WorkflowException(msg) return func(*args, **kwargs) return wrapped class MembersController(rest.RestController): def __init__(self, type, resource_id): self.type = type self.resource_id = resource_id super(MembersController, self).__init__() @rest_utils.wrap_pecan_controller_exception @auth_enable_check @wsme_pecan.wsexpose(resources.Member, wtypes.text) def get(self, member_id): """Shows resource member details.""" acl.enforce('members:get', context.ctx()) LOG.debug( "Fetch resource member [resource_id=%s, resource_type=%s, " "member_id=%s].", self.resource_id, self.type, member_id ) # Use retries to prevent possible failures. 
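        # (A sketch of the intent: the retry object re-invokes the wrapped
        # DB call on transient database errors, e.g. deadlocks or dropped
        # connections, instead of failing the API request immediately.)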
r = rest_utils.create_db_retry_object() member_db = r.call( db_api.get_resource_member, self.resource_id, self.type, member_id ) return resources.Member.from_db_model(member_db) @rest_utils.wrap_pecan_controller_exception @auth_enable_check @wsme_pecan.wsexpose(resources.Members) def get_all(self): """Return all members with whom the resource has been shared.""" acl.enforce('members:list', context.ctx()) LOG.debug( "Fetch resource members [resource_id=%s, resource_type=%s].", self.resource_id, self.type ) db_members = db_api.get_resource_members( self.resource_id, self.type ) members = [ resources.Member.from_db_model(db_member) for db_member in db_members ] return resources.Members(members=members) @rest_utils.wrap_pecan_controller_exception @auth_enable_check @wsme_pecan.wsexpose( resources.Member, body=resources.Member, status_code=201 ) def post(self, member_info): """Shares the resource to a new member.""" acl.enforce('members:create', context.ctx()) LOG.debug( "Share resource to a member. [resource_id=%s, " "resource_type=%s, member_info=%s].", self.resource_id, self.type, member_info ) if not member_info.member_id: raise exc.WorkflowException("Member id must be provided.") @rest_utils.rest_retry_on_db_error def _create_resource_member(): with db_api.transaction(): wf_db = db_api.get_workflow_definition(self.resource_id) if wf_db.scope != 'private': raise exc.WorkflowException( "Only private resource could be shared." ) resource_member = { 'resource_id': self.resource_id, 'resource_type': self.type, 'member_id': member_info.member_id, 'status': 'pending' } return db_api.create_resource_member(resource_member) db_member = _create_resource_member() return resources.Member.from_db_model(db_member) @rest_utils.wrap_pecan_controller_exception @auth_enable_check @wsme_pecan.wsexpose(resources.Member, wtypes.text, body=resources.Member) def put(self, member_id, member_info): """Sets the status for a resource member.""" acl.enforce('members:update', context.ctx()) LOG.debug( "Update resource member status. [resource_id=%s, " "member_id=%s, member_info=%s].", self.resource_id, member_id, member_info ) if not member_info.status: msg = "Status must be provided." raise exc.WorkflowException(msg) db_member = rest_utils.rest_retry_on_db_error( db_api.update_resource_member )( self.resource_id, self.type, member_id, {'status': member_info.status} ) return resources.Member.from_db_model(db_member) @rest_utils.wrap_pecan_controller_exception @auth_enable_check @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, member_id): """Deletes a member from the member list of a resource.""" acl.enforce('members:delete', context.ctx()) LOG.debug( "Delete resource member. [resource_id=%s, " "resource_type=%s, member_id=%s].", self.resource_id, self.type, member_id ) rest_utils.rest_retry_on_db_error(db_api.delete_resource_member)( self.resource_id, self.type, member_id ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/resources.py0000644000175000017500000006524500000000000023634 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2018 - Extreme Networks, Inc. # Copyright 2019 - NetCracker Technology Corp. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import wsme from wsme import types as wtypes from mistral.api.controllers import resource from mistral.api.controllers.v2 import types from mistral import exceptions as exc from mistral.workflow import states from mistral_lib import utils SCOPE_TYPES = wtypes.Enum(str, 'private', 'public') class ScopedResource(object): """Utilities for scoped resources""" @classmethod def validate_scope(cls, scope): if scope not in SCOPE_TYPES.values: raise exc.InvalidModelException( "Scope must be one of the following: %s; actual: " "%s" % (SCOPE_TYPES.values, scope) ) class Workbook(resource.Resource, ScopedResource): """Workbook resource.""" id = wtypes.text name = wtypes.text namespace = wtypes.text definition = wtypes.text "workbook definition in Mistral v2 DSL" tags = [wtypes.text] scope = SCOPE_TYPES "'private' or 'public'" project_id = wsme.wsattr(wtypes.text, readonly=True) created_at = wtypes.text updated_at = wtypes.text @classmethod def sample(cls): return cls(id='123e4567-e89b-12d3-a456-426655440000', name='book', definition='HERE GOES' 'WORKBOOK DEFINITION IN MISTRAL DSL v2', tags=['large', 'expensive'], scope='private', project_id='a7eb669e9819420ea4bd1453e672c0a7', created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000', namespace='') class Workbooks(resource.ResourceList): """A collection of Workbooks.""" workbooks = [Workbook] def __init__(self, **kwargs): self._type = 'workbooks' super(Workbooks, self).__init__(**kwargs) @classmethod def sample(cls): return cls(workbooks=[Workbook.sample()]) class Workflow(resource.Resource, ScopedResource): """Workflow resource.""" id = wtypes.text name = wtypes.text namespace = wtypes.text input = wtypes.text interface = types.jsontype "input and output of the workflow" definition = wtypes.text "workflow text written in Mistral v2 language" tags = [wtypes.text] scope = SCOPE_TYPES "'private' or 'public'" project_id = wtypes.text created_at = wtypes.text updated_at = wtypes.text @classmethod def sample(cls): return cls(id='123e4567-e89b-12d3-a456-426655440000', name='flow', input='param1, param2', definition='HERE GOES' 'WORKFLOW DEFINITION IN MISTRAL DSL v2', tags=['large', 'expensive'], scope='private', project_id='a7eb669e9819420ea4bd1453e672c0a7', created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000', namespace='', interface={"input": ["param1", {"param2": 2}], "output": []} ) def _set_attributes_from_spec(self, wf_spec): # Sets input and interface fields for the Workflow resource. 
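        # For example, a hypothetical spec {'input': ['param1',
        # {'param2': 2}]} yields input='param1, param2="2"' and
        # interface={'input': ['param1', {'param2': 2}], 'output': []}.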
self._set_input(wf_spec) self._set_interface(wf_spec) def _set_input(self, wf_spec): input_list = [] if wf_spec: input = wf_spec.get('input', []) for param in input: if isinstance(param, dict): for k, v in param.items(): input_list.append('%s="%s"' % (k, v)) else: input_list.append(param) self.input = ", ".join(input_list) if input_list else '' def _set_interface(self, wf_spec): self.interface = {} if wf_spec: self.interface['input'] = wf_spec.get('input', []) self.interface['output'] = [output for output in wf_spec.get('output', {})] @classmethod def from_dict(cls, d): obj = super(Workflow, cls).from_dict(d) obj._set_attributes_from_spec(d.get('spec')) return obj @classmethod def from_db_model(cls, db_model): obj = super(Workflow, cls).from_db_model(db_model) obj._set_attributes_from_spec(db_model.get('spec')) return obj @classmethod def from_tuples(cls, tuple_iterator): obj = cls() spec = None for col_name, col_val in tuple_iterator: if hasattr(obj, col_name): # Convert all datetime values to strings. setattr(obj, col_name, utils.datetime_to_str(col_val)) if col_name == 'spec': spec = col_val if spec: obj._set_attributes_from_spec(spec) return obj class Workflows(resource.ResourceList): """A collection of workflows.""" workflows = [Workflow] def __init__(self, **kwargs): self._type = 'workflows' super(Workflows, self).__init__(**kwargs) @classmethod def sample(cls): workflows_sample = cls() workflows_sample.workflows = [Workflow.sample()] workflows_sample.next = ("http://localhost:8989/v2/workflows?" "sort_keys=id,name&" "sort_dirs=asc,desc&limit=10&" "marker=123e4567-e89b-12d3-a456-426655440000") return workflows_sample class Action(resource.Resource, ScopedResource): """Action resource. NOTE: *name* is immutable. Note that name and description get inferred from action definition when Mistral service receives a POST request. So they can't be changed in another way. """ id = wtypes.text name = wtypes.text is_system = bool input = wtypes.text description = wtypes.text tags = [wtypes.text] definition = wtypes.text scope = SCOPE_TYPES project_id = wsme.wsattr(wtypes.text, readonly=True) created_at = wtypes.text updated_at = wtypes.text namespace = wtypes.text @classmethod def sample(cls): return cls( id='123e4567-e89b-12d3-a456-426655440000', name='flow', definition='HERE GOES ACTION DEFINITION IN MISTRAL DSL v2', tags=['large', 'expensive'], scope='private', project_id='a7eb669e9819420ea4bd1453e672c0a7', created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000', namespace='' ) class Actions(resource.ResourceList): """A collection of Actions.""" actions = [Action] def __init__(self, **kwargs): self._type = 'actions' super(Actions, self).__init__(**kwargs) @classmethod def sample(cls): sample = cls() sample.actions = [Action.sample()] sample.next = ( "http://localhost:8989/v2/actions?sort_keys=id,name&" "sort_dirs=asc,desc&limit=10&" "marker=123e4567-e89b-12d3-a456-426655440000" ) return sample class Execution(resource.Resource): """Execution resource.""" id = wtypes.text "execution ID. It is immutable and auto assigned or determined by the API " "client on execution creation. " "If it's passed to POST method from a client it'll be assigned to the " "newly created execution object, but only if an execution with such ID " "doesn't exist. If it exists, then the endpoint will just return " "execution properties in JSON." workflow_id = wtypes.text "workflow ID" workflow_name = wtypes.text "workflow name" workflow_namespace = wtypes.text """Workflow namespace. 
The workflow namespace is also saved under params and passed to all sub-workflow executions. When looking for the next sub-workflow to run, The correct workflow will be found by name and namespace, where the namespace can be the workflow namespace or the default namespace. Workflows in the same namespace as the top workflow will be given a higher priority.""" description = wtypes.text "description of workflow execution" tags = [wtypes.text] "tags of workflow execution" params = types.jsontype """'params' define workflow type specific parameters. Specific parameters are: 'task_name' - the name of the target task. Only for reverse workflows. 'env' - A string value containing the name of the stored environment object or a dictionary with the environment variables used during workflow execution and accessible as 'env()' from within expressions (YAQL or Jinja) defined in the workflow text. 'evaluate_env' - If present, controls whether or not Mistral should recursively find and evaluate all expressions (YAQL or Jinja) within the specified environment (via 'env' parameter). 'True' - evaluate all expressions recursively in the environment structure. 'False' - don't evaluate expressions. 'True' by default. """ task_execution_id = wtypes.text "reference to the parent task execution" root_execution_id = wtypes.text "reference to the root execution" source_execution_id = wtypes.text """reference to a workflow execution id which will signal the api to perform a lookup of a current workflow_execution and create a replica based on that workflow inputs and parameters""" state = wtypes.text "state can be one of: IDLE, RUNNING, SUCCESS, ERROR, PAUSED" state_info = wtypes.text "an optional state information string" input = types.jsontype "input is a JSON structure containing workflow input values" output = types.jsontype "output is a workflow output" created_at = wtypes.text updated_at = wtypes.text project_id = wsme.wsattr(wtypes.text, readonly=True) published_global = types.jsontype @classmethod def sample(cls): return cls( id='123e4567-e89b-12d3-a456-426655440000', workflow_name='flow', workflow_namespace='some_namespace', workflow_id='123e4567-e89b-12d3-a456-426655441111', description='this is the first execution.', tags=['simple', 'amazing'], project_id='40a908dbddfe48ad80a87fb30fa70a03', state='SUCCESS', input={}, output={}, published_global={'key': 'value'}, params={ 'env': {'k1': 'abc', 'k2': 123}, 'notify': [ { 'type': 'webhook', 'url': 'http://endpoint/of/webhook', 'headers': { 'Content-Type': 'application/json', 'X-Auth-Token': '123456789' } }, { 'type': 'queue', 'topic': 'failover_queue', 'backend': 'rabbitmq', 'host': '127.0.0.1', 'port': 5432 } ] }, created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000' ) class Executions(resource.ResourceList): """A collection of Execution resources.""" executions = [Execution] def __init__(self, **kwargs): self._type = 'executions' super(Executions, self).__init__(**kwargs) @classmethod def sample(cls): sample = cls() sample.executions = [Execution.sample()] sample.next = ( "http://localhost:8989/v2/executions?" 
"sort_keys=id,workflow_name&sort_dirs=asc,desc&limit=10&" "marker=123e4567-e89b-12d3-a456-426655440000" ) return sample class Task(resource.Resource): """Task resource.""" id = wtypes.text name = wtypes.text type = wtypes.text workflow_name = wtypes.text workflow_namespace = wtypes.text workflow_id = wtypes.text workflow_execution_id = wtypes.text tags = [wtypes.text] state = wtypes.text """state can take one of the following values: IDLE, RUNNING, SUCCESS, ERROR, DELAYED""" state_info = wtypes.text "an optional state information string" project_id = wsme.wsattr(wtypes.text, readonly=True) runtime_context = types.jsontype result = wtypes.text published = types.jsontype published_global = types.jsontype processed = bool created_at = wtypes.text updated_at = wtypes.text started_at = wtypes.text finished_at = wtypes.text # Add this param to make Mistral API work with WSME 0.8.0 or higher version reset = wsme.wsattr(bool, mandatory=True) env = types.jsontype @classmethod def sample(cls): return cls( id='123e4567-e89b-12d3-a456-426655440000', workflow_name='flow', workflow_id='123e4567-e89b-12d3-a456-426655441111', workflow_execution_id='123e4567-e89b-12d3-a456-426655440000', tags=['long', 'security'], name='task', state=states.SUCCESS, project_id='40a908dbddfe48ad80a87fb30fa70a03', runtime_context={ 'triggered_by': [ { 'task_id': '123-123-123', 'event': 'on-success' } ] }, result='task result', published={'key': 'value'}, published_global={'key': 'value'}, processed=True, created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000', reset=True ) class Tasks(resource.ResourceList): """A collection of tasks.""" tasks = [Task] def __init__(self, **kwargs): self._type = 'tasks' super(Tasks, self).__init__(**kwargs) @classmethod def sample(cls): return cls(tasks=[Task.sample()]) class ActionExecution(resource.Resource): """ActionExecution resource.""" id = wtypes.text workflow_name = wtypes.text workflow_namespace = wtypes.text task_name = wtypes.text task_execution_id = wtypes.text state = wtypes.text state_info = wtypes.text tags = [wtypes.text] name = wtypes.text description = wtypes.text project_id = wsme.wsattr(wtypes.text, readonly=True) accepted = bool input = types.jsontype output = types.jsontype created_at = wtypes.text updated_at = wtypes.text params = types.jsontype # TODO(rakhmerov): What is this?? 
@classmethod def sample(cls): return cls( id='123e4567-e89b-12d3-a456-426655440000', workflow_name='flow', task_name='task1', workflow_execution_id='653e4127-e89b-12d3-a456-426655440076', task_execution_id='343e45623-e89b-12d3-a456-426655440090', state=states.SUCCESS, state_info=states.SUCCESS, tags=['foo', 'fee'], name='std.echo', description='My running action', project_id='40a908dbddfe48ad80a87fb30fa70a03', accepted=True, input={'first_name': 'John', 'last_name': 'Doe'}, output={'some_output': 'Hello, John Doe!'}, created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000', params={'save_result': True, "run_sync": False} ) class ActionExecutions(resource.ResourceList): """A collection of action_executions.""" action_executions = [ActionExecution] def __init__(self, **kwargs): self._type = 'action_executions' super(ActionExecutions, self).__init__(**kwargs) @classmethod def sample(cls): return cls(action_executions=[ActionExecution.sample()]) class CronTrigger(resource.Resource): """CronTrigger resource.""" id = wtypes.text name = wtypes.text workflow_name = wtypes.text workflow_id = wtypes.text workflow_input = types.jsontype workflow_params = types.jsontype project_id = wsme.wsattr(wtypes.text, readonly=True) scope = SCOPE_TYPES pattern = wtypes.text remaining_executions = wtypes.IntegerType(minimum=1) first_execution_time = wtypes.text next_execution_time = wtypes.text created_at = wtypes.text updated_at = wtypes.text @classmethod def sample(cls): return cls( id='123e4567-e89b-12d3-a456-426655440000', name='my_trigger', workflow_name='my_wf', workflow_id='123e4567-e89b-12d3-a456-426655441111', workflow_input={}, workflow_params={}, project_id='40a908dbddfe48ad80a87fb30fa70a03', scope='private', pattern='* * * * *', remaining_executions=42, created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000' ) class CronTriggers(resource.ResourceList): """A collection of cron triggers.""" cron_triggers = [CronTrigger] def __init__(self, **kwargs): self._type = 'cron_triggers' super(CronTriggers, self).__init__(**kwargs) @classmethod def sample(cls): return cls(cron_triggers=[CronTrigger.sample()]) class Environment(resource.Resource): """Environment resource.""" id = wtypes.text name = wtypes.text description = wtypes.text variables = types.jsontype scope = SCOPE_TYPES project_id = wsme.wsattr(wtypes.text, readonly=True) created_at = wtypes.text updated_at = wtypes.text @classmethod def sample(cls): return cls( id='123e4567-e89b-12d3-a456-426655440000', name='sample', description='example environment entry', variables={ 'server': 'localhost', 'database': 'temp', 'timeout': 600, 'verbose': True }, scope='private', project_id='40a908dbddfe48ad80a87fb30fa70a03', created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000' ) class Environments(resource.ResourceList): """A collection of Environment resources.""" environments = [Environment] def __init__(self, **kwargs): self._type = 'environments' super(Environments, self).__init__(**kwargs) @classmethod def sample(cls): return cls(environments=[Environment.sample()]) class Member(resource.Resource): id = types.uuid resource_id = wtypes.text resource_type = wtypes.text project_id = wtypes.text member_id = wtypes.text status = wtypes.Enum(str, 'pending', 'accepted', 'rejected') created_at = wtypes.text updated_at = wtypes.text @classmethod def sample(cls): return cls( id='123e4567-e89b-12d3-a456-426655440000', resource_id='123e4567-e89b-12d3-a456-426655440011', resource_type='workflow', 
project_id='40a908dbddfe48ad80a87fb30fa70a03', member_id='a7eb669e9819420ea4bd1453e672c0a7', status='accepted', created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000' ) class Members(resource.ResourceList): members = [Member] @classmethod def sample(cls): return cls(members=[Member.sample()]) class Service(resource.Resource): """Service resource.""" name = wtypes.text type = wtypes.text @classmethod def sample(cls): return cls(name='host1_1234', type='executor_group') class Services(resource.Resource): """A collection of Services.""" services = [Service] @classmethod def sample(cls): return cls(services=[Service.sample()]) class EventTrigger(resource.Resource): """EventTrigger resource.""" id = wsme.wsattr(wtypes.text, readonly=True) created_at = wsme.wsattr(wtypes.text, readonly=True) updated_at = wsme.wsattr(wtypes.text, readonly=True) project_id = wsme.wsattr(wtypes.text, readonly=True) name = wtypes.text workflow_id = types.uuid workflow_input = types.jsontype workflow_params = types.jsontype exchange = wtypes.text topic = wtypes.text event = wtypes.text scope = SCOPE_TYPES @classmethod def sample(cls): return cls(id='123e4567-e89b-12d3-a456-426655441414', created_at='1970-01-01T00:00:00.000000', updated_at='1970-01-01T00:00:00.000000', project_id='project', name='expiration_event_trigger', workflow_id='123e4567-e89b-12d3-a456-426655441414', workflow_input={}, workflow_params={}, exchange='nova', topic='notifications', event='compute.instance.create.end') class EventTriggers(resource.ResourceList): """A collection of event triggers.""" event_triggers = [EventTrigger] def __init__(self, **kwargs): self._type = 'event_triggers' super(EventTriggers, self).__init__(**kwargs) @classmethod def sample(cls): triggers_sample = cls() triggers_sample.event_triggers = [EventTrigger.sample()] triggers_sample.next = ("http://localhost:8989/v2/event_triggers?" "sort_keys=id,name&" "sort_dirs=asc,desc&limit=10&" "marker=123e4567-e89b-12d3-a456-426655440000") return triggers_sample class BaseExecutionReportEntry(resource.Resource): """Execution report entry resource.""" id = wtypes.text name = wtypes.text created_at = wtypes.text updated_at = wtypes.text state = wtypes.text state_info = wtypes.text @classmethod def sample(cls): # TODO(rakhmerov): complete return cls( id='123e4567-e89b-12d3-a456-426655441414', created_at='2019-01-30T00:00:00.000000', updated_at='2019-01-30T00:00:00.000000', state=states.SUCCESS ) class ActionExecutionReportEntry(BaseExecutionReportEntry): """Action execution report entry resource.""" accepted = bool last_heartbeat = wtypes.text @classmethod def sample(cls): sample = super(ActionExecutionReportEntry, cls).sample() sample.accepted = True sample.last_heartbeat = '2019-01-30T00:00:00.000000' return sample class WorkflowExecutionReportEntry(BaseExecutionReportEntry): """Workflow execution report entry resource.""" # NOTE(rakhmerov): task_executions has to be declared below # after we declare a class for task execution entry resource. @classmethod def sample(cls): sample = super(WorkflowExecutionReportEntry, cls).sample() # We can't define a non-empty list task executions here because # the needed class is not defined yet. Since this is just a sample # we can sacrifice it. 
sample.task_executions = [] return sample class TaskExecutionReportEntry(BaseExecutionReportEntry): """Task execution report entity resource.""" action_executions = [ActionExecutionReportEntry] workflow_executions = [WorkflowExecutionReportEntry] retry_count = wtypes.IntegerType(minimum=0) @classmethod def sample(cls): sample = super(TaskExecutionReportEntry, cls).sample() sample.action_executions = [ActionExecutionReportEntry.sample()] sample.workflow_executions = [] sample.retry_count = 0 return sample # We have to declare this field later because of the dynamic binding. # It can't be within WorkflowExecutionReportEntry before # TaskExecutionReportEntry is declared. WorkflowExecutionReportEntry.task_executions = [TaskExecutionReportEntry] wtypes.registry.reregister(WorkflowExecutionReportEntry) class ExecutionReportStatistics(resource.Resource): """Execution report statistics. TODO(rakhmerov): There's much more we can add here. For example, information about action, average (and also min and max) task execution run time etc. """ total_tasks_count = wtypes.IntegerType(minimum=0) running_tasks_count = wtypes.IntegerType(minimum=0) success_tasks_count = wtypes.IntegerType(minimum=0) error_tasks_count = wtypes.IntegerType(minimum=0) idle_tasks_count = wtypes.IntegerType(minimum=0) paused_tasks_count = wtypes.IntegerType(minimum=0) def __init__(self, **kw): self.total_tasks_count = 0 self.running_tasks_count = 0 self.success_tasks_count = 0 self.error_tasks_count = 0 self.idle_tasks_count = 0 self.paused_tasks_count = 0 super(ExecutionReportStatistics, self).__init__(**kw) def increment_running(self): self.running_tasks_count += 1 self.total_tasks_count += 1 def increment_success(self): self.success_tasks_count += 1 self.total_tasks_count += 1 def increment_error(self): self.error_tasks_count += 1 self.total_tasks_count += 1 def increment_idle(self): self.idle_tasks_count += 1 self.total_tasks_count += 1 def increment_paused(self): self.paused_tasks_count += 1 self.total_tasks_count += 1 @classmethod def sample(cls): return cls( total_tasks_count=10, running_tasks_count=3, success_tasks_count=5, error_tasks_count=2, idle_tasks_count=0, paused_tasks_count=0 ) class ExecutionReport(resource.Resource): """Execution report resource.""" statistics = ExecutionReportStatistics """General statistics about the workflow execution hierarchy.""" root_workflow_execution = WorkflowExecutionReportEntry """Root entry of the report associated with a workflow execution.""" @classmethod def sample(cls): sample = cls() sample.statistics = ExecutionReportStatistics.sample() sample.root_workflow_execution = WorkflowExecutionReportEntry.sample() return sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/root.py0000644000175000017500000000447400000000000022602 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
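"""Root controller of API version 2.

A GET on the version root simply echoes the versioned base URI, e.g.
(assuming the default endpoint):

    GET /v2  ->  {"uri": "http://localhost:8989/v2"}
"""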
import pecan from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api.controllers import resource from mistral.api.controllers.v2 import action from mistral.api.controllers.v2 import action_execution from mistral.api.controllers.v2 import cron_trigger from mistral.api.controllers.v2 import environment from mistral.api.controllers.v2 import event_trigger from mistral.api.controllers.v2 import execution from mistral.api.controllers.v2 import service from mistral.api.controllers.v2 import task from mistral.api.controllers.v2 import workbook from mistral.api.controllers.v2 import workflow class RootResource(resource.Resource): """Root resource for API version 2. It references all other resources belonging to the API. """ uri = wtypes.text # TODO(everyone): what else do we need here? # TODO(everyone): we need to collect all the links from API v2.0 # and provide them. class Controller(object): """API root controller for version 2.""" workbooks = workbook.WorkbooksController() actions = action.ActionsController() workflows = workflow.WorkflowsController() executions = execution.ExecutionsController() tasks = task.TasksController() cron_triggers = cron_trigger.CronTriggersController() environments = environment.EnvironmentController() action_executions = action_execution.ActionExecutionsController() services = service.ServicesController() event_triggers = event_trigger.EventTriggersController() @wsme_pecan.wsexpose(RootResource) def index(self): return RootResource(uri='%s/%s' % (pecan.request.application_url, 'v2')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/service.py0000644000175000017500000000606200000000000023252 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from pecan import rest import six import tooz.coordination import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import resources from mistral import context from mistral import exceptions as exc from mistral.service import coordination from mistral.utils import rest_utils LOG = logging.getLogger(__name__) class ServicesController(rest.RestController): @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Services) def get_all(self): """Return all services.""" acl.enforce('services:list', context.ctx()) LOG.debug("Fetch services.") if not cfg.CONF.coordination.backend_url: raise exc.CoordinationNotSupportedException("Service API " "is not supported.") service_coordinator = coordination.get_service_coordinator() if not service_coordinator.is_active(): raise exc.CoordinationException( "Failed to connect to coordination backend." ) # Should be the same as LAUNCH_OPTIONS in launch.py # At the moment there is a duplication, need to solve it. 
# We cannot depend on launch.py since it uses eventlet monkey patch # under wsgi it causes problems mistral_services = {'api', 'engine', 'executor', 'event-engine', 'notifier'} services_list = [] service_group = ['%s_group' % i for i in mistral_services] try: for group in service_group: members = service_coordinator.get_members(group) members_list = [ resources.Service.from_dict( { 'type': group, 'name': member } ) for member in members ] services_list.extend(members_list) except tooz.coordination.ToozError as e: # In the scenario of network interruption or manually shutdown # connection shutdown, ToozError will be raised. raise exc.CoordinationException( "Failed to get service members from coordination backend. %s" % six.text_type(e) ) return resources.Services(services=services_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/sub_execution.py0000644000175000017500000001022000000000000024455 0ustar00coreycorey00000000000000# Copyright 2020 - Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from pecan import request from pecan import rest import wsmeext.pecan as wsme_pecan from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import types from mistral.db.v2 import api as db_api from mistral.utils import rest_utils from mistral.workflow import states LOG = logging.getLogger(__name__) def get_task_sub_executions_list(task_ex_id, filters, cur_depth): task_sub_execs = [] with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex_id) if filters['errors_only'] and task_ex.state != states.ERROR: return [] child_wf_executions = task_ex.workflow_executions for c_ex in child_wf_executions: task_sub_execs.extend( get_execution_sub_executions_list( c_ex.id, filters, cur_depth ) ) return task_sub_execs def get_execution_sub_executions_list(wf_ex_id, filters, cur_depth): max_depth = filters['max_depth'] include_output = filters['include_output'] ex_sub_execs = [] if 0 <= max_depth < cur_depth: return [] with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex_id) wf_resource = _get_wf_resource_from_db_model( wf_ex, include_output) ex_sub_execs.append(wf_resource) task_execs = wf_ex.task_executions for t_ex in task_execs: task_sub_executions = get_task_sub_executions_list( t_ex.id, filters, cur_depth + 1 ) ex_sub_execs.extend(task_sub_executions) return ex_sub_execs def _get_wf_resource_from_db_model(wf_ex, include_output): if include_output: rest_utils.load_deferred_fields(wf_ex, ['params', 'input', 'output']) else: rest_utils.load_deferred_fields(wf_ex, ['params', 'input']) return resources.Execution.from_db_model(wf_ex) def _get_sub_executions(origin, id, filters): if origin == 'execution': return get_execution_sub_executions_list(id, filters, cur_depth=0) else: return get_task_sub_executions_list(id, filters, cur_depth=0) class SubExecutionsController(rest.RestController): 
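    """Lists sub-executions of a workflow execution or of a task.

    Unlike the report endpoint, the result is a flat list of Execution
    resources rather than a nested tree.
    """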
    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Executions, types.uuid, bool, int, bool)
    def get(self, id, errors_only=False, max_depth=-1, include_output=False):
        """Return the sub-executions of an execution or a task execution.

        :param id: The ID of the workflow execution or task execution
            to get the sub-executions of.
        :param errors_only: Optional. If True, only error paths of the
            execution tree are returned.
        :param max_depth: Optional. Limits the depth of recursion while
            obtaining the execution tree. If the value is a negative
            number then no limit is set.
        :param include_output: Optional. Include the output for all
            executions in the list.
        """
        origin = 'execution' if request.path.startswith('/v2/executions') \
            else 'task'

        LOG.info(
            "Fetching sub executions of %s [id=%s]",
            origin,
            id
        )

        filters = {
            'errors_only': errors_only,
            'max_depth': max_depth,
            'include_output': include_output
        }

        sub_executions_resource = _get_sub_executions(origin, id, filters)

        return resources.Executions.convert_with_links(
            sub_executions_resource,
            request.application_url,
        )

mistral-10.0.0.0b3/mistral/api/controllers/v2/task.py

# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2019 - NetCracker Technology Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

from oslo_log import log as logging
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from mistral.api import access_control as acl
from mistral.api.controllers.v2 import action_execution
from mistral.api.controllers.v2 import resources
from mistral.api.controllers.v2 import sub_execution
from mistral.api.controllers.v2 import types
from mistral import context
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral import expressions as expr
from mistral.lang import parser as spec_parser
from mistral.rpc import clients as rpc
from mistral.utils import filter_utils
from mistral.utils import rest_utils
from mistral.workflow import data_flow
from mistral.workflow import states

LOG = logging.getLogger(__name__)

STATE_TYPES = wtypes.Enum(
    str,
    states.IDLE,
    states.RUNNING,
    states.SUCCESS,
    states.ERROR,
    states.RUNNING_DELAYED
)


def _get_task_resource_with_result(task_ex):
    task = resources.Task.from_db_model(task_ex)

    task.result = json.dumps(data_flow.get_task_execution_result(task_ex))

    return task


# Use retries to prevent possible failures.
@rest_utils.rest_retry_on_db_error def _get_task_execution(id): with db_api.transaction(): task_ex = db_api.get_task_execution(id) rest_utils.load_deferred_fields(task_ex, ['workflow_execution']) rest_utils.load_deferred_fields( task_ex.workflow_execution, ['context', 'input', 'params', 'root_execution'] ) rest_utils.load_deferred_fields( task_ex.workflow_execution.root_execution, ['params'] ) return _get_task_resource_with_result(task_ex), task_ex def get_published_global(task_ex, wf_ex=None): if task_ex.state not in [states.SUCCESS, states.ERROR]: return if wf_ex is None: wf_ex = task_ex.workflow_execution expr_ctx = data_flow.ContextView( data_flow.get_current_task_dict(task_ex), task_ex.in_context, data_flow.get_workflow_environment_dict(wf_ex), wf_ex.context, wf_ex.input ) task_spec = spec_parser.get_task_spec(task_ex.spec) publish_spec = task_spec.get_publish(task_ex.state) if not publish_spec: return global_vars = publish_spec.get_global() return expr.evaluate_recursively(global_vars, expr_ctx) def _task_with_published_global(task, task_ex): published_global_vars = get_published_global(task_ex) if published_global_vars: task.published_global = published_global_vars return task class TaskExecutionsController(rest.RestController): @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Executions, types.uuid, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, types.uuid, wtypes.text, types.uniquelist, types.jsontype, STATE_TYPES, wtypes.text, types.jsontype, types.jsontype, wtypes.text, wtypes.text) def get_all(self, task_execution_id, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', workflow_name=None, workflow_id=None, description=None, tags=None, params=None, state=None, state_info=None, input=None, output=None, created_at=None, updated_at=None): """Return all executions that belong to the given task execution. :param task_execution_id: Task task execution ID. :param marker: Optional. Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: created_at, which is backward compatible. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: desc. The length of sort_dirs can be equal or less than that of sort_keys. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param workflow_name: Optional. Keep only resources with a specific workflow name. :param workflow_id: Optional. Keep only resources with a specific workflow ID. :param description: Optional. Keep only resources with a specific description. :param tags: Optional. Keep only resources containing specific tags. :param params: Optional. Keep only resources with specific parameters. :param state: Optional. Keep only resources with a specific state. :param state_info: Optional. Keep only resources with specific state information. :param input: Optional. Keep only resources with a specific input. :param output: Optional. Keep only resources with a specific output. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. 
""" acl.enforce('executions:list', context.ctx()) filters = filter_utils.create_filters_from_request_params( task_execution_id=task_execution_id, created_at=created_at, workflow_name=workflow_name, workflow_id=workflow_id, tags=tags, params=params, state=state, state_info=state_info, input=input, output=output, updated_at=updated_at, description=description ) LOG.debug( "Fetch executions. marker=%s, limit=%s, sort_keys=%s, " "sort_dirs=%s, filters=%s", marker, limit, sort_keys, sort_dirs, filters ) return rest_utils.get_all( resources.Executions, resources.Execution, db_api.get_workflow_executions, db_api.get_workflow_execution, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, **filters ) class TasksController(rest.RestController): action_executions = action_execution.TasksActionExecutionController() workflow_executions = TaskExecutionsController() executions = sub_execution.SubExecutionsController() @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Task, wtypes.text) def get(self, id): """Return the specified task. :param id: UUID of task to retrieve """ acl.enforce('tasks:get', context.ctx()) LOG.debug("Fetch task [id=%s]", id) task, task_ex = _get_task_execution(id) return _task_with_published_global(task, task_ex) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Tasks, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, wtypes.text, types.uuid, types.uuid, types.uniquelist, STATE_TYPES, wtypes.text, wtypes.text, types.jsontype, bool, wtypes.text, wtypes.text, bool, types.jsontype) def get_all(self, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', name=None, workflow_name=None, workflow_id=None, workflow_execution_id=None, tags=None, state=None, state_info=None, result=None, published=None, processed=None, created_at=None, updated_at=None, reset=None, env=None): """Return all tasks. Where project_id is the same as the requester or project_id is different but the scope is public. :param marker: Optional. Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: created_at, which is backward compatible. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: desc. The length of sort_dirs can be equal or less than that of sort_keys. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param name: Optional. Keep only resources with a specific name. :param workflow_name: Optional. Keep only resources with a specific workflow name. :param workflow_id: Optional. Keep only resources with a specific workflow ID. :param workflow_execution_id: Optional. Keep only resources with a specific workflow execution ID. :param state: Optional. Keep only resources with a specific state. :param state_info: Optional. Keep only resources with specific state information. :param result: Optional. Keep only resources with a specific result. :param published: Optional. Keep only resources with specific published content. :param processed: Optional. Keep only resources which have been processed or not. :param reset: Optional. Keep only resources which have been reset or not. :param env: Optional. 
Keep only resources with a specific environment. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. """ acl.enforce('tasks:list', context.ctx()) filters = filter_utils.create_filters_from_request_params( created_at=created_at, workflow_name=workflow_name, workflow_id=workflow_id, tags=tags, state=state, state_info=state_info, updated_at=updated_at, name=name, workflow_execution_id=workflow_execution_id, result=result, published=published, processed=processed, reset=reset, env=env ) LOG.debug( "Fetch tasks. marker=%s, limit=%s, sort_keys=%s, sort_dirs=%s," " filters=%s", marker, limit, sort_keys, sort_dirs, filters ) return rest_utils.get_all( resources.Tasks, resources.Task, db_api.get_task_executions, db_api.get_task_execution, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, **filters ) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Task, wtypes.text, body=resources.Task) def put(self, id, task): """Update the specified task execution. :param id: Task execution ID. :param task: Task execution object. """ acl.enforce('tasks:update', context.ctx()) LOG.debug("Update task execution [id=%s, task=%s]", id, task) @rest_utils.rest_retry_on_db_error def _read_task_params(id, task): with db_api.transaction(): task_ex = db_api.get_task_execution(id) task_spec = spec_parser.get_task_spec(task_ex.spec) task_name = task.name or None reset = task.reset env = task.env or None if task_name and task_name != task_ex.name: raise exc.WorkflowException('Task name does not match.') wf_ex = db_api.get_workflow_execution( task_ex.workflow_execution_id ) return env, reset, task_ex, task_spec, wf_ex env, reset, task_ex, task_spec, wf_ex = _read_task_params(id, task) wf_name = task.workflow_name or None if wf_name and wf_name != wf_ex.name: raise exc.WorkflowException('Workflow name does not match.') if task.state != states.RUNNING: raise exc.WorkflowException( 'Invalid task state. ' 'Only updating task to rerun is supported.' ) if task_ex.state != states.ERROR: raise exc.WorkflowException( 'The current task execution must be in ERROR for rerun.' ' Only updating task to rerun is supported.' ) if not task_spec.get_with_items() and not reset: raise exc.WorkflowException( 'Only with-items task has the option to not reset.' ) rpc.get_engine_client().rerun_workflow( task_ex.id, reset=reset, env=env ) @rest_utils.rest_retry_on_db_error def _retrieve_task(): with db_api.transaction(): task_ex = db_api.get_task_execution(id) return _get_task_resource_with_result(task_ex) return _retrieve_task() class ExecutionTasksController(rest.RestController): @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Tasks, types.uuid, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, wtypes.text, types.uuid, types.uniquelist, STATE_TYPES, wtypes.text, wtypes.text, types.jsontype, bool, wtypes.text, wtypes.text, bool, types.jsontype) def get_all(self, workflow_execution_id, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', name=None, workflow_name=None, workflow_id=None, tags=None, state=None, state_info=None, result=None, published=None, processed=None, created_at=None, updated_at=None, reset=None, env=None): """Return all tasks within the execution. Where project_id is the same as the requester or project_id is different but the scope is public. :param marker: Optional. 
Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: created_at, which is backward compatible. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: desc. The length of sort_dirs can be equal or less than that of sort_keys. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param name: Optional. Keep only resources with a specific name. :param workflow_name: Optional. Keep only resources with a specific workflow name. :param workflow_id: Optional. Keep only resources with a specific workflow ID. :param workflow_execution_id: Optional. Keep only resources with a specific workflow execution ID. :param tags: Optional. Keep only resources containing specific tags. :param state: Optional. Keep only resources with a specific state. :param state_info: Optional. Keep only resources with specific state information. :param result: Optional. Keep only resources with a specific result. :param published: Optional. Keep only resources with specific published content. :param processed: Optional. Keep only resources which have been processed or not. :param reset: Optional. Keep only resources which have been reset or not. :param env: Optional. Keep only resources with a specific environment. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. """ acl.enforce('tasks:list', context.ctx()) filters = filter_utils.create_filters_from_request_params( workflow_execution_id=workflow_execution_id, created_at=created_at, workflow_name=workflow_name, workflow_id=workflow_id, tags=tags, state=state, state_info=state_info, updated_at=updated_at, name=name, result=result, published=published, processed=processed, reset=reset, env=env ) LOG.debug( "Fetch tasks. workflow_execution_id=%s, marker=%s, limit=%s, " "sort_keys=%s, sort_dirs=%s, filters=%s", workflow_execution_id, marker, limit, sort_keys, sort_dirs, filters ) return rest_utils.get_all( resources.Tasks, resources.Task, db_api.get_task_executions, db_api.get_task_execution, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, **filters ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/types.py0000644000175000017500000000653000000000000022756 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
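# The custom wsme types defined in this module let the v2 controllers accept
# comma-separated query parameters (e.g. ?tags=a,b), UUID filters and
# JSON-encoded dictionaries. A minimal illustration of the expected
# conversions (sample values are hypothetical):
#
#   >>> ListType.validate('A, b,,C')
#   ['a', 'b', 'c']
#   >>> UniqueListType.validate('a,b,a')
#   ['a', 'b']
#   >>> JsonType().frombasetype('{"k": 1}')
#   {'k': 1}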
import json from mistral.utils import filter_utils from oslo_utils import uuidutils import six from wsme import types as wtypes from mistral import exceptions as exc class ListType(wtypes.UserType): """A simple list type.""" basetype = wtypes.text name = 'list' @staticmethod def validate(value): """Validate and convert the input to a ListType. :param value: A comma separated string of values :returns: A list of values. """ items = [v.strip().lower() for v in six.text_type(value).split(',')] # remove empty items. return [x for x in items if x] @staticmethod def frombasetype(value): return ListType.validate(value) if value is not None else None class UniqueListType(ListType): """A simple list type with no duplicate items.""" name = 'uniquelist' @staticmethod def validate(value): """Validate and convert the input to a UniqueListType. :param value: A comma separated string of values. :returns: A list with no duplicate items. """ items = ListType.validate(value) seen = set() return [x for x in items if not (x in seen or seen.add(x))] @staticmethod def frombasetype(value): return UniqueListType.validate(value) if value is not None else None class UuidType(wtypes.UserType): """A simple UUID type. The builtin UuidType class in wsme.types doesn't work properly with pecan. """ basetype = wtypes.text name = 'uuid' @staticmethod def validate(value): _, data = filter_utils.extract_filter_type_and_value(value) if not uuidutils.is_uuid_like(data): raise exc.InputException( "Expected a uuid but received %s." % data ) return data @staticmethod def frombasetype(value): return UuidType.validate(value) if value is not None else None class JsonType(wtypes.UserType): """A simple JSON type.""" basetype = wtypes.text name = 'json' def validate(self, value): if not value: return {} if not isinstance(value, dict): raise exc.InputException( 'JsonType field value must be a dictionary [actual=%s]' % value ) return value def frombasetype(self, value): if isinstance(value, dict): return value try: return json.loads(value) if value is not None else None except TypeError as e: raise ValueError(e) def tobasetype(self, value): # Value must be a dict. return json.dumps(value) if value is not None else None uuid = UuidType() list = ListType() uniquelist = UniqueListType() jsontype = JsonType() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/validation.py0000644000175000017500000000224000000000000023736 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
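# The SpecValidationController below is mounted as a "validate" sub-resource
# by the workbook and workflow controllers (see WorkbooksController.validate
# further down). A hypothetical exchange against a local API server:
#
#   POST http://localhost:8989/v2/workbooks/validate
#   (request body: a workbook YAML definition)
#
#   -> {"valid": true}
#   -> {"valid": false, "error": "..."}    # on a DSL parsing error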
import pecan from pecan import rest from mistral import exceptions as exc class SpecValidationController(rest.RestController): def __init__(self, parser): super(SpecValidationController, self).__init__() self._parse_func = parser @pecan.expose('json') def post(self): """Validate a spec.""" definition = pecan.request.text try: self._parse_func(definition, validate=True) except exc.DSLParsingException as e: return {'valid': False, 'error': str(e)} return {'valid': True} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/workbook.py0000644000175000017500000002026700000000000023452 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging import pecan from pecan import hooks from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import types from mistral.api.controllers.v2 import validation from mistral.api.hooks import content_type as ct_hook from mistral import context from mistral.db.v2 import api as db_api from mistral.lang import parser as spec_parser from mistral.services import workbooks from mistral.utils import filter_utils from mistral.utils import rest_utils LOG = logging.getLogger(__name__) class WorkbooksController(rest.RestController, hooks.HookController): __hooks__ = [ct_hook.ContentTypeHook("application/json", ['POST', 'PUT'])] validate = validation.SpecValidationController( spec_parser.get_workbook_spec_from_yaml) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Workbook, wtypes.text, wtypes.text) def get(self, name, namespace=''): """Return the named workbook. :param name: Name of workbook to retrieve. :param namespace: Optional. Namespace of workbook to retrieve. """ acl.enforce('workbooks:get', context.ctx()) LOG.debug("Fetch workbook [name=%s, namespace=%s]", name, namespace) # Use retries to prevent possible failures. r = rest_utils.create_db_retry_object() db_model = r.call(db_api.get_workbook, name, namespace=namespace) return resources.Workbook.from_db_model(db_model) @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") def put(self, namespace=''): """Update a workbook. :param namespace: Optional. Namespace of workbook to update. :param validate: Optional. If set to False, disables validation of the workflow YAML definition syntax, but only if allowed in the service configuration. By default, validation is enabled. """ acl.enforce('workbooks:update', context.ctx()) definition = pecan.request.text scope = pecan.request.GET.get('scope', 'private') # If "skip_validation" is present in the query string parameters # then workflow language validation will be disabled. 
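# For example (hypothetical request):
#   PUT /v2/workbooks?skip_validation
# stores the definition without syntax checking, but only if the service
# configuration ([api] validation_mode) allows validation to be skipped.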
skip_validation = 'skip_validation' in pecan.request.GET resources.Workbook.validate_scope(scope) LOG.debug("Update workbook [definition=%s]", definition) wb_db = rest_utils.rest_retry_on_db_error( workbooks.update_workbook_v2)( definition, namespace=namespace, scope=scope, validate=not skip_validation ) return resources.Workbook.from_db_model(wb_db).to_json() @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") def post(self, namespace=''): """Create a new workbook. :param namespace: Optional. The namespace to create the workbook in. Workbooks with the same name can be added to a given project if they are in two different namespaces. """ acl.enforce('workbooks:create', context.ctx()) definition = pecan.request.text scope = pecan.request.GET.get('scope', 'private') # If "skip_validation" is present in the query string parameters # then workflow language validation will be disabled. skip_validation = 'skip_validation' in pecan.request.GET resources.Workbook.validate_scope(scope) LOG.debug("Create workbook [definition=%s]", definition) wb_db = rest_utils.rest_retry_on_db_error( workbooks.create_workbook_v2)( definition, namespace=namespace, scope=scope, validate=not skip_validation ) pecan.response.status = 201 return resources.Workbook.from_db_model(wb_db).to_json() @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204) def delete(self, name, namespace=''): """Delete the named workbook. :param name: Name of workbook to delete. :param namespace: Optional. Namespace of workbook to delete. """ acl.enforce('workbooks:delete', context.ctx()) LOG.debug("Delete workbook [name=%s, namespace=%s]", name, namespace) rest_utils.rest_retry_on_db_error(db_api.delete_workbook)( name, namespace ) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Workbooks, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, wtypes.text, wtypes.text, resources.SCOPE_TYPES, wtypes.text, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', created_at=None, definition=None, name=None, scope=None, tags=None, updated_at=None, namespace=None): """Return a list of workbooks. :param marker: Optional. Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: created_at. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: asc. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param name: Optional. Keep only resources with a specific name. :param definition: Optional. Keep only resources with a specific definition. :param tags: Optional. Keep only resources containing specific tags. :param scope: Optional. Keep only resources with a specific scope. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. :param namespace: Optional. Keep only resources with specific namespace. 
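Example of a filtered listing (hypothetical values):

    GET /v2/workbooks?namespace=test&tags=tag1,tag2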
""" acl.enforce('workbooks:list', context.ctx()) filters = filter_utils.create_filters_from_request_params( created_at=created_at, definition=definition, name=name, scope=scope, tags=tags, updated_at=updated_at, namespace=namespace ) LOG.debug("Fetch workbooks. marker=%s, limit=%s, sort_keys=%s, " "sort_dirs=%s, fields=%s, filters=%s", marker, limit, sort_keys, sort_dirs, fields, filters) return rest_utils.get_all( resources.Workbooks, resources.Workbook, db_api.get_workbooks, db_api.get_workbook, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, **filters ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/controllers/v2/workflow.py0000644000175000017500000002703000000000000023462 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from oslo_utils import uuidutils import pecan from pecan import hooks from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from mistral.api import access_control as acl from mistral.api.controllers.v2 import member from mistral.api.controllers.v2 import resources from mistral.api.controllers.v2 import types from mistral.api.controllers.v2 import validation from mistral.api.hooks import content_type as ct_hook from mistral import context from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.lang import parser as spec_parser from mistral.services import workflows from mistral.utils import filter_utils from mistral.utils import rest_utils LOG = logging.getLogger(__name__) class WorkflowsController(rest.RestController, hooks.HookController): # TODO(nmakhotkin): Have a discussion with pecan/WSME folks in order # to have requests and response of different content types. Then # delete ContentTypeHook. __hooks__ = [ct_hook.ContentTypeHook("application/json", ['POST', 'PUT'])] validate = validation.SpecValidationController( spec_parser.get_workflow_list_spec_from_yaml) @pecan.expose() def _lookup(self, identifier, sub_resource, *remainder): LOG.debug( "Lookup subcontrollers of WorkflowsController, " "sub_resource: %s, remainder: %s.", sub_resource, remainder ) if sub_resource == 'members': if not uuidutils.is_uuid_like(identifier): raise exc.WorkflowException( "Only support UUID as resource identifier in resource " "sharing feature." ) # We don't check workflow's existence here, since a user may query # members of a workflow, which doesn't belong to him/her. return member.MembersController('workflow', identifier), remainder return super(WorkflowsController, self)._lookup( identifier, sub_resource, *remainder ) @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Workflow, wtypes.text, wtypes.text) def get(self, identifier, namespace=''): """Return the named workflow. 
:param identifier: Name or UUID of the workflow to retrieve. :param namespace: Optional. Namespace of the workflow to retrieve. """ acl.enforce('workflows:get', context.ctx()) LOG.debug("Fetch workflow [identifier=%s]", identifier) # Use retries to prevent possible failures. r = rest_utils.create_db_retry_object() db_model = r.call( db_api.get_workflow_definition, identifier, namespace=namespace ) return resources.Workflow.from_db_model(db_model) @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") def put(self, identifier=None, namespace=''): """Update one or more workflows. :param identifier: Optional. If provided, it's UUID of a workflow. Only one workflow can be updated with identifier param. :param namespace: Optional. If provided, it's the namespace of the workflow/workflows. Currently, namespace cannot be changed. The text is allowed to have definitions of multiple workflows. In such case, they all will be updated. """ acl.enforce('workflows:update', context.ctx()) # NOTE(rakhmerov): We can't use normal method arguments to access # request data because it will break dynamic sub controller lookup # functionality (see _lookup() above) so we have to get the data # directly from the request object. definition = pecan.request.text scope = pecan.request.GET.get('scope', 'private') # If "skip_validation" is present in the query string parameters # then workflow language validation will be disabled. skip_validation = 'skip_validation' in pecan.request.GET resources.Workflow.validate_scope(scope) if scope == 'public': acl.enforce('workflows:publicize', context.ctx()) LOG.debug("Update workflow(s) [definition=%s]", definition) db_wfs = rest_utils.rest_retry_on_db_error(workflows.update_workflows)( definition, scope=scope, identifier=identifier, namespace=namespace, validate=not skip_validation ) workflow_list = [ resources.Workflow.from_db_model(db_wf) for db_wf in db_wfs ] return (workflow_list[0].to_json() if identifier else resources.Workflows(workflows=workflow_list).to_json()) @rest_utils.wrap_pecan_controller_exception @pecan.expose(content_type="text/plain") def post(self, namespace=''): """Create a new workflow. :param namespace: Optional. The namespace to create the workflow in. Workflows with the same name can be added to a given project if they are in two different namespaces. The text is allowed to have definitions of multiple workflows. In such case, they all will be created. """ acl.enforce('workflows:create', context.ctx()) # NOTE(rakhmerov): We can't use normal method arguments to access # request data because it will break dynamic sub controller lookup # functionality (see _lookup() above) so we have to get the data # directly from the request object. definition = pecan.request.text scope = pecan.request.GET.get('scope', 'private') # If "skip_validation" is present in the query string parameters # then workflow language validation will be disabled. 
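# For example (hypothetical request):
#   POST /v2/workflows?scope=public&skip_validation
# skips syntax checking and, because of scope=public, additionally
# requires the 'workflows:publicize' ACL enforced below.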
skip_validation = 'skip_validation' in pecan.request.GET pecan.response.status = 201 resources.Workflow.validate_scope(scope) if scope == 'public': acl.enforce('workflows:publicize', context.ctx()) LOG.debug("Create workflow(s) [definition=%s]", definition) db_wfs = rest_utils.rest_retry_on_db_error(workflows.create_workflows)( definition, scope=scope, namespace=namespace, validate=not skip_validation ) workflow_list = [ resources.Workflow.from_db_model(db_wf) for db_wf in db_wfs ] return resources.Workflows(workflows=workflow_list).to_json() @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204) def delete(self, identifier, namespace=''): """Delete a workflow. :param identifier: Name or ID of workflow to delete. :param namespace: Optional. Namespace of the workflow to delete. """ acl.enforce('workflows:delete', context.ctx()) LOG.debug("Delete workflow [identifier=%s, namespace=%s]", identifier, namespace) @rest_utils.rest_retry_on_db_error def _delete_workflow_definition(): with db_api.transaction(): db_api.delete_workflow_definition(identifier, namespace) _delete_workflow_definition() @rest_utils.wrap_wsme_controller_exception @wsme_pecan.wsexpose(resources.Workflows, types.uuid, int, types.uniquelist, types.list, types.uniquelist, wtypes.text, wtypes.text, wtypes.text, wtypes.text, resources.SCOPE_TYPES, types.uuid, wtypes.text, wtypes.text, bool, wtypes.text) def get_all(self, marker=None, limit=None, sort_keys='created_at', sort_dirs='asc', fields='', name=None, input=None, definition=None, tags=None, scope=None, project_id=None, created_at=None, updated_at=None, all_projects=False, namespace=None): """Return a list of workflows. :param marker: Optional. Pagination marker for large data sets. :param limit: Optional. Maximum number of resources to return in a single result. Default value is None for backward compatibility. :param sort_keys: Optional. Columns to sort results by. Default: created_at. :param sort_dirs: Optional. Directions to sort corresponding to sort_keys, "asc" or "desc" can be chosen. Default: asc. :param fields: Optional. A specified list of fields of the resource to be returned. 'id' will be included automatically in fields if it's provided, since it will be used when constructing 'next' link. :param name: Optional. Keep only resources with a specific name. :param namespace: Optional. Keep only resources with a specific namespace :param input: Optional. Keep only resources with a specific input. :param definition: Optional. Keep only resources with a specific definition. :param tags: Optional. Keep only resources containing specific tags. :param scope: Optional. Keep only resources with a specific scope. :param project_id: Optional. The same as the requester project_id or different if the scope is public. :param created_at: Optional. Keep only resources created at a specific time and date. :param updated_at: Optional. Keep only resources with specific latest update time and date. :param all_projects: Optional. Get resources of all projects. """ acl.enforce('workflows:list', context.ctx()) if all_projects: acl.enforce('workflows:list:all_projects', context.ctx()) filters = filter_utils.create_filters_from_request_params( created_at=created_at, name=name, scope=scope, tags=tags, updated_at=updated_at, input=input, definition=definition, project_id=project_id, namespace=namespace ) LOG.debug("Fetch workflows. 
marker=%s, limit=%s, sort_keys=%s, " "sort_dirs=%s, fields=%s, filters=%s, all_projects=%s", marker, limit, sort_keys, sort_dirs, fields, filters, all_projects) return rest_utils.get_all( resources.Workflows, resources.Workflow, db_api.get_workflow_definitions, db_api.get_workflow_definition_by_id, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, all_projects=all_projects, **filters ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1055672 mistral-10.0.0.0b3/mistral/api/hooks/0000755000175000017500000000000000000000000017462 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/hooks/__init__.py0000644000175000017500000000000000000000000021561 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/hooks/content_type.py0000644000175000017500000000264200000000000022553 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pecan import hooks class ContentTypeHook(hooks.PecanHook): def __init__(self, content_type, methods=('GET',)): """Content type hook. This hook is needed for changing content type of responses but only for some HTTP methods. This is kind of 'hack' but it seems impossible using pecan/WSME to set different content types on request and response. :param content_type: Content-Type of the response. :type content_type: str :param methods: HTTP methods that should have response with given content_type. :type methods: list """ self.content_type = content_type self.methods = methods def after(self, state): if state.request.method in self.methods: state.response.content_type = self.content_type ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/service.py0000644000175000017500000000413100000000000020350 0ustar00coreycorey00000000000000# Copyright 2016 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
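# A minimal sketch of how the ContentTypeHook defined above is attached to a
# pecan controller ('MyController' is hypothetical; the WorkbooksController
# and WorkflowsController above use exactly this pattern):
#
#   from pecan import hooks, rest
#
#   from mistral.api.hooks import content_type as ct_hook
#
#   class MyController(rest.RestController, hooks.HookController):
#       __hooks__ = [ct_hook.ContentTypeHook("application/json",
#                                            ['POST', 'PUT'])]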
from oslo_concurrency import processutils from oslo_config import cfg from oslo_service import service from oslo_service import wsgi from mistral.api import app from mistral.rpc import clients as rpc_clients class WSGIService(service.ServiceBase): """Provides ability to launch Mistral API from wsgi app.""" def __init__(self, name): self.name = name self.app = app.setup_app() self.workers = ( cfg.CONF.api.api_workers or processutils.get_worker_count() ) self.server = wsgi.Server( cfg.CONF, name, self.app, host=cfg.CONF.api.host, port=cfg.CONF.api.port, use_ssl=cfg.CONF.api.enable_ssl_api ) def start(self): # NOTE: When oslo.service creates an API worker it forks a new child # system process. The child process is created as precise copy of the # parent process (see how os.fork() works) and within the child process # oslo.service calls service's start() method again to reinitialize # what's needed. So we must clean up all RPC clients so that RPC works # properly (e.g. message routing for synchronous calls may be based on # generated queue names). rpc_clients.cleanup() self.server.start() print('API server started.') def stop(self): self.server.stop() def wait(self): self.server.wait() def reset(self): self.server.reset() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/api/wsgi.py0000644000175000017500000000123500000000000017663 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.api import app application = app.init_wsgi() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1055672 mistral-10.0.0.0b3/mistral/auth/0000755000175000017500000000000000000000000016527 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/auth/__init__.py0000644000175000017500000000245300000000000020644 0ustar00coreycorey00000000000000# Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
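# The get_auth_handler() helper below resolves the handler through the
# 'mistral.auth' stevedore namespace, selected by the [DEFAULT] auth_type
# option ('keystone' by default). A sketch of a custom plugin ('my-auth',
# MyAuthHandler and mypackage are hypothetical):
#
#   from mistral import auth
#   from mistral import exceptions as exc
#
#   class MyAuthHandler(auth.AuthHandler):
#       def authenticate(self, req):
#           # Reject the request unless it carries the expected header.
#           if req.headers.get('X-My-Token') != 'secret':
#               raise exc.UnauthorizedException()
#
# and the matching setup.cfg entry point:
#
#   [entry_points]
#   mistral.auth =
#       my-auth = mypackage.auth:MyAuthHandler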
import abc from oslo_config import cfg import six from stevedore import driver from mistral import exceptions as exc _IMPL_AUTH_HANDLER = None def get_auth_handler(): auth_type = cfg.CONF.auth_type global _IMPL_AUTH_HANDLER if not _IMPL_AUTH_HANDLER: mgr = driver.DriverManager( 'mistral.auth', auth_type, invoke_on_load=True ) _IMPL_AUTH_HANDLER = mgr.driver return _IMPL_AUTH_HANDLER @six.add_metaclass(abc.ABCMeta) class AuthHandler(object): """Abstract base class for an authentication plugin.""" @abc.abstractmethod def authenticate(self, req): raise exc.UnauthorizedException() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/auth/keycloak.py0000644000175000017500000001455000000000000020710 0ustar00coreycorey00000000000000# Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from cachetools import cached from cachetools import LRUCache import json import jwt from jwt import algorithms as jwt_algos from oslo_config import cfg from oslo_log import log as logging import pprint import requests from six.moves import urllib from mistral._i18n import _ from mistral import auth from mistral import exceptions as exc LOG = logging.getLogger(__name__) CONF = cfg.CONF class KeycloakAuthHandler(auth.AuthHandler): def authenticate(self, req): if 'X-Auth-Token' not in req.headers: msg = _("Auth token must be provided in 'X-Auth-Token' header.") LOG.error(msg) raise exc.UnauthorizedException(message=msg) access_token = req.headers.get('X-Auth-Token') try: decoded = jwt.decode( access_token, algorithms=['RS256'], verify=False ) except Exception as e: msg = _("Token can't be decoded because of wrong format %s")\ % str(e) LOG.error(msg) raise exc.UnauthorizedException(message=msg) # Get user realm from parsed token # Format is "iss": "http://:/auth/realms/", __, __, realm_name = decoded['iss'].strip().rpartition('/realms/') audience = decoded.get('aud') # Get roles from parsed token roles = ','.join(decoded['realm_access']['roles']) \ if 'realm_access' in decoded else '' # NOTE(rakhmerov): There's a special endpoint for introspecting # access tokens described in OpenID Connect specification but it's # available in KeyCloak starting only with version 1.8.Final so we have # to use user info endpoint which also takes exactly one parameter # (access token) and replies with error if token is invalid. 
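# With the default '/realms/%s/protocol/openid-connect/userinfo' value this
# option is a relative path, so no request is made here and the token is
# instead verified locally against the realm's public key in the 'else'
# branch below.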
user_info_endpoint_url = CONF.keycloak_oidc.user_info_endpoint_url if user_info_endpoint_url.startswith(('http://', 'https://')): self.send_request_to_auth_server( url=user_info_endpoint_url, access_token=access_token ) else: public_key = self.get_public_key(realm_name) keycloak_iss = None try: if CONF.keycloak_oidc.keycloak_iss: keycloak_iss = CONF.keycloak_oidc.keycloak_iss % realm_name jwt.decode( access_token, public_key, audience=audience, issuer=keycloak_iss, algorithms=['RS256'], verify=True ) except Exception: LOG.exception('The request access token is invalid.') raise exc.UnauthorizedException() req.headers["X-Identity-Status"] = "Confirmed" req.headers["X-Project-Id"] = realm_name req.headers["X-Roles"] = roles @staticmethod def get_system_ca_file(): """Return path to system default CA file.""" # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora, # Suse, FreeBSD/OpenBSD, MacOSX, and the bundled ca. ca_path = [ '/etc/ssl/certs/ca-certificates.crt', '/etc/pki/tls/certs/ca-bundle.crt', '/etc/ssl/ca-bundle.pem', '/etc/ssl/cert.pem', '/System/Library/OpenSSL/certs/cacert.pem', requests.certs.where() ] for ca in ca_path: LOG.debug("Looking for ca file %s", ca) if os.path.exists(ca): LOG.debug("Using ca file %s", ca) return ca LOG.warning("System ca file could not be found.") @cached(LRUCache(maxsize=32)) def get_public_key(self, realm_name): keycloak_key_url = ( CONF.keycloak_oidc.auth_url + CONF.keycloak_oidc.public_cert_url % realm_name ) response_json = self.send_request_to_auth_server(keycloak_key_url) keys = response_json.get('keys') if not keys: raise exc.MistralException( 'Unexpected response structure from the keycloak server.' ) public_key = jwt_algos.RSAAlgorithm.from_jwk( json.dumps(keys[0]) ) return public_key def send_request_to_auth_server(self, url, access_token=None): certfile = CONF.keycloak_oidc.certfile keyfile = CONF.keycloak_oidc.keyfile cafile = CONF.keycloak_oidc.cafile or self.get_system_ca_file() insecure = CONF.keycloak_oidc.insecure verify = None if urllib.parse.urlparse(url).scheme == "https": verify = False if insecure else cafile cert = (certfile, keyfile) if certfile and keyfile else None headers = {} if access_token: headers["Authorization"] = "Bearer %s" % access_token try: resp = requests.get( url, headers=headers, verify=verify, cert=cert ) except requests.ConnectionError: msg = _( "Can't connect to the keycloak server with address '%s'." ) % url LOG.exception(msg) raise exc.MistralException(message=msg) if resp.status_code == 401: LOG.warning( "HTTP response from OIDC provider:" " [%s] with WWW-Authenticate: [%s]", pprint.pformat(resp.text), resp.headers.get("WWW-Authenticate") ) else: LOG.debug( "HTTP response from the OIDC provider: %s", pprint.pformat(resp.json()) ) resp.raise_for_status() return resp.json() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/auth/keystone.py0000644000175000017500000000273500000000000020751 0ustar00coreycorey00000000000000# Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral import auth from mistral import exceptions as exc CONF = cfg.CONF class KeystoneAuthHandler(auth.AuthHandler): def authenticate(self, req): # Note(nmakhotkin): Since we have deferred authentication, # need to check for auth manually (check for corresponding # headers according to keystonemiddleware docs. identity_status = req.headers.get('X-Identity-Status') service_identity_status = req.headers.get('X-Service-Identity-Status') if (identity_status == 'Confirmed' or service_identity_status == 'Confirmed'): return if req.headers.get('X-Auth-Token'): msg = 'Auth token is invalid: %s' % req.headers['X-Auth-Token'] else: msg = 'Authentication required' raise exc.UnauthorizedException(msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1055672 mistral-10.0.0.0b3/mistral/cmd/0000755000175000017500000000000000000000000016331 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/cmd/__init__.py0000644000175000017500000000000000000000000020430 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/cmd/launch.py0000644000175000017500000002052700000000000020163 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2016 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import eventlet eventlet.monkey_patch( os=True, select=True, socket=True, thread=False if '--use-debugger' in sys.argv else True, time=True) import os # If ../mistral/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
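# For example, when the launcher is executed from a source checkout (e.g.
# via a bin/ wrapper two levels below the checkout root), the checkout root
# is prepended to sys.path so the in-tree 'mistral' package shadows a copy
# installed in site-packages.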
POSSIBLE_TOPDIR = os.path.normpath( os.path.join( os.path.abspath(sys.argv[0]), os.pardir, os.pardir ) ) if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'mistral', '__init__.py')): sys.path.insert(0, POSSIBLE_TOPDIR) from oslo_config import cfg from oslo_log import log as logging from oslo_service import service from mistral.api import service as api_service from mistral import config from mistral.engine import engine_server from mistral.event_engine import event_engine_server from mistral.executors import executor_server from mistral.notifiers import notification_server from mistral.rpc import base as rpc from mistral import version CONF = cfg.CONF SERVER_THREAD_MANAGER = None SERVER_PROCESS_MANAGER = None LOG = logging.getLogger(__name__) def launch_thread(server, workers=1): try: global SERVER_THREAD_MANAGER if not SERVER_THREAD_MANAGER: SERVER_THREAD_MANAGER = service.ServiceLauncher( CONF, restart_method='mutate' ) SERVER_THREAD_MANAGER.launch_service(server, workers=workers) except Exception as e: sys.stderr.write("ERROR: %s\n" % e) sys.exit(1) def launch_process(server, workers=1): try: global SERVER_PROCESS_MANAGER if not SERVER_PROCESS_MANAGER: SERVER_PROCESS_MANAGER = service.ProcessLauncher( CONF, restart_method='mutate' ) SERVER_PROCESS_MANAGER.launch_service(server, workers=workers) except Exception as e: sys.stderr.write("ERROR: %s\n" % e) sys.exit(1) def launch_executor(): launch_thread(executor_server.get_oslo_service()) def launch_engine(): launch_thread(engine_server.get_oslo_service()) def launch_event_engine(): launch_thread(event_engine_server.get_oslo_service()) def launch_notifier(): launch_thread(notification_server.get_oslo_service()) def launch_api(): server = api_service.WSGIService('mistral_api') launch_process(server, workers=server.workers) def launch_any(options): for option in options: LAUNCH_OPTIONS[option]() global SERVER_PROCESS_MANAGER global SERVER_THREAD_MANAGER if SERVER_THREAD_MANAGER: SERVER_THREAD_MANAGER.wait() if SERVER_PROCESS_MANAGER: SERVER_PROCESS_MANAGER.wait() # Map cli options to appropriate functions. The cli options are # registered in mistral's config.py. LAUNCH_OPTIONS = { 'api': launch_api, 'engine': launch_engine, 'executor': launch_executor, 'event-engine': launch_event_engine, 'notifier': launch_notifier } MISTRAL_TITLE = r""" |\\\ //| || || ||\\\ //|| __ || __ __ || || \\\// || || // |||||| || \\\ // \\\ || || \\/ || \\\ || || || \\\ || || || || \\\ || || || /\\\ || || || || __// ||_// || \\\__// \\\_ || Mistral Workflow Service, version %s """ % version.version_string def print_server_info(): print(MISTRAL_TITLE) comp_str = ("[%s]" % ','.join(LAUNCH_OPTIONS if cfg.CONF.server == ['all'] else cfg.CONF.server)) print('Launching server components %s...' % comp_str) def get_properly_ordered_parameters(): """Orders launch parameters in the right order. In oslo it's important the order of the launch parameters. if --config-file came after the command line parameters the command line parameters are ignored. So to make user command line parameters are never ignored this method moves --config-file to be always first. 
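For example (illustrative values), the argument list

    ['--server', 'api', '--config-file', '/etc/mistral/mistral.conf']

is reordered to

    ['--config-file', '/etc/mistral/mistral.conf', '--server', 'api']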
""" args = sys.argv[1:] for arg in sys.argv[1:]: if arg == '--config-file' or arg.startswith('--config-file='): if "=" in arg: conf_file_value = arg.split("=", 1)[1] else: conf_file_value = args[args.index(arg) + 1] args.remove(conf_file_value) args.remove(arg) args.insert(0, "--config-file") args.insert(1, conf_file_value) return args def override_keystone_options(): # TODO(wxy): This function is used for keeping backward compatibility. # Remove it in Stein. auth_opts = CONF['keystone_authtoken'] for opt, value in auth_opts.items(): if opt in CONF['keystone']: default_value = auth_opts._group._opts[opt]['opt'].default if default_value != value != CONF['keystone'][opt]: LOG.warning("The config option '%s' in section " "[keystone_authtoken] has the same copy in " "[keystone]. Please add the same option to the " "[keystone] section to keep using it.", opt) CONF.set_override(opt, value, group='keystone') def main(): try: CONF.register_cli_opts(config.CLI_OPTS) config.parse_args(get_properly_ordered_parameters()) print_server_info() logging.setup(CONF, 'Mistral') override_keystone_options() # Please refer to the oslo.messaging documentation for transport # configuration. The default transport for oslo.messaging is # rabbitMQ. The available transport drivers are listed in the # setup.cfg file in oslo.messaging under the entry_points section for # oslo.messaging.drivers. The transport driver is specified using the # rpc_backend option in the default section of the oslo configuration # file. The expected value for the rpc_backend is one of the key # values available for the oslo.messaging.drivers (i.e. rabbit, fake). # There are additional options such as ssl and credential that can be # specified depending on the driver. Please refer to the driver # implementation for those additional options. It's important to note # that the "fake" transport should only be used if "all" the Mistral # servers are launched on the same process. Otherwise, messages do not # get delivered if the Mistral servers are launched on different # processes because the "fake" transport is using an in process queue. rpc.get_transport() if cfg.CONF.server == ['all']: # Launch all servers. launch_any(LAUNCH_OPTIONS.keys()) else: # Validate launch option. if set(cfg.CONF.server) - set(LAUNCH_OPTIONS.keys()): raise Exception('Valid options are all or any combination of ' ', '.join(LAUNCH_OPTIONS.keys())) # Launch distinct set of server(s). launch_any(set(cfg.CONF.server)) except RuntimeError as excp: sys.stderr.write("ERROR: %s\n" % excp) sys.exit(1) # Helper method used in unit tests to reset the service launchers. def reset_server_managers(): global SERVER_THREAD_MANAGER global SERVER_PROCESS_MANAGER SERVER_THREAD_MANAGER = None SERVER_PROCESS_MANAGER = None # Helper method used in unit tests to access the service launcher. def get_server_thread_manager(): global SERVER_THREAD_MANAGER return SERVER_THREAD_MANAGER # Helper method used in unit tests to access the process launcher. def get_server_process_manager(): global SERVER_PROCESS_MANAGER return SERVER_PROCESS_MANAGER if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/config.py0000644000175000017500000006511500000000000017415 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. 
# Copyright 2019 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration options registration and useful routines. """ import itertools import json from keystoneauth1 import loading from oslo_config import cfg from oslo_log import log from oslo_middleware import cors from osprofiler import opts as profiler from mistral import version from mistral._i18n import _ from mistral.workflow import states # Options under default group. launch_opt = cfg.ListOpt( 'server', default=['all'], help=_('Specifies which mistral server to start by the launch script. ' 'Valid options are all or any combination of ' 'api, engine, and executor.') ) wf_trace_log_name_opt = cfg.StrOpt( 'workflow_trace_log_name', default='workflow_trace', help=_('Logger name for pretty workflow trace output.') ) use_debugger_opt = cfg.BoolOpt( 'use-debugger', default=False, help=_('Enables debugger. Note that using this option changes how the ' 'eventlet library is used to support async IO. This could result ' 'in failures that do not occur under normal operation. ' 'Use at your own risk.') ) auth_type_opt = cfg.StrOpt( 'auth_type', default='keystone', help=_('Authentication type (valid options: keystone, keycloak-oidc)') ) api_opts = [ cfg.HostAddressOpt( 'host', default='0.0.0.0', help='Mistral API server host' ), cfg.PortOpt('port', default=8989, help='Mistral API server port'), cfg.BoolOpt( 'allow_action_execution_deletion', default=False, help=_('Enables the ability to delete action_execution which ' 'has no relationship with workflows.') ), cfg.BoolOpt( 'enable_ssl_api', default=False, help=_('Enable the integrated stand-alone API to service requests ' 'via HTTPS instead of HTTP.') ), cfg.IntOpt( 'api_workers', help=_('Number of workers for Mistral API service ' 'default is equal to the number of CPUs available if that can ' 'be determined, else a default worker count of 1 is returned.') ), cfg.StrOpt( 'validation_mode', default='mandatory', choices=['enabled', 'mandatory', 'disabled'], help=_("Defines in what cases Mistral will be validating the syntax " "of workflow YAML definitions. If 'enabled' is set the service " "will be validating the syntax but only if it's not explicitly " "turned off in the API request. 'disabled' disables validation " "for all API requests. 'mandatory' enables validation for all " "API requests.") ) ] js_impl_opt = cfg.StrOpt( 'js_implementation', default='pyv8', choices=['pyv8', 'v8eval', 'py_mini_racer'], help=_('The JavaScript implementation to be used by the std.javascript ' 'action to evaluate scripts.') ) rpc_impl_opt = cfg.StrOpt( 'rpc_implementation', default='oslo', choices=['oslo', 'kombu'], help=_('Specifies RPC implementation for RPC client and server. 
' 'Support of kombu driver is experimental.') ) # TODO(ddeja): This config option is a part of oslo RPCClient # It would be the best to not register it twice, rather use RPCClient somehow rpc_response_timeout_opt = cfg.IntOpt( 'rpc_response_timeout', default=60, help=_('Seconds to wait for a response from a call.') ) oslo_rpc_executor = cfg.StrOpt( 'oslo_rpc_executor', default='eventlet', choices=['eventlet', 'blocking', 'threading'], help=_('Executor type used by Oslo Messaging framework. Defines how ' 'Oslo Messaging based RPC subsystem processes incoming calls.') ) expiration_token_duration = cfg.IntOpt( 'expiration_token_duration', default=30, help=_('Window of seconds to determine whether the given token is about' ' to expire.') ) pecan_opts = [ cfg.StrOpt( 'root', default='mistral.api.controllers.root.RootController', help=_('Pecan root controller') ), cfg.ListOpt( 'modules', default=["mistral.api"], help=_('A list of modules where pecan will search for applications.') ), cfg.BoolOpt( 'debug', default=False, help=_('Enables the ability to display tracebacks in the browser and' ' interactively debug during development.') ), cfg.BoolOpt( 'auth_enable', default=True, help=_('Enables user authentication in pecan.') ) ] engine_opts = [ cfg.StrOpt('engine', default='default', help='Mistral engine plugin'), cfg.HostAddressOpt( 'host', default='0.0.0.0', help=_('Name of the engine node. This can be an opaque ' 'identifier. It is not necessarily a hostname, ' 'FQDN, or IP address.') ), cfg.StrOpt( 'topic', default='mistral_engine', help=_('The message topic that the engine listens on.') ), cfg.StrOpt('version', default='1.0', help='The version of the engine.'), cfg.IntOpt( 'execution_field_size_limit_kb', default=1024, help=_('The default maximum size in KB of large text fields ' 'of runtime execution objects. Use -1 for no limit.') ), cfg.IntOpt( 'execution_integrity_check_delay', default=20, help=_('A number of seconds since the last update of a task' ' execution in RUNNING state after which Mistral will' ' start checking its integrity, meaning that if all' ' associated actions/workflows are finished its state' ' will be restored automatically. If this property is' ' set to a negative value Mistral will never be doing ' ' this check.') ), cfg.IntOpt( 'execution_integrity_check_batch_size', default=5, min=1, help=_('A number of task executions in RUNNING state that the' ' execution integrity checker can process in a single' ' iteration.') ), cfg.IntOpt( 'action_definition_cache_time', default=60, help=_('A number of seconds that indicates how long action ' 'definitions should be stored in the local cache.') ), cfg.BoolOpt( 'start_subworkflows_via_rpc', default=False, help=( 'Enables starting subworkflows via RPC. Use "False" to start ' 'subworkflow within the same engine instance. Use "True" ' 'to start subworkflow via RPC to improve load balancing ' 'in case of several engine instances.' ) ) ] executor_opts = [ cfg.StrOpt( 'type', choices=['local', 'remote'], default='remote', help=( 'Type of executor. Use local to run the executor within the ' 'engine server. Use remote if the executor is launched as ' 'a separate server to run action executions.' ) ), cfg.HostAddressOpt( 'host', default='0.0.0.0', help=_('Name of the executor node. This can be an any string ' 'name/identifier. It is not necessarily a hostname, ' 'FQDN, or IP address. It is also related to the "target" ' 'attribute of tasks defined in a workflow text. 
If "target" ' 'is defined for a task then the action of the task will be ' 'sent to one of the executors that have the same value in the ' '"host" property.') ), cfg.StrOpt( 'topic', default='mistral_executor', help=_('The message topic that the executor listens on.') ), cfg.StrOpt( 'version', default='1.0', help=_('The version of the executor.') ) ] scheduler_type_opt = cfg.StrOpt( 'scheduler_type', default='legacy', choices=['legacy', 'default'], help=_('The name of the scheduler implementation used in the system.') ) scheduler_opts = [ cfg.FloatOpt( 'fixed_delay', default=1, min=0.1, help=( 'Fixed part of the delay between scheduler iterations, ' 'in seconds. ' 'Full delay is defined as a sum of "fixed_delay" and a random ' 'delay limited by "random_delay".' ) ), cfg.FloatOpt( 'random_delay', default=0, min=0, help=( 'Max value of the random part of the delay between scheduler ' 'iterations, in seconds. ' 'Full delay is defined as a sum of "fixed_delay" and a random ' 'delay limited by this property.' ) ), cfg.IntOpt( 'batch_size', default=None, min=1, help=( 'The max number of delayed calls will be selected during ' 'a scheduler iteration. ' 'If this property equals None then there is no ' 'restriction on selection.' ) ), cfg.FloatOpt( 'captured_job_timeout', default=30, min=1, help=( 'Defines how soon (in seconds) a scheduled job captured for ' 'processing becomes eligible for capturing by other schedulers ' 'again. This option is needed to prevent situations when a ' 'scheduler instance captured a job and failed while processing ' 'and so this job can never be processed again because it is ' 'marked as captured.' ) ), cfg.FloatOpt( 'pickup_job_after', default=60, min=1, help='Time period given to a scheduler to process a scheduled job ' 'locally before it becomes eligible for processing by other ' 'scheduler instances.' 'For example, a job needs to run at 12:00:00. When a scheduler ' 'starts processing it has 60 seconds (or other configured ' 'value) to complete the job. If the scheduler did not complete ' 'the job within this period it most likely means that the ' 'scheduler process crashed. In this case another scheduler ' 'instance will pick it up from the Job Store, but not earlier ' 'than 12:01:00 and try to process it.' ) ] cron_trigger_opts = [ cfg.BoolOpt( 'enabled', default=True, help=( 'If this value is set to False then the subsystem of cron triggers' ' is disabled. Disabling cron triggers increases system' ' performance.' ) ), cfg.IntOpt( 'execution_interval', default=1, min=1, help=( 'This setting defines how frequently Mistral checks for cron ' 'triggers that need execution. By default this is every second ' 'which can lead to high system load. Increasing the number will ' 'reduce the load but also limit the minimum freqency. For ' 'example, a cron trigger can be configured to run every second ' 'but if the execution_interval is set to 60, it will only run ' 'once per minute.' ) ) ] event_engine_opts = [ cfg.HostAddressOpt( 'host', default='0.0.0.0', help=_('Name of the event engine node. This can be an opaque ' 'identifier. It is not necessarily a hostname, ' 'FQDN, or IP address.') ), cfg.HostAddressOpt( 'listener_pool_name', default='events', help=_('Name of the event engine\'s listener pool. This can be an' ' opaque identifier. 
It is used for identifying the group' ' of event engine listeners in oslo.messaging.') ), cfg.StrOpt( 'topic', default='mistral_event_engine', help=_('The message topic that the event engine listens on.') ), cfg.StrOpt( 'event_definitions_cfg_file', default='/etc/mistral/event_definitions.yaml', help=_('Configuration file for event definitions.') ), ] notifier_opts = [ cfg.StrOpt( 'type', choices=['local', 'remote'], default='remote', help=( 'Type of notifier. Use local to run the notifier within the ' 'engine server. Use remote if the notifier is launched as ' 'a separate server to process events.' ) ), cfg.StrOpt( 'host', default='0.0.0.0', help=_('Name of the notifier node. This can be an opaque ' 'identifier. It is not necessarily a hostname, ' 'FQDN, or IP address.') ), cfg.StrOpt( 'topic', default='mistral_notifier', help=_('The message topic that the notifier server listens on.') ), cfg.ListOpt( 'notify', item_type=json.loads, bounds=True, help=_('List of publishers to publish notification.') ) ] execution_expiration_policy_opts = [ cfg.IntOpt( 'evaluation_interval', help=_('How often will the executions be evaluated ' '(in minutes). For example for value 120 the interval ' 'will be 2 hours (every 2 hours). ' 'Note that only final state executions will be removed: ' '( SUCCESS / ERROR / CANCELLED ).') ), cfg.IntOpt( 'older_than', help=_('Evaluate from which time remove executions in minutes. ' 'For example when older_than = 60, remove all executions ' 'that finished a 60 minutes ago or more. ' 'Minimum value is 1.') ), cfg.IntOpt( 'max_finished_executions', default=0, help=_('The maximum number of finished workflow executions ' 'to be stored. For example when max_finished_executions = 100, ' 'only the 100 latest finished executions will be preserved. ' 'This means that even unexpired executions are eligible ' 'for deletion, to decrease the number of executions in the ' 'database. The default value is 0. If it is set to 0, ' 'this constraint won\'t be applied.') ), cfg.IntOpt( 'batch_size', default=0, help=_('Size of batch of expired executions to be deleted.' 'The default value is 0. If it is set to 0, ' 'size of batch is total number of expired executions ' 'that is going to be deleted.') ), cfg.ListOpt( 'ignored_states', default=[], help='The states that the expiration policy will filter ' 'out and will not delete.' 'Valid values are, [{}]'.format(states.TERMINAL_STATES) ) ] action_heartbeat_opts = [ cfg.IntOpt( 'max_missed_heartbeats', min=0, default=15, help=_('The maximum amount of missed heartbeats to be allowed. ' 'If set to 0 then this feature is disabled. ' 'See check_interval for more details.') ), cfg.IntOpt( 'check_interval', min=0, default=20, help=_('How often (in seconds) action executions are checked. ' 'For example when check_interval is 10, check action ' 'executions every 10 seconds. When the checker runs it will ' 'transit all running action executions to error if the last ' 'heartbeat received is older than 10 * max_missed_heartbeats ' 'seconds. If set to 0 then this feature is disabled.') ), cfg.IntOpt( 'batch_size', min=0, default=10, help=_('The maximum number of action executions processed during ' 'one iteration of action execution heartbeat checker. If set ' 'to 0 then there is no limit.') ), cfg.IntOpt( 'first_heartbeat_timeout', min=0, default=3600, help=_('The first heartbeat is handled differently, to provide a ' 'grace period in case there is no available executor to handle ' 'the action execution. 
For example, when ' 'first_heartbeat_timeout = 3600, wait 3600 seconds before ' 'closing the action executions that never received a heartbeat.' ) ) ] coordination_opts = [ cfg.StrOpt( 'backend_url', help=_('The backend URL to be used for coordination.') ), cfg.FloatOpt( 'heartbeat_interval', default=5.0, help=_('Number of seconds between heartbeats for coordination.') ) ] profiler_opts = profiler.list_opts()[0][1] profiler_opts.append( cfg.StrOpt( 'profiler_log_name', default='profiler_trace', help=_('Logger name for the osprofiler trace output.') ) ) keycloak_oidc_opts = [ cfg.StrOpt( 'auth_url', help=_('Keycloak base URL (e.g. https://my.keycloak:8443/auth)') ), cfg.StrOpt( 'certfile', help=_('Required if the identity server requires a client ' 'certificate') ), cfg.StrOpt( 'keyfile', help=_('Required if the identity server requires a client ' 'certificate') ), cfg.StrOpt( 'cafile', help=_('A PEM encoded Certificate Authority to use when verifying ' 'HTTPS connections. Defaults to system CAs.') ), cfg.BoolOpt( 'insecure', default=False, help=_('If True, SSL/TLS certificate verification is disabled') ), cfg.StrOpt( 'user_info_endpoint_url', default='/realms/%s/protocol/openid-connect/userinfo', help='Endpoint against which authorization will be performed' ), cfg.StrOpt( 'public_cert_url', default="/realms/%s/protocol/openid-connect/certs", help="URL to get the public key for a particular realm" ), cfg.StrOpt( 'keycloak_iss', help="Keycloak issuer (iss) URL. " "Example: https://ip_add:port/auth/realms/%s" ) ] yaql_opts = [ cfg.IntOpt( 'limit_iterators', default=-1, min=-1, help=_('Limit iterators by the given number of elements. When set, ' 'each time any function declares its parameter to be an ' 'iterator, that iterator is modified to not produce more than ' 'the given number of items. If not set (or set to -1) the ' 'result data is allowed to contain endless iterators that ' 'would cause errors if the result were to be serialized.') ), cfg.IntOpt( 'memory_quota', default=-1, min=-1, help=_('The memory usage quota (in bytes) for all data produced by ' 'the expression (or any part of it). -1 means no limitation.') ), cfg.BoolOpt( 'convert_input_data', default=True, help=_('Enables input data conversion for YAQL expressions. If set ' 'to True, YAQL will convert mutable data structures ' '(lists, dicts, sets) into their immutable versions. That ' 'will allow them to work with some constructs that require ' 'hashable types even if elements are not hashable. For ' 'example, it will be possible to put dicts into a set. ' 'Although this conflicts with the base principles of such ' 'collections (normally a non-hashable type cannot be put into ' 'a set because the set would not work correctly), the ' 'YAQL library itself allows it. ' 'Disabling input data conversion may give a significant ' 'performance boost if the input data for an expression is ' 'large.') ), cfg.BoolOpt( 'convert_output_data', default=True, help=_('Enables output data conversion for YAQL expressions. ' 'If set to False, it is possible that YAQL will generate ' 'output that is not JSON-serializable, for example, ' 'if an expression has ".toSet()" at the end to convert a list ' 'into a set. 
This does not mean that such functions ' 'cannot be used; they can still be used in expressions, but ' 'the user has to keep in mind what type the result will be, ' 'whereas if the value of this property is True YAQL will ' 'convert the result to a JSON-compatible type.') ), cfg.BoolOpt( 'convert_tuples_to_lists', default=True, help=_('When set to True, yaql converts all tuples in the expression ' 'result to lists. It works only if "convert_output_data" is ' 'set to True.') ), cfg.BoolOpt( 'convert_sets_to_lists', default=False, help=_('When set to True, yaql converts all sets in the expression ' 'result to lists. Otherwise the produced result may contain ' 'sets that are not JSON-serializable. It works only if ' '"convert_output_data" is set to True.') ), cfg.BoolOpt( 'iterable_dicts', default=False, help=_('When set to True, dictionaries are considered to be iterable ' 'and iteration over dictionaries produces their keys (as in ' 'Python and yaql 0.2).') ), cfg.StrOpt( 'keyword_operator', default='=>', help=_('Allows one to configure the keyword/mapping symbol. ' 'The ability to pass named arguments can be disabled altogether ' 'if an empty string is provided.') ), cfg.BoolOpt( 'allow_delegates', default=False, help=_('Enables or disables delegate expression parsing.') ) ] CONF = cfg.CONF API_GROUP = 'api' ENGINE_GROUP = 'engine' EXECUTOR_GROUP = 'executor' SCHEDULER_GROUP = 'scheduler' CRON_TRIGGER_GROUP = 'cron_trigger' EVENT_ENGINE_GROUP = 'event_engine' NOTIFIER_GROUP = 'notifier' PECAN_GROUP = 'pecan' COORDINATION_GROUP = 'coordination' EXECUTION_EXPIRATION_POLICY_GROUP = 'execution_expiration_policy' ACTION_HEARTBEAT_GROUP = 'action_heartbeat' PROFILER_GROUP = profiler.list_opts()[0][0] KEYCLOAK_OIDC_GROUP = "keycloak_oidc" YAQL_GROUP = "yaql" KEYSTONE_GROUP = "keystone" CONF.register_opt(wf_trace_log_name_opt) CONF.register_opt(auth_type_opt) CONF.register_opt(scheduler_type_opt) CONF.register_opt(js_impl_opt) CONF.register_opt(rpc_impl_opt) CONF.register_opt(rpc_response_timeout_opt) CONF.register_opt(oslo_rpc_executor) CONF.register_opt(expiration_token_duration) CONF.register_opts(api_opts, group=API_GROUP) CONF.register_opts(engine_opts, group=ENGINE_GROUP) CONF.register_opts(executor_opts, group=EXECUTOR_GROUP) CONF.register_opts(scheduler_opts, group=SCHEDULER_GROUP) CONF.register_opts(cron_trigger_opts, group=CRON_TRIGGER_GROUP) CONF.register_opts( execution_expiration_policy_opts, group=EXECUTION_EXPIRATION_POLICY_GROUP ) CONF.register_opts( action_heartbeat_opts, group=ACTION_HEARTBEAT_GROUP ) CONF.register_opts(event_engine_opts, group=EVENT_ENGINE_GROUP) CONF.register_opts(notifier_opts, group=NOTIFIER_GROUP) CONF.register_opts(pecan_opts, group=PECAN_GROUP) CONF.register_opts(coordination_opts, group=COORDINATION_GROUP) CONF.register_opts(profiler_opts, group=PROFILER_GROUP) CONF.register_opts(keycloak_oidc_opts, group=KEYCLOAK_OIDC_GROUP) CONF.register_opts(yaql_opts, group=YAQL_GROUP) loading.register_session_conf_options(CONF, KEYSTONE_GROUP) CLI_OPTS = [ use_debugger_opt, launch_opt ] default_group_opts = itertools.chain( CLI_OPTS, [ wf_trace_log_name_opt, auth_type_opt, scheduler_type_opt, js_impl_opt, rpc_impl_opt, rpc_response_timeout_opt, oslo_rpc_executor, expiration_token_duration ] ) _DEFAULT_LOG_LEVELS = [ 'eventlet.wsgi.server=WARN', 'oslo_service.periodic_task=INFO', 'oslo_service.loopingcall=INFO', 'mistral.services.periodic=INFO', 'kazoo.client=WARN', 'oslo_db=WARN' ] def list_opts(): return [ (API_GROUP, api_opts), (ENGINE_GROUP, engine_opts), (EXECUTOR_GROUP, 
executor_opts), (EVENT_ENGINE_GROUP, event_engine_opts), (SCHEDULER_GROUP, scheduler_opts), (CRON_TRIGGER_GROUP, cron_trigger_opts), (NOTIFIER_GROUP, notifier_opts), (PECAN_GROUP, pecan_opts), (COORDINATION_GROUP, coordination_opts), (EXECUTION_EXPIRATION_POLICY_GROUP, execution_expiration_policy_opts), (PROFILER_GROUP, profiler_opts), (KEYCLOAK_OIDC_GROUP, keycloak_oidc_opts), (YAQL_GROUP, yaql_opts), (ACTION_HEARTBEAT_GROUP, action_heartbeat_opts), (None, default_group_opts) ] def parse_args(args=None, usage=None, default_config_files=None): default_log_levels = log.get_default_log_levels() default_log_levels.extend(_DEFAULT_LOG_LEVELS) log.set_defaults(default_log_levels=default_log_levels) log.register_options(CONF) CONF( args=args, project='mistral', version=version.version_string, usage=usage, default_config_files=default_config_files ) def set_config_defaults(): """This method updates all configuration default values.""" set_cors_middleware_defaults() def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-Project-Id', 'X-User-Name', 'X-Project-Name'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-Project-Id', 'X-User-Name', 'X-Project-Name'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/context.py0000644000175000017500000002352000000000000017626 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 from mistral_lib.actions import context as lib_ctx from mistral_lib import serialization from oslo_config import cfg from oslo_context import context as oslo_context from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from osprofiler import profiler import pecan from pecan import hooks from mistral import auth from mistral import exceptions as exc from mistral_lib import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF _CTX_THREAD_LOCAL_NAME = "MISTRAL_APP_CTX_THREAD_LOCAL" ALLOWED_WITHOUT_AUTH = ['/', '/v2/', '/workflowv2/', '/workflowv2/v2/'] class MistralContext(oslo_context.RequestContext): def __init__(self, auth_uri=None, auth_cacert=None, insecure=False, service_catalog=None, region_name=None, is_trust_scoped=False, redelivered=False, expires_at=None, trust_id=None, is_target=False, **kwargs): self.auth_uri = auth_uri self.auth_cacert = auth_cacert self.insecure = insecure self.service_catalog = service_catalog self.region_name = region_name self.is_trust_scoped = is_trust_scoped self.redelivered = redelivered self.expires_at = expires_at self.trust_id = trust_id self.is_target = is_target # We still use Mistral thread local variable. 
Maybe we could consider # using the variable provided by oslo_context in the future. super(MistralContext, self).__init__(overwrite=False, **kwargs) def to_dict(self): """Return a dictionary of context attributes.""" ctx_dict = super(MistralContext, self).to_dict() ctx_dict.update( { 'user_name': self.user_name, 'project_name': self.project_name, 'domain_name': self.domain_name, 'user_domain_name': self.user_domain_name, 'project_domain_name': self.project_domain_name, 'auth_uri': self.auth_uri, 'auth_cacert': self.auth_cacert, 'insecure': self.insecure, 'service_catalog': self.service_catalog, 'region_name': self.region_name, 'is_trust_scoped': self.is_trust_scoped, 'redelivered': self.redelivered, 'expires_at': self.expires_at, 'trust_id': self.trust_id, 'is_target': self.is_target, } ) return ctx_dict @classmethod def from_dict(cls, values, **kwargs): """Construct a context object from a provided dictionary.""" kwargs.setdefault('auth_uri', values.get('auth_uri')) kwargs.setdefault('auth_cacert', values.get('auth_cacert')) kwargs.setdefault('insecure', values.get('insecure', False)) kwargs.setdefault('service_catalog', values.get('service_catalog')) kwargs.setdefault('region_name', values.get('region_name')) kwargs.setdefault( 'is_trust_scoped', values.get('is_trust_scoped', False) ) kwargs.setdefault('redelivered', values.get('redelivered', False)) kwargs.setdefault('expires_at', values.get('expires_at')) kwargs.setdefault('trust_id', values.get('trust_id')) kwargs.setdefault('is_target', values.get('is_target', False)) return super(MistralContext, cls).from_dict(values, **kwargs) @classmethod def from_environ(cls, headers, env): kwargs = _extract_mistral_auth_params(headers) token_info = env.get('keystone.token_info', {}) if not kwargs['is_target']: kwargs['service_catalog'] = token_info.get('token', {}) kwargs['expires_at'] = (token_info['token']['expires_at'] if token_info else None) context = super(MistralContext, cls).from_environ(env, **kwargs) context.is_admin = 'admin' in context.roles return context def has_ctx(): return utils.has_thread_local(_CTX_THREAD_LOCAL_NAME) def ctx(): if not has_ctx(): raise exc.ApplicationContextNotFoundException() return utils.get_thread_local(_CTX_THREAD_LOCAL_NAME) def set_ctx(new_ctx): utils.set_thread_local(_CTX_THREAD_LOCAL_NAME, new_ctx) def _extract_mistral_auth_params(headers): service_catalog = None if headers.get("X-Target-Auth-Uri"): insecure_header = headers.get('X-Target-Insecure', 'False') if insecure_header == 'False': insecure = False elif insecure_header == 'True': insecure = True else: raise (exc.MistralException( 'X-Target-Insecure must be either "True", "False" or not ' 'provided. The default is "False".')) params = { # TODO(akovi): Target cert not handled yet 'auth_cacert': None, 'insecure': insecure, 'auth_token': headers.get('X-Target-Auth-Token'), 'auth_uri': headers.get('X-Target-Auth-Uri'), 'tenant': headers.get('X-Target-Project-Id'), 'user': headers.get('X-Target-User-Id'), 'user_name': headers.get('X-Target-User-Name'), 'region_name': headers.get('X-Target-Region-Name'), 'is_target': True } if not params['auth_token']: raise (exc.MistralException( 'Target auth URI (X-Target-Auth-Uri) and target auth token ' '(X-Target-Auth-Token) must be present')) # It's possible that the target service catalog is not provided; in this # case, Mistral needs to get the target service catalog dynamically when # talking to the target OpenStack deployment later on. 
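# Illustrative sketch (not part of the original module): a client could
# construct this optional header by base64-encoding a JSON service
# catalog, since _extract_service_catalog_from_headers() below reverses
# exactly these two steps (base64-decode, then JSON-load):
#
#     import base64
#     import json
#
#     catalog = {'catalog': []}  # hypothetical catalog payload
#     headers['X-Target-Service-Catalog'] = base64.b64encode(
#         json.dumps(catalog).encode()
#     ).decode()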
service_catalog = _extract_service_catalog_from_headers( headers ) else: params = { 'auth_uri': CONF.keystone_authtoken.www_authenticate_uri, 'auth_cacert': CONF.keystone_authtoken.cafile, 'insecure': False, 'region_name': headers.get('X-Region-Name'), 'is_target': False } params['service_catalog'] = service_catalog return params def _extract_service_catalog_from_headers(headers): target_service_catalog_header = headers.get( 'X-Target-Service-Catalog') if target_service_catalog_header: decoded_catalog = base64.b64decode( target_service_catalog_header).decode() return jsonutils.loads(decoded_catalog) else: return None class RpcContextSerializer(messaging.Serializer): def __init__(self, entity_serializer=None): self.entity_serializer = ( entity_serializer or serialization.get_polymorphic_serializer() ) def serialize_entity(self, context, entity): if not self.entity_serializer: return entity return self.entity_serializer.serialize(entity) def deserialize_entity(self, context, entity): if not self.entity_serializer: return entity return self.entity_serializer.deserialize(entity) def serialize_context(self, context): ctx = context.to_dict() pfr = profiler.get() if pfr: ctx['trace_info'] = { "hmac_key": pfr.hmac_key, "base_id": pfr.get_base_id(), "parent_id": pfr.get_id() } return ctx def deserialize_context(self, context): trace_info = context.pop('trace_info', None) if trace_info: profiler.init(**trace_info) ctx = MistralContext.from_dict(context) set_ctx(ctx) return ctx class AuthHook(hooks.PecanHook): def before(self, state): if state.request.path in ALLOWED_WITHOUT_AUTH: return if not CONF.pecan.auth_enable: return try: auth_handler = auth.get_auth_handler() auth_handler.authenticate(state.request) except Exception as e: msg = "Failed to validate access token: %s" % str(e) LOG.exception(msg) pecan.abort( status_code=401, detail=msg, headers={'Server-Error-Message': msg, "WWW-Authenticate": msg} ) class ContextHook(hooks.PecanHook): def before(self, state): context = MistralContext.from_environ( state.request.headers, state.request.environ ) set_ctx(context) def after(self, state): set_ctx(None) def create_action_context(execution_ctx): context = ctx() security_ctx = lib_ctx.SecurityContext( auth_cacert=context.auth_cacert, auth_token=context.auth_token, auth_uri=context.auth_uri, expires_at=context.expires_at, insecure=context.insecure, is_target=context.is_target, is_trust_scoped=context.is_trust_scoped, project_id=context.project_id, project_name=context.project_name, user_name=context.user_name, redelivered=context.redelivered, region_name=context.region_name, service_catalog=context.service_catalog, trust_id=context.trust_id, ) ex_ctx = lib_ctx.ExecutionContext(**execution_ctx) return lib_ctx.ActionContext(security_ctx, ex_ctx) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1095672 mistral-10.0.0.0b3/mistral/db/0000755000175000017500000000000000000000000016153 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/__init__.py0000644000175000017500000000000000000000000020252 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1095672 mistral-10.0.0.0b3/mistral/db/sqlalchemy/0000755000175000017500000000000000000000000020315 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/__init__.py0000644000175000017500000000000000000000000022414 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/base.py0000644000175000017500000001441400000000000021605 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cachetools from oslo_config import cfg from oslo_db import options from oslo_db.sqlalchemy import enginefacade import osprofiler.sqlalchemy import sqlalchemy as sa from mistral.db.sqlalchemy import sqlite_lock from mistral import exceptions as exc from mistral_lib import utils # Note(dzimine): sqlite only works for basic testing. options.set_defaults(cfg.CONF, connection="sqlite:///mistral.sqlite") _DB_SESSION_THREAD_LOCAL_NAME = "__db_sql_alchemy_session__" _TX_SCOPED_CACHE_THREAD_LOCAL_NAME = "__tx_scoped_cache__" _facade = None _sqlalchemy_create_engine_orig = sa.create_engine def _get_facade(): global _facade if not _facade: _facade = enginefacade.LegacyEngineFacade( cfg.CONF.database.connection, sqlite_fk=True, autocommit=False, **dict(cfg.CONF.database.items()) ) if cfg.CONF.profiler.enabled: if cfg.CONF.profiler.trace_sqlalchemy: osprofiler.sqlalchemy.add_tracing( sa, _facade.get_engine(), 'db' ) return _facade # Monkey-patching sqlalchemy to set the isolation_level # as this configuration is not exposed by oslo_db. def _sqlalchemy_create_engine_wrapper(*args, **kwargs): # sqlite (used for unit testing and not allowed for production) # does not support READ_COMMITTED. # Checking the drivername using the args and not the get_driver_name() # method because that method requires a session. if args[0].drivername != 'sqlite': kwargs["isolation_level"] = "READ_COMMITTED" return _sqlalchemy_create_engine_orig(*args, **kwargs) def get_engine(): # If the patch was not applied yet. if sa.create_engine != _sqlalchemy_create_engine_wrapper: # Replace the original create_engine with our wrapper. 
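# (Descriptive note, not in the original module: this check makes the
# patch idempotent. The first get_engine() call installs the wrapper
# before the facade builds the engine, so the engine is created with
# READ_COMMITTED; subsequent calls find sa.create_engine already
# replaced and go straight to returning the facade's engine.)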
sa.create_engine = _sqlalchemy_create_engine_wrapper return _get_facade().get_engine() def _get_session(): return _get_facade().get_session() def _get_thread_local_session(): return utils.get_thread_local(_DB_SESSION_THREAD_LOCAL_NAME) def get_tx_scoped_cache(): return utils.get_thread_local(_TX_SCOPED_CACHE_THREAD_LOCAL_NAME) def _get_or_create_thread_local_session(): ses = _get_thread_local_session() if ses: return ses, False ses = _get_session() _set_thread_local_session(ses) return ses, True def _set_thread_local_session(session): utils.set_thread_local(_DB_SESSION_THREAD_LOCAL_NAME, session) if session is not None: utils.set_thread_local( _TX_SCOPED_CACHE_THREAD_LOCAL_NAME, cachetools.LRUCache(maxsize=1000) ) else: utils.set_thread_local(_TX_SCOPED_CACHE_THREAD_LOCAL_NAME, None) def session_aware(param_name="session"): """Decorator for methods working within a db session.""" def _decorator(func): def _within_session(*args, **kw): # If the 'created' flag is True it means that the transaction is # demarcated explicitly outside this module. ses, created = _get_or_create_thread_local_session() try: kw[param_name] = ses result = func(*args, **kw) if created: ses.commit() return result except Exception: if created: ses.rollback() raise finally: if created: _set_thread_local_session(None) ses.close() _within_session.__doc__ = func.__doc__ return _within_session return _decorator # Transaction management. def start_tx(): """Starts a transaction. Opens a new database session and starts a new transaction, assuming there are no open sessions within the same thread. """ if _get_thread_local_session(): raise exc.DataAccessException( "Database transaction has already been started." ) _set_thread_local_session(_get_session()) def release_locks_if_sqlite(session): if get_driver_name() == 'sqlite': sqlite_lock.release_locks(session) def commit_tx(): """Commits the previously started database transaction.""" ses = _get_thread_local_session() if not ses: raise exc.DataAccessException( "Nothing to commit. Database transaction" " has not been previously started." ) ses.commit() def rollback_tx(): """Rolls back the previously started database transaction.""" ses = _get_thread_local_session() if not ses: raise exc.DataAccessException( "Nothing to roll back. Database transaction has not been started." ) ses.rollback() def end_tx(): """Ends the transaction. Ends the current database transaction. It rolls back all uncommitted changes and closes the database session. """ ses = _get_thread_local_session() if not ses: raise exc.DataAccessException( "Database transaction has not been started." ) if ses.dirty: rollback_tx() release_locks_if_sqlite(ses) ses.close() _set_thread_local_session(None) @session_aware() def get_driver_name(session=None): return session.bind.url.drivername @session_aware() def get_dialect_name(session=None): return session.bind.url.get_dialect().name @session_aware() def model_query(model, columns=(), session=None): """Query helper. :param model: Base model to query. :param columns: Optional. Which columns are to be queried. 
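Example (an illustrative sketch, assuming a model class from
mistral.db.v2.sqlalchemy.models)::

    model_query(models.WorkflowExecution)
    model_query(
        models.WorkflowExecution,
        columns=(models.WorkflowExecution.id,)
    )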
""" if columns: return session.query(*columns) return session.query(model) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1095672 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/0000755000175000017500000000000000000000000022306 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/__init__.py0000644000175000017500000000000000000000000024405 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic.ini0000644000175000017500000000214200000000000024402 0ustar00coreycorey00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = mistral/db/sqlalchemy/migration/alembic_migrations # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false sqlalchemy.url = # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1095672 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/0000755000175000017500000000000000000000000026136 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/README.md0000644000175000017500000000414600000000000027422 0ustar00coreycorey00000000000000The migrations in `alembic_migrations/versions` contain the changes needed to migrate between Mistral database revisions. A migration occurs by executing a script that details the changes needed to upgrade the database. The migration scripts are ordered so that multiple scripts can run sequentially. The scripts are executed by Mistral's migration wrapper which uses the Alembic library to manage the migration. Mistral supports migration from Kilo or later. 
You can upgrade to the latest database version via: ``` mistral-db-manage --config-file /path/to/mistral.conf upgrade head ``` You can populate the database with standard actions and workflows: ``` mistral-db-manage --config-file /path/to/mistral.conf populate ``` To check the current database version: ``` mistral-db-manage --config-file /path/to/mistral.conf current ``` To create a script to run the migration offline: ``` mistral-db-manage --config-file /path/to/mistral.conf upgrade head --sql ``` To run the offline migration between specific migration versions: ``` mistral-db-manage --config-file /path/to/mistral.conf upgrade <start version>:<end version> --sql ``` Upgrade the database incrementally: ``` mistral-db-manage --config-file /path/to/mistral.conf upgrade --delta <# of revs> ``` Or, upgrade the database to one newer revision: ``` mistral-db-manage --config-file /path/to/mistral.conf upgrade +1 ``` Create a new revision: ``` mistral-db-manage --config-file /path/to/mistral.conf revision -m "description of revision" --autogenerate ``` Create a blank file: ``` mistral-db-manage --config-file /path/to/mistral.conf revision -m "description of revision" ``` This command does not perform any migrations; it only sets the revision. Revision may be any existing revision. Use this command carefully. ``` mistral-db-manage --config-file /path/to/mistral.conf stamp <revision> ``` To verify that the timeline does not branch, you can run this command: ``` mistral-db-manage --config-file /path/to/mistral.conf check_migration ``` If the migration path has a branch, you can find the branch point via: ``` mistral-db-manage --config-file /path/to/mistral.conf history ```././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/__init__.py0000644000175000017500000000000000000000000030235 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/env.py0000644000175000017500000000462300000000000027305 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement from alembic import context from logging import config as c from oslo_utils import importutils from sqlalchemy import create_engine from sqlalchemy import pool from mistral.db.sqlalchemy import model_base importutils.try_import('mistral.db.v2.sqlalchemy.models') # This is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config mistral_config = config.mistral_config # Interpret the config file for Python logging. # This line sets up loggers basically. c.fileConfig(config.config_file_name) # Add your model's MetaData object here for 'autogenerate' support. target_metadata = model_base.MistralSecureModelBase.metadata def run_migrations_offline(): """Run migrations in 'offline' mode. 
This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ context.configure(url=mistral_config.database.connection) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = create_engine( mistral_config.database.connection, poolclass=pool.NullPool ) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/script.py.mako0000644000175000017500000000166700000000000030754 0ustar00coreycorey00000000000000# Copyright ${create_date.year} OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"}././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1135674 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/0000755000175000017500000000000000000000000030006 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/001_kilo.py0000644000175000017500000002541100000000000031701 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Kilo release Revision ID: 001 Revises: None Create Date: 2015-03-31 12:02:51.935368 """ # revision identifiers, used by Alembic. 
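# (Descriptive note, not in the original file: Alembic chains revisions
# through the two module-level attributes below; down_revision = None
# marks this script as the base of the timeline, so "upgrade head"
# replays the whole chain starting from here.)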
revision = '001' down_revision = None from alembic import op import sqlalchemy as sa from mistral.db.sqlalchemy import types as st def upgrade(): op.create_table( 'workbooks_v2', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('scope', sa.String(length=80), nullable=True), sa.Column('project_id', sa.String(length=80), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=True), sa.Column('definition', sa.Text(), nullable=True), sa.Column('spec', st.JsonEncoded(), nullable=True), sa.Column('tags', st.JsonEncoded(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'project_id') ) op.create_table( 'tasks', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=True), sa.Column('requires', st.JsonEncoded(), nullable=True), sa.Column('workbook_name', sa.String(length=80), nullable=True), sa.Column('execution_id', sa.String(length=36), nullable=True), sa.Column('description', sa.String(length=200), nullable=True), sa.Column('task_spec', st.JsonEncoded(), nullable=True), sa.Column('action_spec', st.JsonEncoded(), nullable=True), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('tags', st.JsonEncoded(), nullable=True), sa.Column('in_context', st.JsonEncoded(), nullable=True), sa.Column('parameters', st.JsonEncoded(), nullable=True), sa.Column('output', st.JsonEncoded(), nullable=True), sa.Column('task_runtime_context', st.JsonEncoded(), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table( 'action_definitions_v2', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('scope', sa.String(length=80), nullable=True), sa.Column('project_id', sa.String(length=80), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=True), sa.Column('definition', sa.Text(), nullable=True), sa.Column('spec', st.JsonEncoded(), nullable=True), sa.Column('tags', st.JsonEncoded(), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.Column('input', sa.Text(), nullable=True), sa.Column('action_class', sa.String(length=200), nullable=True), sa.Column('attributes', st.JsonEncoded(), nullable=True), sa.Column('is_system', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'project_id') ) op.create_table( 'workflow_definitions_v2', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('scope', sa.String(length=80), nullable=True), sa.Column('project_id', sa.String(length=80), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=True), sa.Column('definition', sa.Text(), nullable=True), sa.Column('spec', st.JsonEncoded(), nullable=True), sa.Column('tags', st.JsonEncoded(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'project_id') ) op.create_table( 'executions_v2', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('scope', sa.String(length=80), nullable=True), sa.Column('project_id', sa.String(length=80), nullable=True), sa.Column('type', sa.String(length=50), 
nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=True), sa.Column('workflow_name', sa.String(length=80), nullable=True), sa.Column('spec', st.JsonEncoded(), nullable=True), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('state_info', sa.String(length=1024), nullable=True), sa.Column('tags', st.JsonEncoded(), nullable=True), sa.Column('accepted', sa.Boolean(), nullable=True), sa.Column('input', st.JsonEncoded(), nullable=True), sa.Column('output', st.JsonLongDictType(), nullable=True), sa.Column('params', st.JsonEncoded(), nullable=True), sa.Column('context', st.JsonEncoded(), nullable=True), sa.Column('action_spec', st.JsonEncoded(), nullable=True), sa.Column('processed', sa.BOOLEAN(), nullable=True), sa.Column('in_context', st.JsonLongDictType(), nullable=True), sa.Column('published', st.JsonEncoded(), nullable=True), sa.Column('runtime_context', st.JsonEncoded(), nullable=True), sa.Column('task_execution_id', sa.String(length=36), nullable=True), sa.Column( 'workflow_execution_id', sa.String(length=36), nullable=True ), sa.ForeignKeyConstraint( ['task_execution_id'], [u'executions_v2.id'], ), sa.ForeignKeyConstraint( ['workflow_execution_id'], [u'executions_v2.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_table( 'workbooks', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('definition', sa.Text(), nullable=True), sa.Column('description', sa.String(length=200), nullable=True), sa.Column('tags', st.JsonEncoded(), nullable=True), sa.Column('scope', sa.String(length=80), nullable=True), sa.Column('project_id', sa.String(length=80), nullable=True), sa.Column('trust_id', sa.String(length=80), nullable=True), sa.PrimaryKeyConstraint('id', 'name'), sa.UniqueConstraint('name') ) op.create_table( 'environments_v2', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('scope', sa.String(length=80), nullable=True), sa.Column('project_id', sa.String(length=80), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=200), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.Column('variables', st.JsonEncoded(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'project_id') ) op.create_table( 'triggers', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('pattern', sa.String(length=20), nullable=False), sa.Column('next_execution_time', sa.DateTime(), nullable=False), sa.Column('workbook_name', sa.String(length=80), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name') ) op.create_table( 'delayed_calls_v2', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column( 'factory_method_path', sa.String(length=200), nullable=True ), sa.Column('target_method_name', sa.String(length=80), nullable=False), sa.Column('method_arguments', st.JsonEncoded(), nullable=True), sa.Column('serializers', st.JsonEncoded(), nullable=True), sa.Column('auth_context', 
st.JsonEncoded(), nullable=True), sa.Column('execution_time', sa.DateTime(), nullable=False), sa.PrimaryKeyConstraint('id') ) op.create_table( 'workflow_executions', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('workbook_name', sa.String(length=80), nullable=True), sa.Column('task', sa.String(length=80), nullable=True), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('context', st.JsonEncoded(), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table( 'cron_triggers_v2', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('scope', sa.String(length=80), nullable=True), sa.Column('project_id', sa.String(length=80), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=200), nullable=True), sa.Column('pattern', sa.String(length=100), nullable=True), sa.Column('next_execution_time', sa.DateTime(), nullable=False), sa.Column('workflow_name', sa.String(length=80), nullable=True), sa.Column('remaining_executions', sa.Integer(), nullable=True), sa.Column('workflow_id', sa.String(length=36), nullable=True), sa.Column('workflow_input', st.JsonEncoded(), nullable=True), sa.Column('workflow_input_hash', sa.CHAR(length=64), nullable=True), sa.Column('trust_id', sa.String(length=80), nullable=True), sa.ForeignKeyConstraint( ['workflow_id'], [u'workflow_definitions_v2.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'project_id'), sa.UniqueConstraint( 'workflow_input_hash', 'workflow_name', 'pattern', 'project_id' ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/002_kilo.py0000644000175000017500000000263200000000000031702 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Kilo Revision ID: 002 Revises: 001 Create Date: 2015-04-30 16:15:34.737030 """ # revision identifiers, used by Alembic. 
revision = '002' down_revision = '001' from alembic import op import sqlalchemy as sa from mistral.db.sqlalchemy import types as st def upgrade(): op.drop_table('tasks') op.drop_table('workflow_executions') op.drop_table('workbooks') op.drop_table('triggers') op.add_column( 'cron_triggers_v2', sa.Column('workflow_params', st.JsonEncoded(), nullable=True) ) op.add_column( 'cron_triggers_v2', sa.Column('workflow_params_hash', sa.CHAR(length=64), nullable=True) ) op.create_unique_constraint( None, 'cron_triggers_v2', ['workflow_input_hash', 'workflow_name', 'pattern', 'project_id', 'workflow_params_hash'] ) ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/003_cron_trigger_constraints.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/003_cron_trigger_cons0000644000175000017500000000231400000000000034021 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """cron_trigger_constraints Revision ID: 003 Revises: 002 Create Date: 2015-05-25 13:09:50.190136 """ # revision identifiers, used by Alembic. revision = '003' down_revision = '002' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'cron_triggers_v2', sa.Column('first_execution_time', sa.DateTime(), nullable=True) ) op.create_unique_constraint( None, 'cron_triggers_v2', [ 'workflow_input_hash', 'workflow_name', 'pattern', 'project_id', 'workflow_params_hash', 'remaining_executions', 'first_execution_time' ] ) ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/004_add_description_for_execution.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/004_add_description_f0000644000175000017500000000170300000000000033755 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add description for execution Revision ID: 004 Revises: 003 Create Date: 2015-06-10 14:23:54.494596 """ # revision identifiers, used by Alembic. 
revision = '004' down_revision = '003' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'executions_v2', sa.Column('description', sa.String(length=255), nullable=True) ) ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/005_increase_execution_columns_size.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/005_increase_executio0000644000175000017500000000303600000000000034015 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Increase executions_v2 column size from JsonDictType to JsonLongDictType Revision ID: 005 Revises: 004 Create Date: 2015-07-21 08:48:51.636094 """ # revision identifiers, used by Alembic. revision = '005' down_revision = '004' from alembic import op from mistral.db.sqlalchemy import types as st def upgrade(): # Changing column types from JsonDictType to JsonLongDictType op.alter_column('executions_v2', 'runtime_context', type_=st.JsonLongDictType()) op.alter_column('executions_v2', 'input', type_=st.JsonLongDictType()) op.alter_column('executions_v2', 'params', type_=st.JsonLongDictType()) op.alter_column('executions_v2', 'context', type_=st.JsonLongDictType()) op.alter_column('executions_v2', 'action_spec', type_=st.JsonLongDictType()) op.alter_column('executions_v2', 'published', type_=st.JsonLongDictType()) ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/006_add_processed_to_delayed_calls_v2.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/006_add_processed_to_0000644000175000017500000000175400000000000033765 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add a Boolean column 'processing' to the table delayed_calls_v2 Revision ID: 006 Revises: 005 Create Date: 2015-08-09 09:44:38.289271 """ # revision identifiers, used by Alembic. 
revision = '006' down_revision = '005' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'delayed_calls_v2', sa.Column('processing', sa.Boolean, default=False, nullable=False) ) ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/007_move_system_flag_to_base_definition.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/007_move_system_flag_0000644000175000017500000000206500000000000034024 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Move system flag to base definition Revision ID: 007 Revises: 006 Create Date: 2015-09-15 11:24:43.081824 """ # revision identifiers, used by Alembic. revision = '007' down_revision = '006' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'workbooks_v2', sa.Column('is_system', sa.Boolean(), nullable=True) ) op.add_column( 'workflow_definitions_v2', sa.Column('is_system', sa.Boolean(), nullable=True) ) ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/008_increase_size_of_state_info_column.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/008_increase_size_of_0000644000175000017500000000167300000000000033775 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Increase size of state_info column from String to Text Revision ID: 008 Revises: 007 Create Date: 2015-11-17 21:30:50.991290 """ # revision identifiers, used by Alembic. revision = '008' down_revision = '007' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('executions_v2', 'state_info', type_=sa.Text()) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/009_add_database_indices.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/009_add_database_indi0000644000175000017500000001156500000000000033710 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Add database indices Revision ID: 009 Revises: 008 Create Date: 2015-11-25 19:06:14.975474 """ # revision identifiers, used by Alembic. revision = '009' down_revision = '008' from alembic import op from sqlalchemy.engine import reflection def upgrade(): inspector = reflection.Inspector.from_engine(op.get_bind()) op.create_index( 'action_definitions_v2_action_class', 'action_definitions_v2', ['action_class'], unique=False ) op.create_index( 'action_definitions_v2_is_system', 'action_definitions_v2', ['is_system'], unique=False ) op.create_index( 'action_definitions_v2_project_id', 'action_definitions_v2', ['project_id'], unique=False ) op.create_index( 'action_definitions_v2_scope', 'action_definitions_v2', ['scope'], unique=False ) op.create_index( 'cron_triggers_v2_next_execution_time', 'cron_triggers_v2', ['next_execution_time'], unique=False ) op.create_index( 'cron_triggers_v2_project_id', 'cron_triggers_v2', ['project_id'], unique=False ) op.create_index( 'cron_triggers_v2_scope', 'cron_triggers_v2', ['scope'], unique=False ) op.create_index( 'cron_triggers_v2_workflow_name', 'cron_triggers_v2', ['workflow_name'], unique=False ) cron_v2_constrs = [uc['name'] for uc in inspector.get_unique_constraints('cron_triggers_v2')] if ('cron_triggers_v2_workflow_input_hash_workflow_name_pattern__key' in cron_v2_constrs): op.drop_constraint( 'cron_triggers_v2_workflow_input_hash_workflow_name_pattern__key', 'cron_triggers_v2', type_='unique' ) if ('cron_triggers_v2_workflow_input_hash_workflow_name_pattern_key1' in cron_v2_constrs): op.drop_constraint( 'cron_triggers_v2_workflow_input_hash_workflow_name_pattern_key1', 'cron_triggers_v2', type_='unique' ) op.create_index( 'delayed_calls_v2_processing_execution_time', 'delayed_calls_v2', ['processing', 'execution_time'], unique=False ) op.create_index( 'environments_v2_name', 'environments_v2', ['name'], unique=False ) op.create_index( 'environments_v2_project_id', 'environments_v2', ['project_id'], unique=False ) op.create_index( 'environments_v2_scope', 'environments_v2', ['scope'], unique=False ) op.create_index( 'executions_v2_project_id', 'executions_v2', ['project_id'], unique=False ) op.create_index( 'executions_v2_scope', 'executions_v2', ['scope'], unique=False ) op.create_index( 'executions_v2_state', 'executions_v2', ['state'], unique=False ) op.create_index( 'executions_v2_task_execution_id', 'executions_v2', ['task_execution_id'], unique=False ) op.create_index( 'executions_v2_type', 'executions_v2', ['type'], unique=False ) op.create_index( 'executions_v2_updated_at', 'executions_v2', ['updated_at'], unique=False ) op.create_index( 'executions_v2_workflow_execution_id', 'executions_v2', ['workflow_execution_id'], unique=False ) op.create_index( 'workbooks_v2_project_id', 'workbooks_v2', ['project_id'], unique=False ) op.create_index( 'workbooks_v2_scope', 'workbooks_v2', ['scope'], unique=False ) op.create_index( 'workflow_definitions_v2_is_system', 'workflow_definitions_v2', ['is_system'], 
unique=False ) op.create_index( 'workflow_definitions_v2_project_id', 'workflow_definitions_v2', ['project_id'], unique=False ) op.create_index( 'workflow_definitions_v2_scope', 'workflow_definitions_v2', ['scope'], unique=False ) ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/010_add_resource_members_v2_table.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/010_add_resource_memb0000644000175000017500000000310200000000000033744 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add_resource_members_v2_table Revision ID: 010 Revises: 009 Create Date: 2015-11-15 08:39:58.772417 """ # revision identifiers, used by Alembic. revision = '010' down_revision = '009' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'resource_members_v2', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('project_id', sa.String(length=80), nullable=False), sa.Column('member_id', sa.String(length=80), nullable=False), sa.Column('resource_id', sa.String(length=80), nullable=False), sa.Column('resource_type', sa.String(length=50), nullable=False), sa.Column('status', sa.String(length=20), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint( 'resource_id', 'resource_type', 'member_id' ) ) ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/011_add_workflow_id_for_execution.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/011_add_workflow_id_f0000644000175000017500000000170200000000000033755 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add workflow id for execution Revision ID: 011 Revises: 010 Create Date: 2016-02-02 22:29:34.672735 """ # revision identifiers, used by Alembic. 
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/011_add_workflow_id_for_execution.py ====

# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""add workflow id for execution

Revision ID: 011
Revises: 010
Create Date: 2016-02-02 22:29:34.672735

"""

# revision identifiers, used by Alembic.
revision = '011'
down_revision = '010'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.add_column(
        'executions_v2',
        sa.Column('workflow_id', sa.String(length=80), nullable=True)
    )

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/012_add_event_triggers_v2_table.py ====

# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""add event triggers table

Revision ID: 012
Revises: 011
Create Date: 2016-03-04 09:49:52.481791

"""

# revision identifiers, used by Alembic.
revision = '012'
down_revision = '011'

from alembic import op
import sqlalchemy as sa

from mistral.db.sqlalchemy import types as st


def upgrade():
    op.create_table(
        'event_triggers_v2',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('project_id', sa.String(length=80), nullable=True),
        sa.Column('scope', sa.String(length=80), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=200), nullable=True),
        sa.Column('workflow_id', sa.String(length=36), nullable=False),
        sa.Column('exchange', sa.String(length=80), nullable=False),
        sa.Column('topic', sa.String(length=80), nullable=False),
        sa.Column('event', sa.String(length=80), nullable=False),
        sa.Column('workflow_params', st.JsonEncoded(), nullable=True),
        sa.Column('workflow_input', st.JsonEncoded(), nullable=True),
        sa.Column('trust_id', sa.String(length=80), nullable=True),
        sa.ForeignKeyConstraint(
            ['workflow_id'],
            [u'workflow_definitions_v2.id'],
        ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint(
            'exchange', 'topic', 'event', 'workflow_id', 'project_id'
        ),
        sa.Index(
            'event_triggers_v2_project_id_workflow_id',
            'project_id', 'workflow_id'
        )
    )
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/013_split_execution_table_increase_names.py ====

# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""split_execution_table_increase_names

Revision ID: 013
Revises: 012
Create Date: 2016-08-02 11:03:03.263944

"""

# revision identifiers, used by Alembic.
from mistral.db.sqlalchemy import types as st

from alembic import op
import sqlalchemy as sa

revision = '013'
down_revision = '012'


def upgrade():
    op.create_table(
        'action_executions_v2',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('scope', sa.String(length=80), nullable=True),
        sa.Column('project_id', sa.String(length=80), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('workflow_name', sa.String(length=255), nullable=True),
        sa.Column('workflow_id', sa.String(length=80), nullable=True),
        sa.Column('spec', st.JsonMediumDictType(), nullable=True),
        sa.Column('state', sa.String(length=20), nullable=True),
        sa.Column('state_info', sa.TEXT(), nullable=True),
        sa.Column('tags', st.JsonListType(), nullable=True),
        sa.Column('runtime_context', st.JsonLongDictType(), nullable=True),
        sa.Column('accepted', sa.Boolean(), nullable=True),
        sa.Column('input', st.JsonLongDictType(), nullable=True),
        sa.Column('output', st.JsonLongDictType(), nullable=True),
        sa.Column('task_execution_id', sa.String(length=36), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.Index('action_executions_v2_project_id', 'project_id'),
        sa.Index('action_executions_v2_scope', 'scope'),
        sa.Index('action_executions_v2_state', 'state'),
        sa.Index('action_executions_v2_updated_at', 'updated_at'),
    )

    op.create_table(
        'workflow_executions_v2',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('scope', sa.String(length=80), nullable=True),
        sa.Column('project_id', sa.String(length=80), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('workflow_name', sa.String(length=255), nullable=True),
        sa.Column('workflow_id', sa.String(length=80), nullable=True),
        sa.Column('spec', st.JsonMediumDictType(), nullable=True),
        sa.Column('state', sa.String(length=20), nullable=True),
        sa.Column('state_info', sa.TEXT(), nullable=True),
        sa.Column('tags', st.JsonListType(), nullable=True),
        sa.Column('runtime_context', st.JsonLongDictType(), nullable=True),
        sa.Column('accepted', sa.Boolean(), nullable=True),
        sa.Column('input', st.JsonLongDictType(), nullable=True),
        sa.Column('output', st.JsonLongDictType(), nullable=True),
        sa.Column('params', st.JsonLongDictType(), nullable=True),
        sa.Column('context', st.JsonLongDictType(), nullable=True),
        sa.Column('task_execution_id', sa.String(length=36), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.Index('workflow_executions_v2_project_id', 'project_id'),
        sa.Index('workflow_executions_v2_scope', 'scope'),
        sa.Index('workflow_executions_v2_state', 'state'),
        sa.Index('workflow_executions_v2_updated_at', 'updated_at'),
    )

    op.create_table(
        'task_executions_v2',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('scope', sa.String(length=80), nullable=True),
        sa.Column('project_id', sa.String(length=80), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('workflow_name', sa.String(length=255), nullable=True),
        sa.Column('workflow_id', sa.String(length=80), nullable=True),
        sa.Column('spec', st.JsonMediumDictType(), nullable=True),
        sa.Column('state', sa.String(length=20), nullable=True),
        sa.Column('state_info', sa.TEXT(), nullable=True),
        sa.Column('tags', st.JsonListType(), nullable=True),
        sa.Column('runtime_context', st.JsonLongDictType(), nullable=True),
        sa.Column('action_spec', st.JsonLongDictType(), nullable=True),
        sa.Column('processed', sa.Boolean(), nullable=True),
        sa.Column('in_context', st.JsonLongDictType(), nullable=True),
        sa.Column('published', st.JsonLongDictType(), nullable=True),
        sa.Column(
            'workflow_execution_id', sa.String(length=36), nullable=True
        ),
        sa.PrimaryKeyConstraint('id'),
        sa.Index('task_executions_v2_project_id', 'project_id'),
        sa.Index('task_executions_v2_scope', 'scope'),
        sa.Index('task_executions_v2_state', 'state'),
        sa.Index('task_executions_v2_updated_at', 'updated_at'),
        sa.Index(
            'task_executions_v2_workflow_execution_id',
            'workflow_execution_id'
        ),
        sa.ForeignKeyConstraint(
            ['workflow_execution_id'],
            [u'workflow_executions_v2.id'],
            ondelete='CASCADE'
        ),
    )

    # 2 foreign keys are added here because all 3 tables are dependent.
    op.create_foreign_key(
        None,
        'action_executions_v2',
        'task_executions_v2',
        ['task_execution_id'],
        ['id'],
        ondelete='CASCADE'
    )

    op.create_foreign_key(
        None,
        'workflow_executions_v2',
        'task_executions_v2',
        ['task_execution_id'],
        ['id'],
        ondelete='CASCADE'
    )

    op.alter_column('workbooks_v2', 'name', type_=sa.String(length=255))
    op.alter_column('workbooks_v2', 'definition', type_=st.MediumText())
    op.alter_column('workbooks_v2', 'spec', type_=st.JsonMediumDictType())

    op.alter_column(
        'workflow_definitions_v2', 'name', type_=sa.String(length=255)
    )
    op.alter_column(
        'workflow_definitions_v2', 'definition', type_=st.MediumText()
    )
    op.alter_column(
        'workflow_definitions_v2', 'spec', type_=st.JsonMediumDictType()
    )

    op.alter_column(
        'action_definitions_v2', 'name', type_=sa.String(length=255)
    )
    op.alter_column(
        'action_definitions_v2', 'definition', type_=st.MediumText()
    )
    op.alter_column(
        'action_definitions_v2', 'spec', type_=st.JsonMediumDictType()
    )

    op.alter_column(
        'cron_triggers_v2', 'workflow_name', type_=sa.String(length=255)
    )
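# ----------------------------------------------------------------------------
# The alter_column() calls above rely on dialect variants: st.MediumText() is
# plain TEXT on most backends but MEDIUMTEXT on MySQL. A standalone sketch of
# the underlying SQLAlchemy mechanism (the variable name is illustrative):
import sqlalchemy
from sqlalchemy.dialects import mysql as mysql_dialect
from sqlalchemy.dialects import sqlite as sqlite_dialect

medium_text = sqlalchemy.Text().with_variant(mysql_dialect.MEDIUMTEXT(),
                                             'mysql')

# The same type object renders differently per dialect.
print(medium_text.compile(dialect=mysql_dialect.dialect()))   # MEDIUMTEXT
print(medium_text.compile(dialect=sqlite_dialect.dialect()))  # TEXT
# ----------------------------------------------------------------------------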
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/014_fix_past_scripts_discrepancies.py ====

# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""fix_past_scripts_discrepancies

Revision ID: 014
Revises: 013
Create Date: 2016-08-07 13:12:34.958845

"""

# revision identifiers, used by Alembic.
revision = '014'
down_revision = '013'

from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy.engine import reflection


def upgrade():
    inspect = reflection.Inspector.from_engine(op.get_bind())

    ct_unique_constraints = [
        uc['name'] for uc in inspect.get_unique_constraints('cron_triggers_v2')
    ]

    # unique constraint was added in 001, 002 and 003 with slight variations
    # without deleting the previous ones.
    # here we try to delete all three in case they exist
    if 'workflow_input_hash' in ct_unique_constraints:
        op.drop_index('workflow_input_hash', table_name='cron_triggers_v2')

    if 'workflow_input_hash_2' in ct_unique_constraints:
        op.drop_index('workflow_input_hash_2', table_name='cron_triggers_v2')

    if 'workflow_input_hash_3' in ct_unique_constraints:
        op.drop_index('workflow_input_hash_3', table_name='cron_triggers_v2')

    # create the correct latest unique constraint for table cron_triggers_v2
    op.create_unique_constraint(
        None,
        'cron_triggers_v2',
        [
            'workflow_input_hash', 'workflow_name', 'pattern', 'project_id',
            'workflow_params_hash', 'remaining_executions',
            'first_execution_time'
        ]
    )

    # column was added in 012. nullable value does not match today's model.
    op.alter_column(
        'event_triggers_v2',
        'workflow_id',
        existing_type=mysql.VARCHAR(length=36),
        nullable=True
    )

    # column was added in 010. nullable value does not match today's model
    op.alter_column(
        'resource_members_v2',
        'project_id',
        existing_type=mysql.VARCHAR(length=80),
        nullable=True
    )
"""fix_past_scripts_discrepancies Revision ID: 014 Revises: 013 Create Date: 2016-08-07 13:12:34.958845 """ # revision identifiers, used by Alembic. revision = '014' down_revision = '013' from alembic import op from sqlalchemy.dialects import mysql from sqlalchemy.engine import reflection def upgrade(): inspect = reflection.Inspector.from_engine(op.get_bind()) ct_unique_constraints = [ uc['name'] for uc in inspect.get_unique_constraints('cron_triggers_v2') ] # unique constraint was added in 001, 002 and 003 with slight variations # without deleting the previous ones. # here we try to delete all three in case they exist if 'workflow_input_hash' in ct_unique_constraints: op.drop_index('workflow_input_hash', table_name='cron_triggers_v2') if 'workflow_input_hash_2' in ct_unique_constraints: op.drop_index('workflow_input_hash_2', table_name='cron_triggers_v2') if 'workflow_input_hash_3' in ct_unique_constraints: op.drop_index('workflow_input_hash_3', table_name='cron_triggers_v2') # create the correct latest unique constraint for table cron_triggers_v2 op.create_unique_constraint( None, 'cron_triggers_v2', [ 'workflow_input_hash', 'workflow_name', 'pattern', 'project_id', 'workflow_params_hash', 'remaining_executions', 'first_execution_time' ] ) # column was added in 012. nullable value does not match today's model. op.alter_column( 'event_triggers_v2', 'workflow_id', existing_type=mysql.VARCHAR(length=36), nullable=True ) # column was added in 010. nullable value does not match today's model op.alter_column( 'resource_members_v2', 'project_id', existing_type=mysql.VARCHAR(length=80), nullable=True ) ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/015_add_unique_keys_for_non_locking_model.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/015_add_unique_keys_f0000644000175000017500000000245100000000000033776 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add_unique_keys_for_non_locking_model Revision ID: 015 Revises: 014 Create Date: 2016-08-08 11:05:20.109380 """ # revision identifiers, used by Alembic. 
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/016_increase_size_of_task_unique_key.py ====

# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Increase size of task_executions_v2.unique_key

Revision ID: 016
Revises: 015
Create Date: 2016-08-11 15:57:23.241734

"""

# revision identifiers, used by Alembic.
revision = '016'
down_revision = '015'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.alter_column('task_executions_v2', 'unique_key', type_=sa.String(200))

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/017_add_named_lock_table.py ====

# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Add named lock table

Revision ID: 017
Revises: 016
Create Date: 2016-08-17 13:06:26.616451

"""

# revision identifiers, used by Alembic.
revision = '017'
down_revision = '016'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.create_table(
        'named_locks',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=250), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/018_increate_task_execution_unique_key_size.py ====

# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Increase task_executions_v2.unique_key size

Revision ID: 018
Revises: 017
Create Date: 2016-08-17 17:47:30.325182

"""

# revision identifiers, used by Alembic.
revision = '018'
down_revision = '017'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.alter_column('task_executions_v2', 'unique_key', type_=sa.String(250))

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/019_change_scheduler_schema.py ====

# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Change scheduler schema.

Revision ID: 019
Revises: 018
Create Date: 2016-08-17 17:54:51.952949

"""

# revision identifiers, used by Alembic.
revision = '019'
down_revision = '018'

from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection


def upgrade():
    inspect = reflection.Inspector.from_engine(op.get_bind())

    unique_constraints = [
        uc['name'] for uc in inspect.get_unique_constraints('delayed_calls_v2')
    ]

    if 'delayed_calls_v2_processing_execution_time' in unique_constraints:
        op.drop_index(
            'delayed_calls_v2_processing_execution_time',
            table_name='delayed_calls_v2'
        )

    if 'unique_key' in unique_constraints:
        op.drop_index('unique_key', table_name='delayed_calls_v2')

    op.drop_column('delayed_calls_v2', 'unique_key')

    op.add_column(
        'delayed_calls_v2',
        sa.Column('key', sa.String(length=250), nullable=True)
    )

    op.create_index(
        'delayed_calls_v2_execution_time',
        'delayed_calls_v2',
        ['execution_time'],
        unique=False
    )
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/020_add_type_to_task_execution.py ====

# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""add type to task execution

Revision ID: 020
Revises: 019
Create Date: 2016-10-05 13:24:52.911011

"""

# revision identifiers, used by Alembic.
revision = '020'
down_revision = '019'

from alembic import op
from mistral.db.sqlalchemy import types as st
import sqlalchemy as sa

# A simple model of the task executions table with only the fields needed for
# the migration.
task_executions = sa.Table(
    'task_executions_v2',
    sa.MetaData(),
    sa.Column('id', sa.String(36), nullable=False),
    sa.Column('spec', st.JsonMediumDictType()),
    sa.Column('type', sa.String(10), nullable=True)
)


def upgrade():
    op.add_column(
        'task_executions_v2',
        sa.Column('type', sa.String(length=10), nullable=True)
    )

    session = sa.orm.Session(bind=op.get_bind())

    values = []

    for row in session.query(task_executions):
        values.append({'id': row[0], 'spec': row[1]})

    with session.begin(subtransactions=True):
        for value in values:
            task_type = "ACTION"

            if "workflow" in value['spec']:
                task_type = "WORKFLOW"

            session.execute(
                task_executions.update().values(type=task_type).where(
                    task_executions.c.id == value['id']
                )
            )

    # this commit appears to be necessary
    session.commit()
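# ----------------------------------------------------------------------------
# The backfill above loads every row into memory before updating. On a large
# deployment the same work could be streamed instead; a sketch of one possible
# variant (the batch size and structure are illustrative assumptions, not the
# approach the project settled on):
from sqlalchemy import orm


def backfill_type_in_batches(bind, table, batch_size=1000):
    session = orm.Session(bind=bind)

    # yield_per() fetches rows in windows instead of all at once.
    for row in session.query(table.c.id, table.c.spec).yield_per(batch_size):
        task_type = 'WORKFLOW' if 'workflow' in (row.spec or {}) else 'ACTION'

        session.execute(
            table.update().values(type=task_type).where(table.c.id == row.id)
        )

    session.commit()
# ----------------------------------------------------------------------------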
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/021_increase_env_columns_size.py ====

# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Increase environments_v2 column size from JsonDictType to JsonLongDictType

Revision ID: 021
Revises: 020
Create Date: 2017-06-13 13:29:41.636094

"""

# revision identifiers, used by Alembic.
revision = '021'
down_revision = '020'

from alembic import op
from mistral.db.sqlalchemy import types as st


def upgrade():
    # Changing column types from JsonDictType to JsonLongDictType
    op.alter_column('environments_v2', 'variables',
                    type_=st.JsonLongDictType())

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/022_namespace_support.py ====

# Copyright 2017 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""namespace_support

Revision ID: 022
Revises: 021
Create Date: 2017-06-11 13:09:06.782095

"""

# revision identifiers, used by Alembic.
revision = '022'
down_revision = '021'

from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
from sqlalchemy.sql import table, column

# A simple model of the workflow definitions table with only the field needed
wf_def = table('workflow_definitions_v2', column('namespace'))

# A simple model of the workflow executions table with only the field needed
wf_exec = table('workflow_executions_v2', column('workflow_namespace'))

# A simple model of the task executions table with only the field needed
task_exec = table('task_executions_v2', column('workflow_namespace'))

# A simple model of the action executions table with only the fields needed
action_executions = sa.Table(
    'action_executions_v2',
    sa.MetaData(),
    sa.Column('id', sa.String(36), nullable=False),
    sa.Column('workflow_name', sa.String(255)),
    sa.Column('workflow_namespace', sa.String(255), nullable=True)
)


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        'workflow_definitions_v2',
        sa.Column('namespace', sa.String(length=255), nullable=True)
    )

    inspect = reflection.Inspector.from_engine(op.get_bind())

    unique_constraints = [
        unique_constraint['name'] for unique_constraint
        in inspect.get_unique_constraints('workflow_definitions_v2')
    ]

    if 'name' in unique_constraints:
        op.drop_index('name', table_name='workflow_definitions_v2')

    op.create_unique_constraint(
        None,
        'workflow_definitions_v2',
        ['name', 'namespace', 'project_id']
    )

    op.add_column(
        'workflow_executions_v2',
        sa.Column('workflow_namespace', sa.String(length=255), nullable=True)
    )

    op.add_column(
        'task_executions_v2',
        sa.Column('workflow_namespace', sa.String(length=255), nullable=True)
    )

    op.add_column(
        'action_executions_v2',
        sa.Column('workflow_namespace', sa.String(length=255), nullable=True)
    )

    session = sa.orm.Session(bind=op.get_bind())

    values = []

    for row in session.query(action_executions):
        values.append({'id': row[0], 'workflow_name': row[1]})

    with session.begin(subtransactions=True):
        session.execute(wf_def.update().values(namespace=''))
        session.execute(wf_exec.update().values(workflow_namespace=''))
        session.execute(task_exec.update().values(workflow_namespace=''))

        for value in values:
            if value['workflow_name']:
                session.execute(action_executions.update().values(
                    workflow_namespace=''
                ).where(action_executions.c.id == value['id']))

    # this commit appears to be necessary
    session.commit()
    # ### end Alembic commands ###
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/023_add_root_execution_id.py ====

# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Add the root execution ID to the workflow execution model

Revision ID: 023
Revises: 022
Create Date: 2017-07-26 14:51:02.384729

"""

# revision identifiers, used by Alembic.
revision = '023'
down_revision = '022'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.add_column(
        'workflow_executions_v2',
        sa.Column('root_execution_id', sa.String(length=80), nullable=True)
    )
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/024_add_composite_index_workflow_execution_id_name.py ====

# Copyright 2017 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Add composite index on workflow_execution_id and name

Revision ID: 024
Revises: 023
Create Date: 2017-10-11 15:23:04.904251

"""

# revision identifiers, used by Alembic.
revision = '024'
down_revision = '023'

from alembic import op


def upgrade():
    op.create_index('task_executions_v2_workflow_execution_id_name',
                    'task_executions_v2', ['workflow_execution_id', 'name'])

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/025_fix_length_task_name.py ====

# Copyright 2017 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Fix length task name

Revision ID: 025
Revises: 024
Create Date: 2017-12-16 23:25:04.666777

"""

# revision identifiers, used by Alembic.
revision = '025'
down_revision = '024'

from alembic import op
import sqlalchemy as sa


def upgrade():
    # https://dev.mysql.com/doc/refman/5.6/en/innodb-restrictions.html
    op.alter_column('task_executions_v2', 'unique_key', type_=sa.String(255))
    op.alter_column('named_locks', 'name', type_=sa.String(255))
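# ----------------------------------------------------------------------------
# Context for the 255 limit above (an assumption based on the MySQL docs the
# comment links to): InnoDB of that era capped single-column index keys at
# 767 bytes, and a utf8 character can take up to 3 bytes, so the largest
# indexable VARCHAR length is:
print(767 // 3)  # 255
# ----------------------------------------------------------------------------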
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/026_optimize_task_expression_func.py ====

# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Optimize task expression function

Revision ID: 026
Revises: 025
Create Date: 2018-03-22 15:23:04.904251

"""

# revision identifiers, used by Alembic.
revision = '026'
down_revision = '025'

from alembic import op


def upgrade():
    op.create_index('action_executions_v2_task_execution_id',
                    'action_executions_v2', ['task_execution_id'])
    op.create_index('workflow_executions_v2_task_execution_id',
                    'workflow_executions_v2', ['task_execution_id'])

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/027_add_last_heartbeat_to_action_execution.py ====

# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Add last_heartbeat to action execution

Revision ID: 027
Revises: 026
Create Date: 2018-09-05 16:49:50.342349

"""

# revision identifiers, used by Alembic.
revision = '027'
down_revision = '026'

from alembic import op
import datetime

from mistral_lib import utils
from oslo_config import cfg
from sqlalchemy import Column, DateTime, Boolean

CONF = cfg.CONF


def upgrade():
    op.add_column(
        'action_executions_v2',
        Column(
            'last_heartbeat',
            DateTime,
            default=lambda: utils.utc_now_sec() + datetime.timedelta(
                seconds=CONF.action_heartbeat.first_heartbeat_timeout
            )
        )
    )
    op.add_column(
        'action_executions_v2',
        Column('is_sync', Boolean, default=None, nullable=True)
    )

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/028_add_namespace_column_to_workbooks.py ====

# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""add namespace column to workbooks

Revision ID: 028
Revises: 027
Create Date: 2018-07-17 15:39:25.031935

"""

# revision identifiers, used by Alembic.
revision = '028'
down_revision = '027'

from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection


def upgrade():
    op.add_column(
        'workbooks_v2',
        sa.Column('namespace', sa.String(length=255), nullable=True)
    )

    inspect = reflection.Inspector.from_engine(op.get_bind())

    unique_constraints = [
        unique_constraint['name'] for unique_constraint
        in inspect.get_unique_constraints('workbooks_v2')
    ]

    if 'name' in unique_constraints:
        op.drop_index('name', table_name='workbooks_v2')

    op.create_unique_constraint(
        None,
        'workbooks_v2',
        ['name', 'namespace', 'project_id']
    )
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/029_workbook_empty_namespace.py ====

# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""workbook_empty_namespace

Revision ID: 029
Revises: 028
Create Date: 2018-08-08 10:30:00.727769

"""

# revision identifiers, used by Alembic.
revision = '029'
down_revision = '028'

from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column


def upgrade():
    wb_def = table('workbooks_v2', column('namespace'))

    session = sa.orm.Session(bind=op.get_bind())

    with session.begin(subtransactions=True):
        session.execute(
            wb_def.update().values(namespace='').where(
                wb_def.c.namespace == None))  # noqa

    session.commit()
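# ----------------------------------------------------------------------------
# Why "== None" with a noqa marker rather than the more Pythonic "is None":
# Column overloads ==, so the comparison compiles to SQL, while "is" is
# evaluated eagerly in Python and would silently break the WHERE clause.
# A standalone illustration (the table and column are stand-ins):
from sqlalchemy.sql import column, table

_wb = table('workbooks_v2', column('namespace'))

print(_wb.c.namespace == None)  # noqa -> "workbooks_v2.namespace IS NULL"
print(_wb.c.namespace is None)  # -> False, a plain Python bool
# ----------------------------------------------------------------------------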
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/030_increase_delayed_calls_v2_auth_context.py ====

# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Increase delayed_calls_v2.auth_context column size from JsonDictType to
JsonMediumDictType

Revision ID: 030
Revises: 029
Create Date: 2018-08-07 08:35:57.609328

"""

# revision identifiers, used by Alembic.
revision = '030'
down_revision = '029'

from alembic import op
from mistral.db.sqlalchemy import types as st


def upgrade():
    # Changing column type from JsonDictType to JsonMediumDictType.
    op.alter_column('delayed_calls_v2', 'auth_context',
                    type_=st.JsonMediumDictType())

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/031_add_started_at_and_finished_at_to_task_execution.py ====

# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Add started_at and finished_at to task execution

Revision ID: 031
Revises: 030
Create Date: 2018-10-03 20:09:45.582597

"""

# revision identifiers, used by Alembic.
revision = '031'
down_revision = '030'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.add_column(
        'task_executions_v2',
        sa.Column('started_at', sa.DateTime(), nullable=True)
    )
    op.add_column(
        'task_executions_v2',
        sa.Column('finished_at', sa.DateTime(), nullable=True)
    )

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/032_add_has_next_tasks_and_error_handled_to_task_execution.py ====

# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Add has_next_tasks and error_handled to task execution.

Revision ID: 032
Revises: 031
Create Date: 2019-04-16 13:42:12.123412

"""

# revision identifiers, used by Alembic.
revision = '032'
down_revision = '031'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.add_column(
        'task_executions_v2',
        sa.Column('has_next_tasks', sa.Boolean(), nullable=True)
    )
    op.add_column(
        'task_executions_v2',
        sa.Column('error_handled', sa.Boolean(), nullable=True)
    )
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/033_add_next_tasks_to_task_execution.py ====

# Copyright 2019 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Add next_tasks to task execution.

Revision ID: 033
Revises: 032
Create Date: 2019-06-06 13:42:12.123412

"""

# revision identifiers, used by Alembic.
revision = '033'
down_revision = '032'

from alembic import op
import sqlalchemy as sa

from mistral.db.sqlalchemy import types as st


def upgrade():
    op.add_column(
        'task_executions_v2',
        sa.Column('next_tasks', st.JsonListType(), nullable=True)
    )

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/034_add_scheduled_jobs_table.py ====

# Copyright 2019 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Create scheduled jobs table.

Revision ID: 034
Revises: 033
Create Date: 2019-07-01 17:38:41.153354

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection

from mistral.db.sqlalchemy import types as st

# revision identifiers, used by Alembic.
revision = '034'
down_revision = '033'


def upgrade():
    # NOTE(rakhmerov): We have to check if the table already
    # exists and drop it, if needed. This is because the DB
    # model for scheduled jobs was released w/o a migration
    # in the first place, so for some users the table was
    # created automatically at Mistral run based on the model.
    # But the structure of the table is old so we need to
    # recreate it anyway in this migration. It's safe to drop
    # this table because it contains temporary data.
    inspect = reflection.Inspector.from_engine(op.get_bind())

    if 'scheduled_jobs_v2' in inspect.get_table_names():
        op.drop_table('scheduled_jobs_v2')

    op.create_table(
        'scheduled_jobs_v2',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('run_after', sa.Integer(), nullable=True),
        sa.Column(
            'target_factory_func_name',
            sa.String(length=200),
            nullable=True
        ),
        sa.Column('func_name', sa.String(length=80), nullable=True),
        sa.Column('func_args', st.JsonEncoded(), nullable=True),
        sa.Column('func_arg_serializers', st.JsonEncoded(), nullable=True),
        sa.Column('auth_ctx', st.JsonEncoded(), nullable=True),
        sa.Column('execute_at', sa.DateTime(), nullable=False),
        sa.Column('captured_at', sa.DateTime(), nullable=True),
        sa.Column('key', sa.String(length=250), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/035_namespace_support_postgresql.py ====

# Copyright 2019 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Namespace support postgresql

Revision ID: 035
Revises: 034
Create Date: 2019-08-01 15:48:34.115639

"""

# revision identifiers, used by Alembic.
revision = '035'
down_revision = '034'

from alembic import op
from sqlalchemy.engine import reflection


def upgrade():
    inspect = reflection.Inspector.from_engine(op.get_bind())

    unique_constraints = [
        unique_constraint['name'] for unique_constraint
        in inspect.get_unique_constraints('workflow_definitions_v2')
    ]

    if 'workflow_definitions_v2_name_project_id_key' in unique_constraints:
        op.drop_constraint('workflow_definitions_v2_name_project_id_key',
                           table_name='workflow_definitions_v2')
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/036_namespace_support_for_workbooks_postgresql.py ====

# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""namespace support for workbooks table postgresql

Revision ID: 036
Revises: 035
Create Date: 2020-1-6 9:49:20

"""

# revision identifiers, used by Alembic.
from alembic import op
from sqlalchemy.engine import reflection

revision = '036'
down_revision = '035'


def upgrade():
    inspect = reflection.Inspector.from_engine(op.get_bind())

    unique_constraints = [
        unique_constraint['name'] for unique_constraint
        in inspect.get_unique_constraints('workbooks_v2')
    ]

    if 'workbooks_v2_name_project_id_key' in unique_constraints:
        op.drop_constraint('workbooks_v2_name_project_id_key',
                           table_name='workbooks_v2')
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/037_add_namespace_column_to_action_definitions.py ====

# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""add namespace column to action definitions

Revision ID: 037
Revises: 036
Create Date: 2020-1-6 10:22:20

"""

# revision identifiers, used by Alembic.
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
from sqlalchemy.sql import table, column

revision = '037'
down_revision = '036'


def upgrade():
    op.add_column(
        'action_definitions_v2',
        sa.Column('namespace', sa.String(length=255), nullable=True)
    )

    inspect = reflection.Inspector.from_engine(op.get_bind())

    unique_constraints = [
        unique_constraint['name'] for unique_constraint
        in inspect.get_unique_constraints('action_definitions_v2')
    ]

    if 'name' in unique_constraints:
        op.drop_index('name', table_name='action_definitions_v2')

    if 'action_definitions_v2_name_project_id_key' in unique_constraints:
        op.drop_constraint('action_definitions_v2_name_project_id_key',
                           table_name='action_definitions_v2')

    op.create_unique_constraint(
        None,
        'action_definitions_v2',
        ['name', 'namespace', 'project_id']
    )

    action_def = table('action_definitions_v2', column('namespace'))

    session = sa.orm.Session(bind=op.get_bind())

    with session.begin(subtransactions=True):
        # NOTE: "== None" (rather than the Python identity test "is None")
        # is required here so that SQLAlchemy renders "namespace IS NULL";
        # an identity test would evaluate eagerly to a plain False and make
        # the WHERE clause a no-op.
        session.execute(
            action_def.update().values(namespace='').where(
                action_def.c.namespace == None))  # noqa

    session.commit()

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/alembic_migrations/versions/__init__.py (empty) ====

# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/migration/cli.py ====

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Starter script for mistral-db-manage."""

import os

from alembic import command as alembic_cmd
from alembic import config as alembic_cfg
from alembic import util as alembic_u
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import six
import sys

from mistral.services import action_manager
from mistral.services import workflows

# We need to import mistral.api.app to
# make sure we register all needed options.
importutils.try_import('mistral.api.app')


CONF = cfg.CONF

LOG = logging.getLogger(__name__)


def do_alembic_command(config, cmd, *args, **kwargs):
    try:
        getattr(alembic_cmd, cmd)(config, *args, **kwargs)
    except alembic_u.CommandError as e:
        alembic_u.err(six.text_type(e))


def do_check_migration(config, _cmd):
    do_alembic_command(config, 'branches')


def do_upgrade(config, cmd):
    if not CONF.command.revision and not CONF.command.delta:
        raise SystemExit('You must provide a revision or relative delta')

    revision = CONF.command.revision

    if CONF.command.delta:
        sign = '+' if CONF.command.name == 'upgrade' else '-'
        revision = sign + str(CONF.command.delta)

    do_alembic_command(config, cmd, revision, sql=CONF.command.sql)


def do_stamp(config, cmd):
    do_alembic_command(
        config, cmd,
        CONF.command.revision,
        sql=CONF.command.sql
    )


def do_populate(config, cmd):
    LOG.info("Populating db")

    action_manager.sync_db()
    workflows.sync_db()


def do_populate_actions(config, cmd):
    LOG.info("Populating actions db")

    action_manager.sync_db()


def do_revision(config, cmd):
    do_alembic_command(
        config, cmd,
        message=CONF.command.message,
        autogenerate=CONF.command.autogenerate,
        sql=CONF.command.sql,
        rev_id=CONF.command.rev_id
    )


def add_command_parsers(subparsers):
    for name in ['current', 'history', 'branches']:
        parser = subparsers.add_parser(name)
        parser.set_defaults(func=do_alembic_command)

    parser = subparsers.add_parser('upgrade')
    parser.add_argument('--delta', type=int)
    parser.add_argument('--sql', action='store_true')
    parser.add_argument('revision', nargs='?')
    parser.set_defaults(func=do_upgrade)

    parser = subparsers.add_parser('populate')
    parser.set_defaults(func=do_populate)

    parser = subparsers.add_parser('populate_actions')
    parser.set_defaults(func=do_populate_actions)

    parser = subparsers.add_parser('stamp')
    parser.add_argument('--sql', action='store_true')
    parser.add_argument('revision', nargs='?')
    parser.set_defaults(func=do_stamp)

    parser = subparsers.add_parser('revision')
    parser.add_argument('-m', '--message')
    parser.add_argument('--rev-id', dest='rev_id')
    parser.add_argument('--autogenerate', action='store_true')
    parser.add_argument('--sql', action='store_true')
    parser.set_defaults(func=do_revision)


command_opt = cfg.SubCommandOpt('command',
                                title='Command',
                                help='Available commands',
                                handler=add_command_parsers)

CONF.register_cli_opt(command_opt)

# To keep backwards compatibility we need to accept the mapping path
# from mistral-extra if present.
try:
    import mistral_extra.config as extra_conf

    CONF.register_cli_opt(extra_conf.os_actions_mapping_path)
except ImportError:
    LOG.debug("Mistral-extra not installed")


def main():
    config = alembic_cfg.Config(
        os.path.join(os.path.dirname(__file__), 'alembic.ini')
    )
    config.set_main_option(
        'script_location',
        'mistral.db.sqlalchemy.migration:alembic_migrations'
    )

    # Attach the Mistral conf to the Alembic conf.
    config.mistral_config = CONF

    logging.register_options(CONF)

    CONF(project='mistral')

    logging.setup(CONF, 'Mistral')

    CONF.command.func(config, CONF.command.name)


if __name__ == '__main__':
    sys.exit(main())
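# ----------------------------------------------------------------------------
# Typical invocations of this script, based on the sub-commands registered
# above (the config-file path is illustrative):
#
#   mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head
#   mistral-db-manage --config-file /etc/mistral/mistral.conf stamp head
#   mistral-db-manage --config-file /etc/mistral/mistral.conf populate
#   mistral-db-manage --config-file /etc/mistral/mistral.conf history
# ----------------------------------------------------------------------------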
# ==== File: mistral-10.0.0.0b3/mistral/db/sqlalchemy/model_base.py ====

# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_db.sqlalchemy import models as oslo_models
import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy.ext import declarative
from sqlalchemy.orm import attributes

from mistral.services import security
from mistral_lib import utils


def id_column():
    return sa.Column(
        sa.String(36),
        primary_key=True,
        default=utils.generate_unicode_uuid
    )


class _MistralModelBase(oslo_models.ModelBase, oslo_models.TimestampMixin):
    """Base class for all Mistral SQLAlchemy DB Models."""

    created_at = sa.Column(sa.DateTime, default=lambda: utils.utc_now_sec())
    updated_at = sa.Column(sa.DateTime, onupdate=lambda: utils.utc_now_sec())

    __table__ = None

    __hash__ = object.__hash__

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __eq__(self, other):
        if type(self) is not type(other):
            return False

        for col in self.__table__.columns:
            # In case of single table inheritance a class attribute
            # corresponding to a table column may not exist so we need
            # to skip these attributes.
            if (hasattr(self, col.name) and hasattr(other, col.name) and
                    getattr(self, col.name) != getattr(other, col.name)):
                return False

        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_dict(self):
        """sqlalchemy based automatic to_dict method."""
        d = {col_name: col_val for col_name, col_val in self.iter_columns()}

        utils.datetime_to_str_in_dict(d, 'created_at')
        utils.datetime_to_str_in_dict(d, 'updated_at')

        return d

    def iter_column_names(self):
        """Returns an iterator for loaded column names.

        :return: A generator function for column names.
        """

        # If a column is unloaded at this point, it is
        # probably deferred. We do not want to access it
        # here and thereby cause it to load.
        unloaded = attributes.instance_state(self).unloaded

        for col in self.__table__.columns:
            if col.name not in unloaded and hasattr(self, col.name):
                yield col.name

    def iter_columns(self):
        """Returns an iterator for loaded columns.

        :return: A generator function that generates tuples
            (column name, column value).
        """

        for col_name in self.iter_column_names():
            yield col_name, getattr(self, col_name)

    def get_clone(self):
        """Clones current object, loads all fields and returns the result."""
        m = self.__class__()

        for col in self.__table__.columns:
            if hasattr(self, col.name):
                setattr(m, col.name, getattr(self, col.name))

        setattr(
            m,
            'created_at',
            utils.datetime_to_str(getattr(self, 'created_at'))
        )

        updated_at = getattr(self, 'updated_at')

        # NOTE(nmakhotkin): 'updated_at' field is empty for a just created
        # object since it has not been updated yet.
        if updated_at:
            setattr(m, 'updated_at', utils.datetime_to_str(updated_at))

        return m

    def __repr__(self):
        return '%s %s' % (type(self).__name__, self.to_dict().__repr__())

    @classmethod
    def _get_nullable_column_names(cls):
        return [c.name for c in cls.__table__.columns if c.nullable]

    @classmethod
    def check_allowed_none_values(cls, column_names):
        """Checks if the given columns can be assigned with None value.

        :param column_names: The names of the columns to check.
        """
        all_columns = cls.__table__.columns.keys()
        nullable_columns = cls._get_nullable_column_names()

        for col in column_names:
            if col not in all_columns:
                raise ValueError("'{}' is not a valid field name.".format(col))

            if col not in nullable_columns:
                raise ValueError(
                    "The field '{}' can't hold None value.".format(col)
                )


MistralModelBase = declarative.declarative_base(cls=_MistralModelBase)


# Secure model related stuff.


class MistralSecureModelBase(MistralModelBase):
    """Base class for all secure models."""

    __abstract__ = True

    scope = sa.Column(sa.String(80), default='private')
    project_id = sa.Column(sa.String(80), default=security.get_project_id)


def _set_project_id(target, value, oldvalue, initiator):
    return security.get_project_id()


def register_secure_model_hooks():
    # Make sure 'project_id' is always properly set.
    for sec_model_class in utils.iter_subclasses(MistralSecureModelBase):
        if '__abstract__' not in sec_model_class.__dict__:
            event.listen(
                sec_model_class.project_id,
                'set',
                _set_project_id,
                retval=True
            )
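# ----------------------------------------------------------------------------
# A short usage sketch for the base class above, assuming the mistral package
# is importable (the Example model itself is illustrative, not part of the
# tree):
import sqlalchemy as sa

from mistral.db.sqlalchemy import model_base as mb


class Example(mb.MistralModelBase):
    __tablename__ = 'example'

    id = mb.id_column()
    name = sa.Column(sa.String(80))


a = Example(id='1', name='demo')
b = Example(id='1', name='demo')

print(a == b)                       # column-wise comparison, not identity
print(list(a.iter_column_names()))  # only columns that are actually loaded
# ----------------------------------------------------------------------------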
""" all_columns = cls.__table__.columns.keys() nullable_columns = cls._get_nullable_column_names() for col in column_names: if col not in all_columns: raise ValueError("'{}' is not a valid field name.".format(col)) if col not in nullable_columns: raise ValueError( "The field '{}' can't hold None value.".format(col) ) MistralModelBase = declarative.declarative_base(cls=_MistralModelBase) # Secure model related stuff. class MistralSecureModelBase(MistralModelBase): """Base class for all secure models.""" __abstract__ = True scope = sa.Column(sa.String(80), default='private') project_id = sa.Column(sa.String(80), default=security.get_project_id) def _set_project_id(target, value, oldvalue, initiator): return security.get_project_id() def register_secure_model_hooks(): # Make sure 'project_id' is always properly set. for sec_model_class in utils.iter_subclasses(MistralSecureModelBase): if '__abstract__' not in sec_model_class.__dict__: event.listen( sec_model_class.project_id, 'set', _set_project_id, retval=True ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/sqlite_lock.py0000644000175000017500000000316200000000000023202 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import semaphore _mutex = semaphore.Semaphore() _locks = {} def acquire_lock(obj_id, session): with _mutex: if obj_id not in _locks: _locks[obj_id] = (session, semaphore.BoundedSemaphore(1)) tup = _locks.get(obj_id) tup[1].acquire() # Make sure to update the dictionary once the lock is acquired # to adjust session ownership. _locks[obj_id] = (session, tup[1]) def release_locks(session): with _mutex: for obj_id, tup in _locks.items(): if tup[0] is session: tup[1].release() def get_locks(): return _locks def cleanup(): with _mutex: # NOTE: For the sake of simplicity we assume that we remove stale locks # after all tests because this kind of locking can only be used with # sqlite database. Supporting fully dynamically allocated (and removed) # locks is much more complex task. If this method is not called after # tests it will cause a memory leak. _locks.clear() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/sqlalchemy/types.py0000644000175000017500000000572100000000000022040 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # This module implements SQLAlchemy-based types for dict and list # expressed by json-strings # import sqlalchemy as sa from sqlalchemy.dialects import mysql from sqlalchemy.ext import mutable from mistral import utils class JsonEncoded(sa.TypeDecorator): """Represents an immutable structure as a json-encoded string.""" impl = sa.Text def process_bind_param(self, value, dialect): return utils.to_json_str(value) def process_result_value(self, value, dialect): return utils.from_json_str(value) class MutableList(mutable.Mutable, list): @classmethod def coerce(cls, key, value): """Convert plain lists to MutableList.""" if not isinstance(value, MutableList): if isinstance(value, list): return MutableList(value) # this call will raise ValueError return mutable.Mutable.coerce(key, value) return value def __add__(self, value): """Detect list add events and emit change events.""" list.__add__(self, value) self.changed() def append(self, value): """Detect list add events and emit change events.""" list.append(self, value) self.changed() def __setitem__(self, key, value): """Detect list set events and emit change events.""" list.__setitem__(self, key, value) self.changed() def __delitem__(self, i): """Detect list del events and emit change events.""" list.__delitem__(self, i) self.changed() def JsonDictType(): """Returns an SQLAlchemy Column Type suitable to store a Json dict.""" return mutable.MutableDict.as_mutable(JsonEncoded) def JsonListType(): """Returns an SQLAlchemy Column Type suitable to store a Json array.""" return MutableList.as_mutable(JsonEncoded) def MediumText(): # TODO(rakhmerov): Need to do for postgres. return sa.Text().with_variant(mysql.MEDIUMTEXT(), 'mysql') class JsonEncodedMediumText(JsonEncoded): impl = MediumText() def JsonMediumDictType(): return mutable.MutableDict.as_mutable(JsonEncodedMediumText) def LongText(): # TODO(rakhmerov): Need to do for postgres. return sa.Text().with_variant(mysql.LONGTEXT(), 'mysql') class JsonEncodedLongText(JsonEncoded): impl = LongText() def JsonLongDictType(): return mutable.MutableDict.as_mutable(JsonEncodedLongText) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/utils.py0000644000175000017500000001637500000000000017701 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import absolute_import from cachetools import keys as cachetools_keys import decorator import functools import inspect import six from sqlalchemy import exc as sqla_exc from oslo_db import exception as db_exc from oslo_log import log as logging import tenacity from mistral import context from mistral.db.sqlalchemy import base as db_base from mistral import exceptions as exc from mistral.services import security from mistral_lib import utils as ml_utils LOG = logging.getLogger(__name__) _RETRY_ERRORS = ( db_exc.DBDeadlock, db_exc.DBConnectionError, sqla_exc.OperationalError ) def _with_auth_context(auth_ctx, func, *args, **kw): """Runs the given function with the specified auth context. :param auth_ctx: Authentication context. :param func: Function to run with the specified auth context. :param args: Function positional arguments. :param kw: Function keyword arguments. :return: Function result. """ old_auth_ctx = context.ctx() if context.has_ctx() else None context.set_ctx(auth_ctx) try: return func(*args, **kw) except Exception as e: # Note (rakhmerov): In case of "Too many connections" error from the # database it doesn't get wrapped with a SQLAlchemy exception for some # reason so we have to check the exception message explicitly. if isinstance(e, _RETRY_ERRORS) or 'Too many connections' in str(e): LOG.exception( "DB error detected, operation will be retried: %s", func ) raise finally: context.set_ctx(old_auth_ctx) def retry_on_db_error(func, retry=None): """Decorates the given function so that it retries on DB errors. Note that the decorator retries the function/method only on some of the DB errors that are considered to be worth retrying, like deadlocks and disconnections. :param func: Function to decorate. :param retry: a Retrying object :return: Decorated function. """ if not retry: retry = tenacity.Retrying( retry=( tenacity.retry_if_exception_type(_RETRY_ERRORS) | tenacity.retry_if_exception_message( match='Too many connections' ) ), stop=tenacity.stop_after_attempt(50), wait=tenacity.wait_incrementing(start=0, increment=0.1, max=2) ) # The `assigned` arg should be empty as some of the default values are not # supported by simply initialized MagicMocks. The consequence may # be that the representation will contain the wrapper and not the # wrapped function. @functools.wraps(func, assigned=[]) def decorate(*args, **kw): # Retrying library decorator might potentially run a decorated # function within a new thread so it's safer not to apply the # decorator directly to a target method/function because we can # lose an authentication context. # The solution is to create one more function and explicitly set # auth context before calling it (potentially in a new thread). auth_ctx = context.ctx() if context.has_ctx() else None return retry.call(_with_auth_context, auth_ctx, func, *args, **kw) return decorate def check_db_obj_access(db_obj): """Check accessibility to db object.""" ctx = context.ctx() is_admin = ctx.is_admin if not is_admin and db_obj.project_id != security.get_project_id(): raise exc.NotAllowedException( "Can not access %s resource of other projects, ID: %s" % (db_obj.__class__.__name__, db_obj.id) ) if not is_admin and hasattr(db_obj, 'is_system') and db_obj.is_system: raise exc.InvalidActionException( "Can not modify a system %s resource, ID: %s" % (db_obj.__class__.__name__, db_obj.id) ) def tx_cached(use_args=None, ignore_args=None): """Decorates any function to cache its result within a DB transaction. 
    Since a DB transaction is coupled with the current thread, the scope
    of the underlying cache doesn't go beyond the thread. The decorator
    is mainly useful in situations when we know we can safely cache the
    result of some calculation because it's not going to change till the
    end of the current transaction.

    :param use_args: A tuple with argument names of the decorated function
        used to build a cache key.
    :param ignore_args: A tuple with argument names of the decorated
        function that should be ignored when building a cache key.
    :return: Decorated function.
    """
    if use_args and ignore_args:
        raise ValueError(
            "Only one of 'use_args' and 'ignore_args' can be used."
        )

    def _build_cache_key(func, *args, **kw):
        # { arg name => arg value }
        arg_dict = inspect.getcallargs(func, *args, **kw)

        if ignore_args:
            if not isinstance(ignore_args, (six.string_types, tuple)):
                raise ValueError(
                    "'ignore_args' must be either a tuple or a string,"
                    " actual type: %s" % type(ignore_args)
                )

            ignore_args_tup = (
                ignore_args if isinstance(ignore_args, tuple)
                else (ignore_args,)
            )

            for arg_name in ignore_args_tup:
                arg_dict.pop(arg_name, None)

        if use_args:
            if not isinstance(use_args, (six.string_types, tuple)):
                raise ValueError(
                    "'use_args' must be either a tuple or a string,"
                    " actual type: %s" % type(use_args)
                )

            use_args_tup = (
                use_args if isinstance(use_args, tuple) else (use_args,)
            )

            # NOTE: Iterate over a copy of the keys because popping items
            # from a dict while iterating over its live key view raises
            # a RuntimeError in Python 3.
            for arg_name in list(arg_dict.keys()):
                if arg_name not in tuple(use_args_tup):
                    arg_dict.pop(arg_name, None)

        return cachetools_keys.hashkey(**arg_dict)

    @decorator.decorator
    def _decorator(func, *args, **kw):
        cache = db_base.get_tx_scoped_cache()

        # A DB transaction may not be necessarily open at the moment.
        if cache is None:
            return func(*args, **kw)

        cache_key = _build_cache_key(func, *args, **kw)

        result = cache.get(cache_key, default=ml_utils.NotDefined)

        if result is not ml_utils.NotDefined:
            return result

        # We don't do any exception handling here. In case of an exception
        # nothing will be put into the cache and the exception will just
        # bubble up as if there wasn't any wrapper.
        result = func(*args, **kw)

        cache[cache_key] = result

        return result

    return _decorator
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1135674
mistral-10.0.0.0b3/mistral/db/v2/0000755000175000017500000000000000000000000016502 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/mistral/db/v2/__init__.py0000644000175000017500000000000000000000000020601 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/mistral/db/v2/api.py0000644000175000017500000003731400000000000017635 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cachetools import contextlib import threading from oslo_config import cfg from oslo_db import api as db_api _BACKEND_MAPPING = { 'sqlalchemy': 'mistral.db.v2.sqlalchemy.api', } IMPL = db_api.DBAPI('sqlalchemy', backend_mapping=_BACKEND_MAPPING) CONF = cfg.CONF _ACTION_DEF_CACHE = cachetools.TTLCache( maxsize=1000, ttl=CONF.engine.action_definition_cache_time # 60 seconds by default ) _ACTION_DEF_CACHE_LOCK = threading.RLock() def setup_db(): IMPL.setup_db() def drop_db(): IMPL.drop_db() # Transaction control. def start_tx(): IMPL.start_tx() def commit_tx(): IMPL.commit_tx() def rollback_tx(): IMPL.rollback_tx() def end_tx(): IMPL.end_tx() @contextlib.contextmanager def transaction(read_only=False): with IMPL.transaction(read_only): yield def refresh(model): IMPL.refresh(model) def expire_all(): IMPL.expire_all() # Locking. def acquire_lock(model, id): return IMPL.acquire_lock(model, id) # Workbooks. def get_workbook(name, namespace, fields=()): return IMPL.get_workbook(name, namespace=namespace, fields=fields) def load_workbook(name, namespace, fields=()): """Unlike get_workbook this method is allowed to return None.""" return IMPL.load_workbook(name, namespace=namespace, fields=fields) def get_workbooks(limit=None, marker=None, sort_keys=None, sort_dirs=None, fields=None, **kwargs): return IMPL.get_workbooks( limit=limit, marker=marker, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, **kwargs ) def create_workbook(values): return IMPL.create_workbook(values) def update_workbook(name, values): return IMPL.update_workbook(name, values) def create_or_update_workbook(name, values): return IMPL.create_or_update_workbook(name, values) def delete_workbook(name, namespace=None): IMPL.delete_workbook(name, namespace) def delete_workbooks(**kwargs): IMPL.delete_workbooks(**kwargs) # Workflow definitions. def get_workflow_definition(identifier, namespace='', fields=()): return IMPL.get_workflow_definition( identifier, namespace=namespace, fields=fields ) def get_workflow_definition_by_id(id, fields=()): return IMPL.get_workflow_definition_by_id(id, fields=fields) def load_workflow_definition(name, namespace='', fields=()): """Unlike get_workflow_definition this method is allowed to return None.""" return IMPL.load_workflow_definition(name, namespace, fields=fields) def get_workflow_definitions(limit=None, marker=None, sort_keys=None, sort_dirs=None, fields=None, **kwargs): return IMPL.get_workflow_definitions( limit=limit, marker=marker, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, **kwargs ) def create_workflow_definition(values): return IMPL.create_workflow_definition(values) def update_workflow_definition(identifier, values): return IMPL.update_workflow_definition(identifier, values) def create_or_update_workflow_definition(name, values): return IMPL.create_or_update_workflow_definition(name, values) def delete_workflow_definition(identifier, namespace=''): IMPL.delete_workflow_definition(identifier, namespace) def delete_workflow_definitions(**kwargs): IMPL.delete_workflow_definitions(**kwargs) # Action definitions. 
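# NOTE: The following helper is an illustrative sketch, not part of the
# original module. It shows the intended usage of the transaction() context
# manager defined above: all DB calls inside the "with" block share a single
# transaction that is committed on a clean exit and rolled back on error
# (or unconditionally when read_only=True). The helper name and the limit
# value are hypothetical.
def _example_read_only_tx():
    with transaction(read_only=True):
        # Both reads see a consistent snapshot within one transaction.
        wf_defs = get_workflow_definitions(limit=10)
        wbs = get_workbooks(limit=10)

        return wf_defs, wbs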
def get_action_definition_by_id(id, fields=()): return IMPL.get_action_definition_by_id(id, fields=fields) def get_action_definition(name, fields=(), namespace=''): return IMPL.get_action_definition(name, fields=fields, namespace=namespace) def load_action_definition(name, fields=(), namespace=''): """Unlike get_action_definition this method is allowed to return None.""" key = '{}:{}'.format(name, namespace) if namespace else name with _ACTION_DEF_CACHE_LOCK: action_def = _ACTION_DEF_CACHE.get(key) if action_def: return action_def action_def = IMPL.load_action_definition(name, fields=fields, namespace=namespace,) # If action definition was not found in the workflow namespace, # check in the default namespace if not action_def: action_def = IMPL.load_action_definition(name, fields=fields, namespace='') with _ACTION_DEF_CACHE_LOCK: _ACTION_DEF_CACHE[key] = ( action_def.get_clone() if action_def else None ) return action_def def get_action_definitions(limit=None, marker=None, sort_keys=None, sort_dirs=None, **kwargs): return IMPL.get_action_definitions( limit=limit, marker=marker, sort_keys=sort_keys, sort_dirs=sort_dirs, **kwargs ) def create_action_definition(values): return IMPL.create_action_definition(values) def update_action_definition(identifier, values): return IMPL.update_action_definition(identifier, values) def create_or_update_action_definition(name, values): return IMPL.create_or_update_action_definition(name, values) def delete_action_definition(name, namespace=''): return IMPL.delete_action_definition(name, namespace=namespace) def delete_action_definitions(**kwargs): return IMPL.delete_action_definitions(**kwargs) # Action executions. def get_action_execution(id, fields=(), insecure=False): return IMPL.get_action_execution(id, fields=fields, insecure=insecure) def load_action_execution(name, fields=()): """Unlike get_action_execution this method is allowed to return None.""" return IMPL.load_action_execution(name, fields=fields) def get_action_executions(**kwargs): return IMPL.get_action_executions(**kwargs) def create_action_execution(values): return IMPL.create_action_execution(values) def update_action_execution(id, values, insecure=False): return IMPL.update_action_execution(id, values, insecure) def create_or_update_action_execution(id, values): return IMPL.create_or_update_action_execution(id, values) def update_action_execution_heartbeat(id): return IMPL.update_action_execution_heartbeat(id) def delete_action_execution(id): return IMPL.delete_action_execution(id) def delete_action_executions(**kwargs): IMPL.delete_action_executions(**kwargs) # Workflow executions. 
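# NOTE: Illustrative sketch, not part of the original module. It demonstrates
# the cachetools.TTLCache behavior that load_action_definition() above relies
# on: an entry is treated as missing once its TTL has elapsed, so a stale
# action definition is served for at most 'ttl' seconds. The key, value and
# timing numbers here are arbitrary example values.
def _example_ttl_cache():
    import time

    import cachetools

    cache = cachetools.TTLCache(maxsize=10, ttl=0.1)

    cache['std.echo'] = 'fake definition'

    assert cache.get('std.echo') == 'fake definition'

    time.sleep(0.2)

    # After the TTL has expired the key is treated as missing.
    assert cache.get('std.echo') is None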
def get_workflow_execution(id, fields=()): return IMPL.get_workflow_execution(id, fields=fields) def load_workflow_execution(name, fields=()): """Unlike get_workflow_execution this method is allowed to return None.""" return IMPL.load_workflow_execution(name, fields=fields) def get_workflow_executions(limit=None, marker=None, sort_keys=None, sort_dirs=None, **kwargs): return IMPL.get_workflow_executions( limit=limit, marker=marker, sort_keys=sort_keys, sort_dirs=sort_dirs, **kwargs ) def create_workflow_execution(values): return IMPL.create_workflow_execution(values) def update_workflow_execution(id, values): return IMPL.update_workflow_execution(id, values) def create_or_update_workflow_execution(id, values): return IMPL.create_or_update_workflow_execution(id, values) def delete_workflow_execution(id): return IMPL.delete_workflow_execution(id) def delete_workflow_executions(**kwargs): IMPL.delete_workflow_executions(**kwargs) def update_workflow_execution_state(**kwargs): return IMPL.update_workflow_execution_state(**kwargs) # Tasks executions. def get_task_execution(id, fields=()): return IMPL.get_task_execution(id, fields=fields) def load_task_execution(id, fields=()): """Unlike get_task_execution this method is allowed to return None.""" return IMPL.load_task_execution(id, fields=fields) def get_task_executions(limit=None, marker=None, sort_keys=None, sort_dirs=None, **kwargs): return IMPL.get_task_executions( limit=limit, marker=marker, sort_keys=sort_keys, sort_dirs=sort_dirs, **kwargs ) def get_task_executions_count(**kwargs): return IMPL.get_task_executions_count(**kwargs) def get_completed_task_executions(**kwargs): return IMPL.get_completed_task_executions(**kwargs) def get_completed_task_executions_as_batches(**kwargs): return IMPL.get_completed_task_executions_as_batches(**kwargs) def get_incomplete_task_executions(**kwargs): return IMPL.get_incomplete_task_executions(**kwargs) def get_incomplete_task_executions_count(**kwargs): return IMPL.get_incomplete_task_executions_count(**kwargs) def create_task_execution(values): return IMPL.create_task_execution(values) def update_task_execution(id, values): return IMPL.update_task_execution(id, values) def create_or_update_task_execution(id, values): return IMPL.create_or_update_task_execution(id, values) def delete_task_execution(id): return IMPL.delete_task_execution(id) def delete_task_executions(**kwargs): return IMPL.delete_task_executions(**kwargs) def update_task_execution_state(**kwargs): return IMPL.update_task_execution_state(**kwargs) # Delayed calls. def get_delayed_calls_to_start(time, batch_size=None): return IMPL.get_delayed_calls_to_start(time, batch_size) def create_delayed_call(values): return IMPL.create_delayed_call(values) def delete_delayed_call(id): return IMPL.delete_delayed_call(id) def update_delayed_call(id, values, query_filter=None): return IMPL.update_delayed_call(id, values, query_filter) def get_delayed_call(id): return IMPL.get_delayed_call(id) def get_delayed_calls(**kwargs): return IMPL.get_delayed_calls(**kwargs) def get_delayed_calls_count(**kwargs): return IMPL.get_delayed_calls_count(**kwargs) def delete_delayed_calls(**kwargs): return IMPL.delete_delayed_calls(**kwargs) # Scheduled jobs. 
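# NOTE: Illustrative sketch, not part of the original module. It shows how
# the optimistic update_workflow_execution_state() helper above is meant to
# be used: if the state stored in the DB no longer matches 'cur_state', the
# underlying update_on_match() call changes nothing and returns None, which
# lets the caller detect that it lost the race. The ID is hypothetical and
# the state strings mirror the constants in mistral.workflow.states.
def _example_optimistic_state_update(wf_ex_id):
    updated = update_workflow_execution_state(
        id=wf_ex_id,
        cur_state='RUNNING',
        state='PAUSED'
    )

    if updated is None:
        # Somebody else changed the state first; the caller typically
        # re-reads the execution and decides what to do.
        pass

    return updated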
def get_scheduled_jobs_to_start(time, batch_size=None): return IMPL.get_scheduled_jobs_to_start(time, batch_size) def create_scheduled_job(values): return IMPL.create_scheduled_job(values) def delete_scheduled_job(id): return IMPL.delete_scheduled_job(id) def update_scheduled_job(id, values, query_filter=None): return IMPL.update_scheduled_job(id, values, query_filter) def get_scheduled_job(id): return IMPL.get_scheduled_job(id) def get_scheduled_jobs(**kwargs): return IMPL.get_scheduled_jobs(**kwargs) def delete_scheduled_jobs(**kwargs): return IMPL.delete_scheduled_jobs(**kwargs) def get_scheduled_jobs_count(**kwargs): return IMPL.get_scheduled_jobs_count(**kwargs) # Cron triggers. def get_cron_trigger(identifier): return IMPL.get_cron_trigger(identifier) def get_cron_trigger_by_id(id): return IMPL.get_cron_trigger_by_id(id) def load_cron_trigger(identifier): """Unlike get_cron_trigger this method is allowed to return None.""" return IMPL.load_cron_trigger(identifier) def get_cron_triggers(**kwargs): return IMPL.get_cron_triggers(**kwargs) def get_next_cron_triggers(time): return IMPL.get_next_cron_triggers(time) def get_expired_executions(expiration_time, limit=None, columns=()): return IMPL.get_expired_executions( expiration_time, limit, columns ) def get_running_expired_sync_action_executions(expiration_time, limit, session=None): return IMPL.get_running_expired_sync_action_executions( expiration_time, limit ) def get_superfluous_executions(max_finished_executions, limit=None, columns=()): return IMPL.get_superfluous_executions( max_finished_executions, limit, columns ) def create_cron_trigger(values): return IMPL.create_cron_trigger(values) def update_cron_trigger(identifier, values, query_filter=None): return IMPL.update_cron_trigger(identifier, values, query_filter=query_filter) def create_or_update_cron_trigger(identifier, values): return IMPL.create_or_update_cron_trigger(identifier, values) def delete_cron_trigger(identifier): return IMPL.delete_cron_trigger(identifier) def delete_cron_triggers(**kwargs): return IMPL.delete_cron_triggers(**kwargs) # Environments. def get_environment(name): return IMPL.get_environment(name) def load_environment(name): """Unlike get_environment this method is allowed to return None.""" return IMPL.load_environment(name) def get_environments(limit=None, marker=None, sort_keys=None, sort_dirs=None, **kwargs): return IMPL.get_environments( limit=limit, marker=marker, sort_keys=sort_keys, sort_dirs=sort_dirs, **kwargs ) def create_environment(values): return IMPL.create_environment(values) def update_environment(name, values): return IMPL.update_environment(name, values) def create_or_update_environment(name, values): return IMPL.create_or_update_environment(name, values) def delete_environment(name): IMPL.delete_environment(name) def delete_environments(**kwargs): IMPL.delete_environments(**kwargs) # Resource members. 
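# NOTE: Illustrative sketch, not part of the original module. It captures the
# get_*/load_* convention used throughout this API: get_environment() raises
# DBEntityNotFoundError for a missing object, while load_environment() simply
# returns None. The environment name is a hypothetical example.
def _example_get_vs_load():
    env = load_environment('may-not-exist')

    if env is None:
        # A missing object is an expected situation for load_*().
        return None

    # get_environment('may-not-exist') would instead raise
    # mistral.exceptions.DBEntityNotFoundError in the same case.
    return env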
def create_resource_member(values): return IMPL.create_resource_member(values) def get_resource_member(resource_id, res_type, member_id): return IMPL.get_resource_member(resource_id, res_type, member_id) def get_resource_members(resource_id, res_type): return IMPL.get_resource_members(resource_id, res_type) def update_resource_member(resource_id, res_type, member_id, values): return IMPL.update_resource_member( resource_id, res_type, member_id, values ) def delete_resource_member(resource_id, res_type, member_id): IMPL.delete_resource_member(resource_id, res_type, member_id) def delete_resource_members(**kwargs): IMPL.delete_resource_members(**kwargs) # Event triggers. def get_event_trigger(id, insecure=False): return IMPL.get_event_trigger(id, insecure) def load_event_trigger(id, insecure=False): return IMPL.load_event_trigger(id, insecure) def get_event_triggers(insecure=False, limit=None, marker=None, sort_keys=None, sort_dirs=None, fields=None, **kwargs): return IMPL.get_event_triggers( insecure=insecure, limit=limit, marker=marker, sort_keys=sort_keys, sort_dirs=sort_dirs, fields=fields, **kwargs ) def create_event_trigger(values): return IMPL.create_event_trigger(values) def update_event_trigger(id, values): return IMPL.update_event_trigger(id, values) def delete_event_trigger(id): return IMPL.delete_event_trigger(id) def delete_event_triggers(**kwargs): return IMPL.delete_event_triggers(**kwargs) # Locks. def create_named_lock(name): return IMPL.create_named_lock(name) def get_named_locks(limit=None, marker=None): return IMPL.get_named_locks(limit=limit, marker=marker) def delete_named_lock(lock_id): return IMPL.delete_named_lock(lock_id) @contextlib.contextmanager def named_lock(name): with IMPL.named_lock(name): yield ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1135674 mistral-10.0.0.0b3/mistral/db/v2/sqlalchemy/0000755000175000017500000000000000000000000020644 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/v2/sqlalchemy/__init__.py0000644000175000017500000000000000000000000022743 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/db/v2/sqlalchemy/api.py0000644000175000017500000014750000000000000021776 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import contextlib
import datetime
import re
import sys
import threading

from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db import sqlalchemy as oslo_sqlalchemy
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log as logging
from oslo_utils import uuidutils  # noqa
import sqlalchemy as sa

from mistral import context
from mistral.db.sqlalchemy import base as b
from mistral.db.sqlalchemy import model_base as mb
from mistral.db.sqlalchemy import sqlite_lock
from mistral.db import utils as m_dbutils
from mistral.db.v2.sqlalchemy import filters as db_filters
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.services import security
from mistral.workflow import states
from mistral_lib import utils


CONF = cfg.CONF
LOG = logging.getLogger(__name__)

_SCHEMA_LOCK = threading.RLock()
_initialized = False


def get_backend():
    """Consumed by openstack common code.

    The backend is this module itself.
    :return: Name of db backend.
    """
    return sys.modules[__name__]


def setup_db():
    global _initialized

    with _SCHEMA_LOCK:
        if _initialized:
            return

        try:
            models.Workbook.metadata.create_all(b.get_engine())

            _initialized = True
        except sa.exc.OperationalError as e:
            raise exc.DBError("Failed to setup database: %s" % e)


def drop_db():
    global _initialized

    with _SCHEMA_LOCK:
        if not _initialized:
            return

        try:
            models.Workbook.metadata.drop_all(b.get_engine())

            _initialized = False
        except Exception as e:
            raise exc.DBError("Failed to drop database: %s" % e)


# Transaction management.

def start_tx():
    b.start_tx()


def commit_tx():
    b.commit_tx()


def rollback_tx():
    b.rollback_tx()


def end_tx():
    b.end_tx()


@contextlib.contextmanager
def transaction(read_only=False):
    start_tx()

    try:
        yield

        if read_only:
            rollback_tx()
        else:
            commit_tx()
    finally:
        end_tx()


@b.session_aware()
def refresh(model, session=None):
    session.refresh(model)


@b.session_aware()
def expire_all(session=None):
    session.expire_all()


@b.session_aware()
def acquire_lock(model, id, session=None):
    # Expire all so all objects queried after lock is acquired
    # will be up-to-date from the DB and not from cache.
    session.expire_all()

    if b.get_driver_name() == 'sqlite':
        # In case of 'sqlite' we need to apply a manual lock.
        sqlite_lock.acquire_lock(id, session)

    return _lock_entity(model, id)


def _lock_entity(model, id):
    # Get entity by ID in "FOR UPDATE" mode and expect exactly one object.
    return _secure_query(model).with_for_update().filter(model.id == id).one()


@b.session_aware()
def update_on_match(id, specimen, values, attempts, session=None):
    """Updates a model with the given values if it matches the given specimen.

    :param id: ID of a persistent model.
    :param specimen: Specimen used to match the persistent object; the
        update happens only if the object's fields match the specimen.
    :param values: Values to set to the model if fields of the object
        match the specimen.
    :param attempts: The function will then invoke the UPDATE statement
        and check for "success" one or more times, up to a maximum of
        that passed as attempts.
    :param session: Session.
    :return: Persistent object attached to the session.
    """
    assert id is not None
    assert specimen is not None

    # We need to flush the session because when we do update_on_match()
    # it doesn't always update the state of the persistent object properly
    # when it merges a specimen state into it. Some fields get wiped out from
    # the history of ORM events that must be flushed later. For example, it
    # doesn't work well in case of Postgres.
# See https://bugs.launchpad.net/mistral/+bug/1736821 session.flush() model = None model_class = type(specimen) # Use WHERE clause to exclude possible conflicts if the state has # already been changed. try: model = b.model_query(model_class).update_on_match( specimen=specimen, surrogate_key='id', values=values, attempts=attempts ) except oslo_sqlalchemy.update_match.NoRowsMatched: LOG.info( "Can't change state of persistent object " "because it has already been changed. [model_class=%s, id=%s, " "specimen=%s, values=%s]", model_class, id, specimen, values ) return model def _secure_query(model, *columns): query = b.model_query(model, columns) if not issubclass(model, mb.MistralSecureModelBase): return query shared_res_ids = [] res_type = RESOURCE_MAPPING.get(model, '') if res_type: shared_res = _get_accepted_resources(res_type) shared_res_ids = [res.resource_id for res in shared_res] query_criterion = sa.or_( model.project_id == security.get_project_id(), model.scope == 'public' ) # NOTE(kong): Include IN_ predicate in query filter only if shared_res_ids # is not empty to avoid sqlalchemy SAWarning and wasting a db call. if shared_res_ids: query_criterion = sa.or_( query_criterion, model.id.in_(shared_res_ids) ) query = query.filter(query_criterion) return query def _paginate_query(model, limit=None, marker=None, sort_keys=None, sort_dirs=None, query=None): if not query: query = _secure_query(model) sort_keys = sort_keys if sort_keys else [] # We should add sorting by id only if we use pagination or when # there is no specified ordering criteria. Otherwise # we can omit it to increase the performance. if not sort_keys or (marker or limit) and 'id' not in sort_keys: sort_keys.append('id') sort_dirs.append('asc') if sort_dirs else None query = db_utils.paginate_query( query, model, limit, sort_keys, marker=marker, sort_dirs=sort_dirs ) return query def _delete_all(model, **kwargs): # NOTE(kong): Because we use 'in_' operator in _secure_query(), delete() # method will raise error with default parameter. 
Please refer to # http://docs.sqlalchemy.org/en/rel_1_0/orm/query.html#sqlalchemy.orm.query.Query.delete query = _secure_query(model) query = db_filters.apply_filters(query, model, **kwargs) query.delete(synchronize_session=False) def _get_collection(model, insecure=False, limit=None, marker=None, sort_keys=None, sort_dirs=None, fields=None, **filters): columns = ( tuple([getattr(model, f) for f in fields if hasattr(model, f)]) if fields else () ) query = (b.model_query(model, columns=columns) if insecure else _secure_query(model, *columns)) query = db_filters.apply_filters(query, model, **filters) query = _paginate_query( model, limit, marker, sort_keys, sort_dirs, query ) return query.all() def _get_count(model, insecure=False, **filters): query = b.model_query(model) if insecure else _secure_query(model) query = db_filters.apply_filters(query, model, **filters) return query.count() def _get_db_object_by_name(model, name, columns=()): query = _secure_query(model, *columns) return query.filter_by(name=name).first() def _get_db_object_by_id(model, id, insecure=False, columns=()): query = ( b.model_query(model, columns=columns) if insecure else _secure_query(model, *columns) ) return query.filter_by(id=id).first() def _get_db_object_by_name_and_namespace_or_id(model, identifier, namespace=None, insecure=False, columns=()): query = ( b.model_query(model, columns=columns) if insecure else _secure_query(model, *columns) ) match_name = model.name == identifier if namespace is not None: match_name = sa.and_(match_name, model.namespace == namespace) match_id = model.id == identifier query = query.filter( sa.or_( match_id, match_name ) ) return query.first() def _get_db_object_by_name_and_namespace(model, name, namespace='', insecure=False, columns=()): query = ( b.model_query(model, columns=columns) if insecure else _secure_query(model, *columns) ) if namespace is None: namespace = '' query = query.filter( sa.and_( model.name == name, model.namespace == namespace ) ) return query.first() # Workbook definitions. 
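# NOTE: Illustrative sketch, not part of the original module. It isolates the
# visibility criterion that _secure_query() above builds for secure models:
# a row is visible if it belongs to the current project, is public, or was
# explicitly shared with the project. The model class, project ID and shared
# ID list passed in are hypothetical.
def _example_secure_criterion(model, project_id, shared_res_ids):
    criterion = sa.or_(
        model.project_id == project_id,
        model.scope == 'public'
    )

    # The IN_ predicate is added only when there is something to share,
    # mirroring the comment in _secure_query() about avoiding a useless
    # SAWarning and an extra DB call.
    if shared_res_ids:
        criterion = sa.or_(criterion, model.id.in_(shared_res_ids))

    return criterion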
@b.session_aware() def get_workbook(name, namespace=None, fields=(), session=None): wb = _get_db_object_by_name_and_namespace( models.Workbook, name, namespace, columns=fields ) if not wb: raise exc.DBEntityNotFoundError( "Workbook not found [name=%s, namespace=%s]" % (name, namespace) ) return wb @b.session_aware() def load_workbook(name, namespace=None, fields=(), session=None): return _get_db_object_by_name_and_namespace( models.Workbook, name, namespace, columns=fields ) @b.session_aware() def get_workbooks(session=None, **kwargs): return _get_collection(models.Workbook, **kwargs) @b.session_aware() def create_workbook(values, session=None): wb = models.Workbook() wb.update(values.copy()) try: wb.save(session=session) except db_exc.DBDuplicateEntry: raise exc.DBDuplicateEntryError( "Duplicate entry for WorkbookDefinition " "['name', 'namespace', 'project_id']: {}, {}, {}".format( wb.name, wb.namespace, wb.project_id) ) return wb @b.session_aware() def update_workbook(name, values, session=None): namespace = values.get('namespace') wb = get_workbook(name, namespace=namespace) wb.update(values.copy()) return wb @b.session_aware() def create_or_update_workbook(name, values, session=None): if not _get_db_object_by_name(models.Workbook, name): return create_workbook(values) else: return update_workbook(name, values) @b.session_aware() def delete_workbook(name, namespace=None, session=None): namespace = namespace or '' count = _secure_query(models.Workbook).filter( sa.and_( models.Workbook.name == name, models.Workbook.namespace == namespace ) ).delete() if count == 0: raise exc.DBEntityNotFoundError( "Workbook not found [workbook_name=%s, namespace=%s]" % (name, namespace) ) @b.session_aware() def delete_workbooks(session=None, **kwargs): return _delete_all(models.Workbook, **kwargs) # Workflow definitions. @b.session_aware() def get_workflow_definition(identifier, namespace='', fields=(), session=None): """Gets workflow definition by name or uuid. :param identifier: Identifier could be in the format of plain string or uuid. :param namespace: The namespace the workflow is in. Optional. :param fields: Fields that need to be loaded. For example, (WorkflowDefinition.name,) :return: Workflow definition. """ ctx = context.ctx() wf_def = _get_db_object_by_name_and_namespace_or_id( models.WorkflowDefinition, identifier, namespace=namespace, insecure=ctx.is_admin, columns=fields ) if not wf_def: raise exc.DBEntityNotFoundError( "Workflow not found [workflow_identifier=%s, namespace=%s]" % (identifier, namespace) ) return wf_def @b.session_aware() def get_workflow_definition_by_id(id, fields=(), session=None): wf_def = _get_db_object_by_id( models.WorkflowDefinition, id, columns=fields ) if not wf_def: raise exc.DBEntityNotFoundError( "Workflow not found [workflow_id=%s]" % id ) return wf_def @b.session_aware() def load_workflow_definition(name, namespace='', fields=(), session=None): model = models.WorkflowDefinition query = _secure_query(model, *fields) filter_ = sa.and_( model.name == name, model.namespace.in_([namespace, '']) ) # Give priority to objects not in the default namespace. 
order_by = model.namespace.desc() if order_by is not None: query = query.order_by(order_by) return query.filter(filter_).first() @b.session_aware() def get_workflow_definitions(fields=None, session=None, **kwargs): if fields and 'input' in fields: fields.remove('input') fields.append('spec') return _get_collection( model=models.WorkflowDefinition, fields=fields, **kwargs ) @b.session_aware() def create_workflow_definition(values, session=None): wf_def = models.WorkflowDefinition() wf_def.update(values.copy()) try: wf_def.save(session=session) except db_exc.DBDuplicateEntry: raise exc.DBDuplicateEntryError( "Duplicate entry for WorkflowDefinition ['name', 'namespace'," " 'project_id']: {}, {}, {}".format(wf_def.name, wf_def.namespace, wf_def.project_id)) return wf_def @b.session_aware() def update_workflow_definition(identifier, values, session=None): namespace = values.get('namespace') wf_def = get_workflow_definition(identifier, namespace=namespace) m_dbutils.check_db_obj_access(wf_def) if wf_def.scope == 'public' and values['scope'] == 'private': # Check cron triggers. cron_triggers = get_cron_triggers(insecure=True, workflow_id=wf_def.id) for c_t in cron_triggers: if c_t.project_id != wf_def.project_id: raise exc.NotAllowedException( "Can not update scope of workflow that has cron triggers " "associated in other tenants. [workflow_identifier=%s, " "namespace=%s]" % (identifier, namespace) ) # Check event triggers. event_triggers = get_event_triggers( insecure=True, workflow_id=wf_def.id ) for e_t in event_triggers: if e_t.project_id != wf_def.project_id: raise exc.NotAllowedException( "Can not update scope of workflow that has event triggers " "associated in other tenants. [workflow_identifier=%s, " "namespace=%s]" % (identifier, namespace) ) wf_def.update(values.copy()) return wf_def @b.session_aware() def create_or_update_workflow_definition(name, values, session=None): namespace = values.get('namespace') if _get_db_object_by_name_and_namespace_or_id( models.WorkflowDefinition, name, namespace=namespace): return update_workflow_definition(name, values) return create_workflow_definition(values) @b.session_aware() def delete_workflow_definition(identifier, namespace='', session=None): wf_def = get_workflow_definition(identifier, namespace) m_dbutils.check_db_obj_access(wf_def) cron_triggers = get_cron_triggers(insecure=True, workflow_id=wf_def.id) if cron_triggers: raise exc.DBError( "Can't delete workflow that has cron triggers associated. " "[workflow_identifier=%s, namespace=%s], [cron_trigger_id(s)=%s]" % ( identifier, namespace, ', '.join([t.id for t in cron_triggers]) ) ) event_triggers = get_event_triggers(insecure=True, workflow_id=wf_def.id) if event_triggers: raise exc.DBError( "Can't delete workflow that has event triggers associated. " "[workflow_identifier=%s], [event_trigger_id(s)=%s]" % (identifier, ', '.join([t.id for t in event_triggers])) ) # Delete workflow members first. delete_resource_members(resource_type='workflow', resource_id=wf_def.id) session.delete(wf_def) @b.session_aware() def delete_workflow_definitions(session=None, **kwargs): return _delete_all(models.WorkflowDefinition, **kwargs) # Action definitions. 
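# NOTE: Illustrative sketch, not part of the original module. It mimics the
# namespace fallback in load_workflow_definition() above with plain strings:
# the query matches both the requested namespace and the default one (''),
# and "ORDER BY namespace DESC" makes the non-default row come first, so it
# wins whenever both rows exist.
def _example_namespace_priority(requested_namespace):
    candidates = ['', requested_namespace]

    # A descending sort puts the non-empty (requested) namespace first,
    # exactly like order_by(model.namespace.desc()) above.
    return sorted(candidates, reverse=True)[0]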
@b.session_aware() def get_action_definition_by_id(id, fields=(), session=None): action_def = _get_db_object_by_id( models.ActionDefinition, id, columns=fields ) if not action_def: raise exc.DBEntityNotFoundError( "Action not found [action_id=%s]" % id ) return action_def @b.session_aware() def get_action_definition(identifier, fields=(), session=None, namespace=''): a_def = _get_db_object_by_name_and_namespace_or_id( models.ActionDefinition, identifier, namespace=namespace, columns=fields ) # If the action was not found in the given namespace, # look in the default namespace if not a_def: a_def = _get_db_object_by_name_and_namespace_or_id( models.ActionDefinition, identifier, namespace='', columns=fields ) if not a_def: raise exc.DBEntityNotFoundError( "Action definition not found [action_name=%s,namespace=%s]" % (identifier, namespace) ) return a_def @b.session_aware() def load_action_definition(name, fields=(), session=None, namespace=''): return _get_db_object_by_name_and_namespace( models.ActionDefinition, name, namespace=namespace, columns=fields ) @b.session_aware() def get_action_definitions(session=None, **kwargs): return _get_collection(model=models.ActionDefinition, **kwargs) @b.session_aware() def create_action_definition(values, session=None): a_def = models.ActionDefinition() a_def.update(values) try: a_def.save(session=session) except db_exc.DBDuplicateEntry: raise exc.DBDuplicateEntryError( "Duplicate entry for Action ['name', 'namespace', 'project_id']:" " {}, {}, {}".format(a_def.name, a_def.namespace, a_def.project_id) ) return a_def @b.session_aware() def update_action_definition(identifier, values, session=None): namespace = values.get('namespace', '') a_def = get_action_definition(identifier, namespace=namespace) a_def.update(values.copy()) return a_def @b.session_aware() def create_or_update_action_definition(name, values, session=None): namespace = values.get('namespace', '') if not _get_db_object_by_name_and_namespace( models.ActionDefinition, name, namespace=namespace): return create_action_definition(values) else: return update_action_definition(name, values) @b.session_aware() def delete_action_definition(identifier, namespace='', session=None): a_def = get_action_definition(identifier, namespace=namespace) session.delete(a_def) @b.session_aware() def delete_action_definitions(session=None, **kwargs): return _delete_all(models.ActionDefinition, **kwargs) # Action executions. 
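# NOTE: Illustrative sketch, not part of the original module. It shows the
# error translation pattern used by the create_*() functions above: the
# low-level oslo.db DBDuplicateEntry is re-raised as the Mistral-level
# DBDuplicateEntryError, so callers only need to handle Mistral exceptions.
# The fallback behavior here (returning None) is a hypothetical choice.
def _example_duplicate_entry_translation(values):
    try:
        return create_action_definition(values)
    except exc.DBDuplicateEntryError:
        # An action with the same name/namespace/project already exists.
        return None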
@b.session_aware() def get_action_execution(id, insecure=False, fields=(), session=None): a_ex = _get_db_object_by_id(models.ActionExecution, id, insecure=insecure, columns=fields) if not a_ex: raise exc.DBEntityNotFoundError( "ActionExecution not found [id=%s]" % id ) return a_ex @b.session_aware() def load_action_execution(id, fields=(), session=None): return _get_db_object_by_id(models.ActionExecution, id, columns=fields) @b.session_aware() def get_action_executions(session=None, **kwargs): return _get_action_executions(**kwargs) @b.session_aware() def create_action_execution(values, session=None): a_ex = models.ActionExecution() a_ex.update(values.copy()) try: a_ex.save(session=session) except db_exc.DBDuplicateEntry as e: raise exc.DBDuplicateEntryError( "Duplicate entry for ActionExecution ID: {}".format(e.value) ) return a_ex @b.session_aware() def update_action_execution(id, values, insecure=False, session=None): a_ex = get_action_execution(id, insecure) a_ex.update(values.copy()) return a_ex @b.session_aware() def create_or_update_action_execution(id, values, session=None): if not _get_db_object_by_id(models.ActionExecution, id): return create_action_execution(values) else: return update_action_execution(id, values) @b.session_aware() def update_action_execution_heartbeat(id, session=None): if not id: raise exc.DBEntityNotFoundError now = utils.utc_now_sec() session.query(models.ActionExecution). \ filter(models.ActionExecution.id == id). \ update({'last_heartbeat': now}) @b.session_aware() def delete_action_execution(id, session=None): count = _secure_query(models.ActionExecution).filter( models.ActionExecution.id == id).delete() if count == 0: raise exc.DBEntityNotFoundError( "ActionExecution not found [id=%s]" % id ) @b.session_aware() def delete_action_executions(session=None, **kwargs): return _delete_all(models.ActionExecution, **kwargs) def _get_action_executions(**kwargs): return _get_collection(models.ActionExecution, **kwargs) # Workflow executions. 
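# NOTE: Illustrative sketch, not part of the original module. It shows how a
# heartbeat expiration threshold relates to the 'last_heartbeat' column that
# update_action_execution_heartbeat() above maintains: an execution counts as
# dead once its last heartbeat is older than "now - max_age". The 60-second
# value is an arbitrary example, not a Mistral default.
def _example_heartbeat_threshold():
    max_age = datetime.timedelta(seconds=60)

    expiration_time = utils.utc_now_sec() - max_age

    # Executions with last_heartbeat < expiration_time would be picked up
    # by get_running_expired_sync_action_executions() below.
    return expiration_time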
@b.session_aware() def get_workflow_execution(id, fields=(), session=None): ctx = context.ctx() wf_ex = _get_db_object_by_id( models.WorkflowExecution, id, insecure=ctx.is_admin, columns=fields ) if not wf_ex: raise exc.DBEntityNotFoundError( "WorkflowExecution not found [id=%s]" % id ) return wf_ex @b.session_aware() def load_workflow_execution(id, fields=(), session=None): return _get_db_object_by_id(models.WorkflowExecution, id, columns=fields) @b.session_aware() def get_workflow_executions(session=None, **kwargs): return _get_collection(models.WorkflowExecution, **kwargs) @b.session_aware() def create_workflow_execution(values, session=None): wf_ex = models.WorkflowExecution() wf_ex.update(values.copy()) try: wf_ex.save(session=session) except db_exc.DBDuplicateEntry as e: raise exc.DBDuplicateEntryError( "Duplicate entry for WorkflowExecution with ID: {value} ".format( value=e.value ) ) return wf_ex @b.session_aware() def update_workflow_execution(id, values, session=None): wf_ex = get_workflow_execution(id) m_dbutils.check_db_obj_access(wf_ex) wf_ex.update(values.copy()) return wf_ex @b.session_aware() def create_or_update_workflow_execution(id, values, session=None): if not _get_db_object_by_id(models.WorkflowExecution, id): return create_workflow_execution(values) else: return update_workflow_execution(id, values) @b.session_aware() def delete_workflow_execution(id, session=None): model = models.WorkflowExecution insecure = context.ctx().is_admin query = b.model_query(model) if insecure else _secure_query(model) try: count = query.filter( models.WorkflowExecution.id == id ).delete() if count == 0: raise exc.DBEntityNotFoundError( "WorkflowExecution not found [id=%s]" % id ) except db_exc.DBError as e: if is_mysql_max_depth_error(e) or is_mariadb_max_depth_error(e): # https://bugs.launchpad.net/mistral/+bug/1832300 # mysql cascade delete error delete_workflow_execution_recurse(id) else: raise def is_mysql_max_depth_error(e): pattern = ".*3008.*Foreign key cascade delete" \ "/update exceeds max depth of 15.*" return re.match(pattern, str(e)) def is_mariadb_max_depth_error(e): pattern = ".*Got error 193.*ON DELETE CASCADE.*" return re.match(pattern, str(e)) def delete_workflow_execution_recurse(wf_ex_id): sub_wf_ex_ids = _get_all_direct_subworkflows(wf_ex_id) for sub_wf_ex_id in sub_wf_ex_ids: delete_workflow_execution(sub_wf_ex_id) delete_workflow_execution(wf_ex_id) def _get_all_direct_subworkflows(wf_ex_id): model = models.WorkflowExecution insecure = context.ctx().is_admin if insecure: query = b.model_query(model, [model.id]) else: query = _secure_query(model, model.id) query = query.join( models.TaskExecution, models.WorkflowExecution.task_execution_id == models.TaskExecution.id ).filter( models.TaskExecution.workflow_execution_id == wf_ex_id ) return [i[0] for i in query.all()] @b.session_aware() def delete_workflow_executions(session=None, **kwargs): return _delete_all(models.WorkflowExecution, **kwargs) def update_workflow_execution_state(id, cur_state, state): specimen = models.WorkflowExecution(id=id, state=cur_state) return update_on_match(id, specimen, values={'state': state}, attempts=1) # Tasks executions. 
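# NOTE: Illustrative sketch, not part of the original module. It feeds a
# hypothetical sample of a MySQL "cascade depth" error message through
# is_mysql_max_depth_error() above. A real server message may be worded
# differently; only the parts covered by the regex (the 3008 code and the
# "Foreign key cascade delete/update exceeds max depth of 15" text) matter.
def _example_mysql_max_depth_match():
    sample = (
        "(pymysql.err.OperationalError) (3008, 'Foreign key cascade "
        "delete/update exceeds max depth of 15.')"
    )

    return bool(is_mysql_max_depth_error(sample))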
@b.session_aware() def get_task_execution(id, fields=(), session=None): task_ex = _get_db_object_by_id(models.TaskExecution, id, columns=fields) if not task_ex: raise exc.DBEntityNotFoundError( "Task execution not found [id=%s]" % id ) return task_ex @b.session_aware() def load_task_execution(id, fields=(), session=None): return _get_db_object_by_id(models.TaskExecution, id, columns=fields) @b.session_aware() def get_task_executions(session=None, **kwargs): return _get_collection(models.TaskExecution, **kwargs) @b.session_aware() def get_task_executions_count(session=None, **kwargs): query = b.model_query(models.TaskExecution) query = query.filter_by(**kwargs) return query.count() def _get_completed_task_executions_query(kwargs): query = b.model_query(models.TaskExecution) query = query.filter_by(**kwargs) query = query.filter( models.TaskExecution.state.in_( [states.ERROR, states.CANCELLED, states.SUCCESS] ) ) return query @b.session_aware() def get_completed_task_executions(session=None, **kwargs): query = _get_completed_task_executions_query(kwargs) return query.all() @b.session_aware() def get_completed_task_executions_as_batches(session=None, **kwargs): # NOTE: Using batch querying seriously allows to optimize memory # consumption on operations when we need to iterate through # a list of task executions and do some processing like merging # their inbound contexts. If we don't use batches Mistral has to # hold all the collection (that can be large) in memory. # Using a generator that returns batches lets GC to collect a # batch of task executions that has already been processed. query = _get_completed_task_executions_query(kwargs) # Batch size 20 may be arguable but still seems reasonable: it's big # enough to keep the total number of DB hops small (say for 100 tasks # we'll need only 5) and small enough not to drastically increase # memory footprint if the number of tasks is big like several hundreds. 
batch_size = 20 idx = 0 while idx < query.count(): yield query.slice(idx, idx + batch_size).all() idx += batch_size def _get_incomplete_task_executions_query(kwargs): query = b.model_query(models.TaskExecution) query = query.filter_by(**kwargs) query = query.filter( models.TaskExecution.state.in_( [states.IDLE, states.RUNNING, states.WAITING, states.RUNNING_DELAYED, states.PAUSED] ) ) return query @b.session_aware() def get_incomplete_task_executions(session=None, **kwargs): query = _get_incomplete_task_executions_query(kwargs) return query.all() @b.session_aware() def get_incomplete_task_executions_count(session=None, **kwargs): query = _get_incomplete_task_executions_query(kwargs) return query.count() @b.session_aware() def create_task_execution(values, session=None): task_ex = models.TaskExecution() task_ex.update(values) try: task_ex.save(session=session) except db_exc.DBDuplicateEntry as e: raise exc.DBDuplicateEntryError( "Duplicate entry for TaskExecution ID: {}".format(e.value) ) return task_ex @b.session_aware() def update_task_execution(id, values, session=None): task_ex = get_task_execution(id) task_ex.update(values.copy()) return task_ex @b.session_aware() def create_or_update_task_execution(id, values, session=None): if not _get_db_object_by_id(models.TaskExecution, id): return create_task_execution(values) else: return update_task_execution(id, values) @b.session_aware() def delete_task_execution(id, session=None): count = _secure_query(models.TaskExecution).filter( models.TaskExecution.id == id).delete() if count == 0: raise exc.DBEntityNotFoundError( "Task execution not found [id=%s]" % id ) @b.session_aware() def delete_task_executions(session=None, **kwargs): return _delete_all(models.TaskExecution, **kwargs) def update_task_execution_state(id, cur_state, state): specimen = models.TaskExecution(id=id, state=cur_state) return update_on_match(id, specimen, values={'state': state}, attempts=1) # Delayed calls. @b.session_aware() def create_delayed_call(values, session=None): delayed_call = models.DelayedCall() delayed_call.update(values.copy()) try: delayed_call.save(session) except db_exc.DBDuplicateEntry as e: raise exc.DBDuplicateEntryError( "Duplicate entry for DelayedCall ID: {}".format(e.value) ) return delayed_call @b.session_aware() def delete_delayed_call(id, session=None): # It's safe to use insecure query here because users can't access # delayed calls. 
count = b.model_query(models.DelayedCall).filter( models.DelayedCall.id == id).delete() if count == 0: raise exc.DBEntityNotFoundError( "Delayed Call not found [id=%s]" % id ) @b.session_aware() def get_delayed_calls_to_start(time, batch_size=None, session=None): query = b.model_query(models.DelayedCall) query = query.filter(models.DelayedCall.execution_time < time) query = query.filter_by(processing=False) query = query.order_by(models.DelayedCall.execution_time) query = query.limit(batch_size) return query.all() @b.session_aware() def update_delayed_call(id, values, query_filter=None, session=None): if query_filter: try: specimen = models.DelayedCall(id=id, **query_filter) delayed_call = b.model_query( models.DelayedCall).update_on_match(specimen=specimen, surrogate_key='id', values=values) return delayed_call, 1 except oslo_sqlalchemy.update_match.NoRowsMatched as e: LOG.debug( "No rows matched for update call [id=%s, values=%s, " "query_filter=%s," "exception=%s]", id, values, query_filter, e ) return None, 0 else: delayed_call = get_delayed_call(id=id, session=session) delayed_call.update(values) return delayed_call, len(session.dirty) @b.session_aware() def get_delayed_call(id, session=None): delayed_call = _get_db_object_by_id(models.DelayedCall, id) if not delayed_call: raise exc.DBEntityNotFoundError( "Delayed Call not found [id=%s]" % id ) return delayed_call @b.session_aware() def get_delayed_calls(session=None, **kwargs): return _get_collection(model=models.DelayedCall, **kwargs) @b.session_aware() def get_delayed_calls_count(session=None, **kwargs): return _get_count(model=models.DelayedCall, **kwargs) @b.session_aware() def delete_delayed_calls(session=None, **kwargs): return _delete_all(models.DelayedCall, **kwargs) # Scheduled jobs. @b.session_aware() def create_scheduled_job(values, session=None): job = models.ScheduledJob() job.update(values.copy()) try: job.save(session) except db_exc.DBDuplicateEntry as e: raise exc.DBDuplicateEntryError( "Duplicate entry for ScheduledJob ID: {}".format(e.value) ) return job @b.session_aware() def get_scheduled_jobs_to_start(time, batch_size=None, session=None): query = b.model_query(models.ScheduledJob) execute_at_col = models.ScheduledJob.execute_at captured_at_col = models.ScheduledJob.captured_at # Filter by execution time accounting for a configured job pickup interval. # TODO(rakhmerov): Configuration options should not be accessed here. query = query.filter( execute_at_col < time - datetime.timedelta(seconds=CONF.scheduler.pickup_job_after) ) # Filter by captured time accounting for a configured captured job timeout. 
    min_captured_at = (
        utils.utc_now_sec() -
        datetime.timedelta(seconds=CONF.scheduler.captured_job_timeout)
    )

    query = query.filter(
        sa.or_(
            captured_at_col == sa.null(),
            captured_at_col <= min_captured_at
        )
    )

    query = query.order_by(execute_at_col)
    query = query.limit(batch_size)

    return query.all()


@b.session_aware()
def update_scheduled_job(id, values, query_filter=None, session=None):
    if query_filter:
        try:
            specimen = models.ScheduledJob(id=id, **query_filter)

            job = b.model_query(
                models.ScheduledJob
            ).update_on_match(
                specimen=specimen,
                surrogate_key='id',
                values=values
            )

            return job, 1
        except oslo_sqlalchemy.update_match.NoRowsMatched as e:
            LOG.debug(
                "No rows matched for update scheduled job [id=%s, values=%s, "
                "query_filter=%s, exception=%s]",
                id, values, query_filter, e
            )

            return None, 0
    else:
        job = get_scheduled_job(id=id, session=session)

        job.update(values)

        return job, len(session.dirty)


@b.session_aware()
def get_scheduled_job(id, session=None):
    job = _get_db_object_by_id(models.ScheduledJob, id)

    if not job:
        raise exc.DBEntityNotFoundError(
            "Scheduled job not found [id=%s]" % id
        )

    return job


@b.session_aware()
def delete_scheduled_job(id, session=None):
    # It's safe to use an insecure query here because users can't access
    # scheduled jobs.
    count = b.model_query(models.ScheduledJob).filter(
        models.ScheduledJob.id == id).delete()

    if count == 0:
        raise exc.DBEntityNotFoundError(
            "Scheduled job not found [id=%s]" % id
        )


def get_scheduled_jobs(**kwargs):
    return _get_collection(model=models.ScheduledJob, **kwargs)


@b.session_aware()
def delete_scheduled_jobs(session=None, **kwargs):
    return _delete_all(models.ScheduledJob, **kwargs)


def get_scheduled_jobs_count(**kwargs):
    return _get_count(model=models.ScheduledJob, **kwargs)


# Other functions.

@b.session_aware()
def get_expired_executions(expiration_time, limit=None, columns=(),
                           session=None):
    query = _get_completed_root_executions_query(columns)

    query = query.filter(
        models.WorkflowExecution.updated_at < expiration_time
    )

    if limit:
        query = query.limit(limit)

    return query.all()


@b.session_aware()
def get_running_expired_sync_action_executions(expiration_time, limit,
                                               session=None):
    query = b.model_query(models.ActionExecution)

    query = query.filter(
        models.ActionExecution.last_heartbeat < expiration_time
    )
    query = query.filter_by(is_sync=True)
    query = query.filter(models.ActionExecution.state == states.RUNNING)

    if limit:
        # Query.limit() returns a new Query object, so its result must be
        # reassigned for the limit to actually take effect.
        query = query.limit(limit)

    return query.all()


@b.session_aware()
def get_superfluous_executions(max_finished_executions, limit=None,
                               columns=(), session=None):
    if not max_finished_executions:
        return []

    query = _get_completed_root_executions_query(columns)
    query = query.order_by(models.WorkflowExecution.updated_at.desc())
    query = query.offset(max_finished_executions)

    if limit:
        query = query.limit(limit)

    return query.all()


def _get_completed_root_executions_query(columns):
    query = b.model_query(models.WorkflowExecution, columns=columns)

    # This is an empty list by default.
    ignored_states = CONF.execution_expiration_policy.ignored_states

    desired_states = states.TERMINAL_STATES - set(ignored_states)

    # Only workflow executions that are not a child of
    # other workflow executions.
    query = query.filter(
        models.WorkflowExecution.task_execution_id == sa.null()
    )

    query = query.filter(
        models.WorkflowExecution.state.in_(desired_states)
    )

    return query


@b.session_aware()
def get_cron_trigger(identifier, session=None):
    ctx = context.ctx()

    cron_trigger = _get_db_object_by_name_and_namespace_or_id(
        models.CronTrigger,
        identifier,
        insecure=ctx.is_admin
    )

    if not cron_trigger:
        raise exc.DBEntityNotFoundError(
            "Cron trigger not found [identifier=%s]" % identifier
        )

    return cron_trigger


@b.session_aware()
def get_cron_trigger_by_id(id, session=None):
    ctx = context.ctx()

    cron_trigger = _get_db_object_by_id(models.CronTrigger, id,
                                        insecure=ctx.is_admin)

    if not cron_trigger:
        raise exc.DBEntityNotFoundError(
            "Cron trigger not found [id=%s]" % id
        )

    return cron_trigger


@b.session_aware()
def load_cron_trigger(identifier, session=None):
    return _get_db_object_by_name_and_namespace_or_id(
        models.CronTrigger,
        identifier
    )


@b.session_aware()
def get_cron_triggers(session=None, **kwargs):
    return _get_collection(models.CronTrigger, **kwargs)


@b.session_aware()
def get_next_cron_triggers(time, session=None):
    query = b.model_query(models.CronTrigger)

    query = query.filter(models.CronTrigger.next_execution_time < time)
    query = query.order_by(models.CronTrigger.next_execution_time)

    return query.all()


@b.session_aware()
def create_cron_trigger(values, session=None):
    cron_trigger = models.CronTrigger()

    cron_trigger.update(values)

    try:
        cron_trigger.save(session=session)
    except db_exc.DBDuplicateEntry:
        raise exc.DBDuplicateEntryError(
            "Duplicate entry for cron trigger ['name', 'project_id']: "
            "{}, {}".format(cron_trigger.name, cron_trigger.project_id)
        )
    # TODO(nmakhotkin): Remove this 'except' after fixing
    # https://bugs.launchpad.net/oslo.db/+bug/1458583.
    except db_exc.DBError as e:
        raise exc.DBDuplicateEntryError(
            "Duplicate entry for cron trigger: %s" % e
        )

    return cron_trigger


@b.session_aware()
def update_cron_trigger(identifier, values, session=None, query_filter=None):
    cron_trigger = get_cron_trigger(identifier)

    if query_filter:
        try:
            # Execute the UPDATE statement with the query_filter as the WHERE.
            specimen = models.CronTrigger(id=cron_trigger.id, **query_filter)

            query = b.model_query(models.CronTrigger)

            cron_trigger = query.update_on_match(
                specimen=specimen,
                surrogate_key='id',
                values=values
            )

            return cron_trigger, 1
        except oslo_sqlalchemy.update_match.NoRowsMatched:
            LOG.debug(
                "No rows matched for cron update call "
                "[identifier=%s, values=%s, query_filter=%s]",
                identifier, values, query_filter
            )

            return cron_trigger, 0
    else:
        cron_trigger.update(values.copy())

        return cron_trigger, len(session.dirty)


@b.session_aware()
def create_or_update_cron_trigger(identifier, values, session=None):
    cron_trigger = _get_db_object_by_name_and_namespace_or_id(
        models.CronTrigger,
        identifier
    )

    if not cron_trigger:
        return create_cron_trigger(values)
    else:
        updated, _ = update_cron_trigger(identifier, values)

        return updated


@b.session_aware()
def delete_cron_trigger(identifier, session=None):
    cron_trigger = get_cron_trigger(identifier)

    m_dbutils.check_db_obj_access(cron_trigger)

    # Delete the cron trigger by ID and get the affected row count.
    table = models.CronTrigger.__table__
    result = session.execute(
        table.delete().where(table.c.id == cron_trigger.id)
    )

    return result.rowcount


@b.session_aware()
def delete_cron_triggers(session=None, **kwargs):
    return _delete_all(models.CronTrigger, **kwargs)


# Environments.
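# NOTE: The functions below manage Environment objects. A minimal
# illustrative sketch of a typical call sequence (hypothetical names and
# values, not part of this module):
#
#     env = create_or_update_environment(
#         'devstack',
#         {'name': 'devstack', 'variables': {'region': 'RegionOne'}}
#     )
#     assert get_environment('devstack').variables['region'] == 'RegionOne'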
@b.session_aware() def get_environment(name, session=None): env = _get_db_object_by_name(models.Environment, name) if not env: raise exc.DBEntityNotFoundError( "Environment not found [name=%s]" % name ) return env @b.session_aware() def load_environment(name, session=None): return _get_db_object_by_name(models.Environment, name) @b.session_aware() def get_environments(session=None, **kwargs): return _get_collection(models.Environment, **kwargs) @b.session_aware() def create_environment(values, session=None): env = models.Environment() env.update(values) try: env.save(session=session) except db_exc.DBDuplicateEntry: raise exc.DBDuplicateEntryError( "Duplicate entry for Environment ['name', 'project_id']:" " {}, {}".format(env.name, env.project_id) ) return env @b.session_aware() def update_environment(name, values, session=None): env = get_environment(name) env.update(values) return env @b.session_aware() def create_or_update_environment(name, values, session=None): env = _get_db_object_by_name(models.Environment, name) if not env: return create_environment(values) else: return update_environment(name, values) @b.session_aware() def delete_environment(name, session=None): count = _secure_query(models.Environment).filter( models.Environment.name == name).delete() if count == 0: raise exc.DBEntityNotFoundError( "Environment not found [name=%s]" % name ) @b.session_aware() def delete_environments(session=None, **kwargs): return _delete_all(models.Environment, **kwargs) # Resource members. RESOURCE_MAPPING = { models.WorkflowDefinition: 'workflow', models.Workbook: 'workbook' } def _get_criterion(resource_id, member_id=None, is_owner=True): """Generates criterion for querying resource_member_v2 table.""" # Resource owner query resource membership with member_id. if is_owner and member_id: return sa.and_( models.ResourceMember.project_id == security.get_project_id(), models.ResourceMember.resource_id == resource_id, models.ResourceMember.member_id == member_id ) # Resource owner query resource memberships. elif is_owner and not member_id: return sa.and_( models.ResourceMember.project_id == security.get_project_id(), models.ResourceMember.resource_id == resource_id, ) # Other members query other resource membership. elif not is_owner and member_id and member_id != security.get_project_id(): return None # Resource member query resource memberships. return sa.and_( models.ResourceMember.member_id == security.get_project_id(), models.ResourceMember.resource_id == resource_id ) @b.session_aware() def create_resource_member(values, session=None): res_member = models.ResourceMember() res_member.update(values.copy()) try: res_member.save(session=session) except db_exc.DBDuplicateEntry: raise exc.DBDuplicateEntryError( "Duplicate entry for ResourceMember ['resource_id'," " 'resource_type', 'member_id']: {}, {}, " "{}".format(res_member.resource_id, res_member.resource_type, res_member.member_id) ) return res_member @b.session_aware() def get_resource_member(resource_id, res_type, member_id, session=None): query = _secure_query(models.ResourceMember).filter_by( resource_type=res_type ) # Both resource owner and resource member can do query. 
res_member = query.filter( sa.or_( _get_criterion(resource_id, member_id), _get_criterion(resource_id, member_id, is_owner=False) ) ).first() if not res_member: raise exc.DBEntityNotFoundError( "Resource member not found [resource_id=%s, member_id=%s]" % (resource_id, member_id) ) return res_member @b.session_aware() def get_resource_members(resource_id, res_type, session=None): query = _secure_query(models.ResourceMember).filter_by( resource_type=res_type ) # Both resource owner and resource member can do query. res_members = query.filter( sa.or_( _get_criterion(resource_id), _get_criterion(resource_id, is_owner=False), ) ).all() return res_members @b.session_aware() def update_resource_member(resource_id, res_type, member_id, values, session=None): # Only member who is not the owner of the resource can update the # membership status. if member_id != security.get_project_id(): raise exc.DBEntityNotFoundError( "Resource member not found [resource_id=%s, member_id=%s]" % (resource_id, member_id) ) query = _secure_query(models.ResourceMember).filter_by( resource_type=res_type ) res_member = query.filter( _get_criterion(resource_id, member_id, is_owner=False) ).first() if not res_member: raise exc.DBEntityNotFoundError( "Resource member not found [resource_id=%s, member_id=%s]" % (resource_id, member_id) ) res_member.update(values.copy()) return res_member @b.session_aware() def delete_resource_member(resource_id, res_type, member_id, session=None): query = _secure_query(models.ResourceMember). \ filter_by(resource_type=res_type). \ filter(_get_criterion(resource_id, member_id)) # TODO(kong): Check association with cron triggers when deleting a workflow # member which is in 'accepted' status. count = query.delete() if count == 0: raise exc.DBEntityNotFoundError( "Resource member not found [resource_id=%s, member_id=%s]" % (resource_id, member_id) ) @b.session_aware() def delete_resource_members(session=None, **kwargs): return _delete_all(models.ResourceMember, **kwargs) def _get_accepted_resources(res_type): resources = _secure_query(models.ResourceMember).filter( sa.and_( models.ResourceMember.resource_type == res_type, models.ResourceMember.status == 'accepted', models.ResourceMember.member_id == security.get_project_id() ) ).all() return resources # Event triggers. @b.session_aware() def get_event_trigger(id, insecure=False, session=None): event_trigger = _get_db_object_by_id(models.EventTrigger, id, insecure) if not event_trigger: raise exc.DBEntityNotFoundError( "Event trigger not found [id=%s]." % id ) return event_trigger @b.session_aware() def load_event_trigger(id, insecure=False, session=None): return _get_db_object_by_id(models.EventTrigger, id, insecure) @b.session_aware() def get_event_triggers(session=None, **kwargs): return _get_collection(model=models.EventTrigger, **kwargs) @b.session_aware() def create_event_trigger(values, session=None): event_trigger = models.EventTrigger() event_trigger.update(values) try: event_trigger.save(session=session) except db_exc.DBDuplicateEntry: raise exc.DBDuplicateEntryError( "Duplicate entry for EventTrigger ['exchange', 'topic'," " 'event', 'workflow_id', 'project_id']:" " {}, {}, {}, {}, {}".format(event_trigger.exchange, event_trigger.topic, event_trigger.event, event_trigger.workflow_id, event_trigger.project_id)) # TODO(nmakhotkin): Remove this 'except' after fixing # https://bugs.launchpad.net/oslo.db/+bug/1458583. 
    except db_exc.DBError as e:
        raise exc.DBDuplicateEntryError(
            "Duplicate entry for event trigger: %s" % e
        )

    return event_trigger


@b.session_aware()
def update_event_trigger(id, values, session=None):
    event_trigger = get_event_trigger(id)

    event_trigger.update(values.copy())

    return event_trigger


@b.session_aware()
def delete_event_trigger(id, session=None):
    # It's safe to use an insecure query here because users can't access
    # event triggers.
    count = b.model_query(models.EventTrigger).filter(
        models.EventTrigger.id == id).delete()

    if count == 0:
        raise exc.DBEntityNotFoundError(
            "Event trigger not found [id=%s]." % id
        )


@b.session_aware()
def delete_event_triggers(session=None, **kwargs):
    return _delete_all(models.EventTrigger, **kwargs)


# Locks.

@b.session_aware()
def create_named_lock(name, session=None):
    # This method can't rely on the regular SQLAlchemy session behavior
    # because the session may not immediately issue an SQL query to the
    # database and instead just schedule it, whereas we need to make sure
    # the query is issued immediately.
    session.flush()

    insert = models.NamedLock.__table__.insert()

    lock_id = utils.generate_unicode_uuid()

    session.execute(insert.values(id=lock_id, name=name))

    session.flush()

    return lock_id


@b.session_aware()
def get_named_locks(session=None, **kwargs):
    return _get_collection(models.NamedLock, **kwargs)


@b.session_aware()
def delete_named_lock(lock_id, session=None):
    # This method can't rely on the regular SQLAlchemy session behavior
    # because the session may not immediately issue an SQL query to the
    # database and instead just schedule it, whereas we need to make sure
    # the query is issued immediately.
    session.flush()

    table = models.NamedLock.__table__

    delete = table.delete()

    session.execute(delete.where(table.c.id == lock_id))

    session.flush()


@contextlib.contextmanager
def named_lock(name):
    # NOTE(rakhmerov): We can't use the well-known try-finally pattern here
    # because if lock creation fails it means that the SQLAlchemy session
    # is no longer valid and we can't use it to try to delete the lock.
    # All we can do here is let the exception bubble up so that the
    # transaction management code can roll back the transaction.
    lock_id = create_named_lock(name)

    yield

    delete_named_lock(lock_id)


# File: mistral-10.0.0.0b3/mistral/db/v2/sqlalchemy/filters.py

# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
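# NOTE: apply_filters() below accepts per-field filter specs either as a
# plain value (exact match) or as an {operator: value} dict. A minimal
# illustrative sketch of the accepted shapes (hypothetical model and
# values, not part of this module):
#
#     query = apply_filters(
#         query,
#         models.WorkflowExecution,
#         name='my_wf',                          # plain equality
#         state={'in': ['SUCCESS', 'ERROR']},    # membership
#         updated_at={'lt': some_datetime}       # comparison
#     )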
import sqlalchemy as sa


def apply_filters(query, model, **filters):
    filter_dict = {}

    for key, value in filters.items():
        column_attr = getattr(model, key)

        if key == 'tags':
            continue

        if isinstance(value, dict):
            if 'in' in value:
                query = query.filter(column_attr.in_(value['in']))
            elif 'nin' in value:
                query = query.filter(~column_attr.in_(value['nin']))
            elif 'neq' in value:
                query = query.filter(column_attr != value['neq'])
            elif 'gt' in value:
                query = query.filter(column_attr > value['gt'])
            elif 'gte' in value:
                query = query.filter(column_attr >= value['gte'])
            elif 'lt' in value:
                query = query.filter(column_attr < value['lt'])
            elif 'lte' in value:
                query = query.filter(column_attr <= value['lte'])
            elif 'eq' in value:
                query = query.filter(column_attr == value['eq'])
            elif 'has' in value:
                like_pattern = '%{0}%'.format(value['has'])

                query = query.filter(column_attr.like(like_pattern))
        else:
            filter_dict[key] = value

    # We need to handle the tag case separately because the tag datatype
    # is MutableList.
    # TODO(hparekh): Need to think about how we can get rid of this.
    tags = filters.pop('tags', None)

    if isinstance(tags, dict):
        tags = tags.get("eq")

    # To match the tag list, a resource must contain at least all of the
    # tags present in the filter parameter.
    if tags:
        if ',' in tags:
            tags = tags.split(',')

        tag_attr = getattr(model, 'tags')

        if not isinstance(tags, list):
            expr = tag_attr.contains(tags)
        else:
            expr = sa.and_(*[tag_attr.contains(tag) for tag in tags])

        query = query.filter(expr)

    if filter_dict:
        query = query.filter_by(**filter_dict)

    return query


# File: mistral-10.0.0.0b3/mistral/db/v2/sqlalchemy/models.py

# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import hashlib
import json
import sys

from oslo_config import cfg
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship

from mistral.db.sqlalchemy import model_base as mb
from mistral.db.sqlalchemy import types as st
from mistral import exceptions as exc
from mistral.services import security
from mistral_lib import utils


# Definition objects.

CONF = cfg.CONF

LOG = logging.getLogger(__name__)


def _get_hash_function_by(column_name):
    def calc_hash(context):
        val = context.current_parameters[column_name] or {}

        if isinstance(val, dict):
            # If the value is a dictionary we need to make sure to have
            # keys in the same order in a string representation.
            hash_base = json.dumps(sorted(val.items()))
        else:
            hash_base = str(val)

        return hashlib.sha256(hash_base.encode('utf-8')).hexdigest()

    return calc_hash


def validate_long_type_length(cls, field_name, value):
    """Makes sure the value does not exceed the maximum size."""
    if value:
        # Get the configured limit.
        size_limit_kb = cfg.CONF.engine.execution_field_size_limit_kb

        # If the size is unlimited.
        if size_limit_kb < 0:
            return

        size_kb = int(sys.getsizeof(str(value)) / 1024)

        if size_kb > size_limit_kb:
            msg = (
                "Field size limit exceeded"
                " [class={}, field={}, size={}KB, limit={}KB]"
            ).format(
                cls.__name__,
                field_name,
                size_kb,
                size_limit_kb
            )

            LOG.error(msg)

            raise exc.SizeLimitExceededException(msg)


def validate_name_has_no_spaces(name):
    """Makes sure the name does not contain spaces."""
    if name:
        if " " in name:
            msg = (
                "Name '{}' must not contain spaces"
            ).format(name)

            LOG.error(msg)

            raise exc.InvalidModelException(msg)


def register_length_validator(attr_name):
    """Register an event listener on the attribute.

    This event listener will validate the size every time a 'set' occurs.
    """
    for cls in utils.iter_subclasses(Execution):
        if hasattr(cls, attr_name):
            event.listen(
                getattr(cls, attr_name),
                'set',
                # Bind 'cls' as a default argument so that each listener
                # validates against its own class instead of the last class
                # produced by the loop (late-binding closure pitfall).
                lambda t, v, o, i, cls=cls: validate_long_type_length(
                    cls, attr_name, v
                )
            )


def register_name_validator():
    """Register an event listener on the attribute.

    This event listener will validate that the name of an object does not
    contain spaces every time a 'set' occurs.
    """
    for cls in utils.iter_subclasses(Definition):
        event.listen(
            getattr(cls, "name"),
            'set',
            lambda t, v, o, i: validate_name_has_no_spaces(v)
        )


class Definition(mb.MistralSecureModelBase):
    __abstract__ = True

    id = mb.id_column()
    name = sa.Column(sa.String(255))
    definition = sa.Column(st.MediumText(), nullable=True)
    spec = sa.Column(st.JsonMediumDictType())
    tags = sa.Column(st.JsonListType())
    is_system = sa.Column(sa.Boolean())


# There's no WorkbookExecution so we safely omit "Definition" in the name.
class Workbook(Definition):
    """Contains info about workbook (including definition in Mistral DSL)."""

    __tablename__ = 'workbooks_v2'

    namespace = sa.Column(sa.String(255), nullable=True)

    __table_args__ = (
        sa.UniqueConstraint(
            'name',
            'namespace',
            'project_id'
        ),
        sa.Index('%s_project_id' % __tablename__, 'project_id'),
        sa.Index('%s_scope' % __tablename__, 'scope'),
    )


class WorkflowDefinition(Definition):
    """Contains info about workflow (including definition in Mistral DSL)."""

    __tablename__ = 'workflow_definitions_v2'

    namespace = sa.Column(sa.String(255), nullable=True)

    __table_args__ = (
        sa.UniqueConstraint(
            'name',
            'namespace',
            'project_id'
        ),
        sa.Index('%s_is_system' % __tablename__, 'is_system'),
        sa.Index('%s_project_id' % __tablename__, 'project_id'),
        sa.Index('%s_scope' % __tablename__, 'scope'),
    )


class ActionDefinition(Definition):
    """Contains info about registered Actions."""

    __tablename__ = 'action_definitions_v2'

    namespace = sa.Column(sa.String(255), nullable=True)

    __table_args__ = (
        sa.UniqueConstraint(
            'name',
            'namespace',
            'project_id'),
        sa.Index('%s_is_system' % __tablename__, 'is_system'),
        sa.Index('%s_action_class' % __tablename__, 'action_class'),
        sa.Index('%s_project_id' % __tablename__, 'project_id'),
        sa.Index('%s_scope' % __tablename__, 'scope'),
    )

    # Main properties.
    description = sa.Column(sa.Text())
    input = sa.Column(sa.Text())

    # Service properties.
    action_class = sa.Column(sa.String(200))
    attributes = sa.Column(st.JsonDictType())


# Execution objects.

class Execution(mb.MistralSecureModelBase):
    __abstract__ = True

    # Common properties.
id = mb.id_column() name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255), nullable=True) workflow_name = sa.Column(sa.String(255)) workflow_namespace = sa.Column(sa.String(255)) workflow_id = sa.Column(sa.String(80)) state = sa.Column(sa.String(20)) state_info = sa.Column(sa.Text(), nullable=True) tags = sa.Column(st.JsonListType()) # Internal properties which can be used by engine. runtime_context = sa.Column(st.JsonLongDictType()) class ActionExecution(Execution): """Contains action execution information.""" __tablename__ = 'action_executions_v2' __table_args__ = ( sa.Index('%s_project_id' % __tablename__, 'project_id'), sa.Index('%s_scope' % __tablename__, 'scope'), sa.Index('%s_state' % __tablename__, 'state'), sa.Index('%s_updated_at' % __tablename__, 'updated_at') ) # Main properties. spec = sa.Column(st.JsonMediumDictType()) accepted = sa.Column(sa.Boolean(), default=False) input = sa.Column(st.JsonLongDictType(), nullable=True) output = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True)) last_heartbeat = sa.Column( sa.DateTime, default=lambda: utils.utc_now_sec() + datetime.timedelta( seconds=CONF.action_heartbeat.first_heartbeat_timeout ) ) is_sync = sa.Column(sa.Boolean(), default=None, nullable=True) class WorkflowExecution(Execution): """Contains workflow execution information.""" __tablename__ = 'workflow_executions_v2' __table_args__ = ( sa.Index('%s_project_id' % __tablename__, 'project_id'), sa.Index('%s_scope' % __tablename__, 'scope'), sa.Index('%s_state' % __tablename__, 'state'), sa.Index('%s_updated_at' % __tablename__, 'updated_at'), ) # Main properties. spec = sa.orm.deferred(sa.Column(st.JsonMediumDictType())) accepted = sa.Column(sa.Boolean(), default=False) input = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True)) output = sa.orm.deferred(sa.Column(st.JsonLongDictType(), nullable=True)) params = sa.orm.deferred(sa.Column(st.JsonLongDictType())) # Initial workflow context containing workflow variables, environment, # openstack security context etc. # NOTES: # * Data stored in this structure should not be copied into inbound # contexts of tasks. No need to duplicate it. # * This structure does not contain workflow input. context = sa.orm.deferred(sa.Column(st.JsonLongDictType())) class TaskExecution(Execution): """Contains task runtime information.""" __tablename__ = 'task_executions_v2' __table_args__ = ( sa.Index('%s_project_id' % __tablename__, 'project_id'), sa.Index('%s_scope' % __tablename__, 'scope'), sa.Index('%s_state' % __tablename__, 'state'), sa.Index('%s_updated_at' % __tablename__, 'updated_at'), sa.UniqueConstraint('unique_key') ) # Main properties. spec = sa.orm.deferred(sa.Column(st.JsonMediumDictType())) action_spec = sa.Column(st.JsonLongDictType()) unique_key = sa.Column(sa.String(255), nullable=True) type = sa.Column(sa.String(10)) started_at = sa.Column(sa.DateTime, nullable=True) finished_at = sa.Column(sa.DateTime, nullable=True) # Whether the task is fully processed (publishing and calculating commands # after it). It allows to simplify workflow controller implementations # significantly. processed = sa.Column(sa.BOOLEAN, default=False) # Set to True if the completion of the task led to starting new # tasks. # The value of this property should be ignored if the task # is not completed. has_next_tasks = sa.Column(sa.Boolean, default=False) # The names of the next tasks. 
# [(task_name, event)] next_tasks = sa.Column(st.JsonListType()) # Set to True if the task finished with an error and the error # is handled (e.g. with 'on-error' clause for direct workflows) # so that the error shouldn't bubble up to the workflow level. # The value of this property should be ignored if the task # is not completed. error_handled = sa.Column(sa.Boolean, default=False) # Data Flow properties. in_context = sa.Column(st.JsonLongDictType()) published = sa.Column(st.JsonLongDictType()) @property def executions(self): return ( self.action_executions if not self.spec.get('workflow') else self.workflow_executions ) def to_dict(self): d = super(TaskExecution, self).to_dict() utils.datetime_to_str_in_dict(d, 'started_at') utils.datetime_to_str_in_dict(d, 'finished_at') return d for cls in utils.iter_subclasses(Execution): event.listen( # Catch and trim Execution.state_info to always fit allocated size. # Note that the limit is 65500 which is less than 65535 (2^16 -1). # The reason is that utils.cut() is not exactly accurate in case if # the value is not a string, but, for example, a dictionary. If we # limit it exactly to 65535 then once in a while it may go slightly # beyond the allowed maximum size. It may depend on the order of # keys in a string representation and other things that are hidden # inside utils.cut_dict() method. cls.state_info, 'set', lambda t, v, o, i: utils.cut(v, 65500), retval=True ) # Many-to-one for 'ActionExecution' and 'TaskExecution'. ActionExecution.task_execution_id = sa.Column( sa.String(36), sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'), nullable=True ) TaskExecution.action_executions = relationship( ActionExecution, backref=backref('task_execution', remote_side=[TaskExecution.id]), cascade='all, delete-orphan', foreign_keys=ActionExecution.task_execution_id, lazy='select', passive_deletes=True ) sa.Index( '%s_task_execution_id' % ActionExecution.__tablename__, 'task_execution_id' ) # Many-to-one for 'WorkflowExecution' and 'TaskExecution'. WorkflowExecution.task_execution_id = sa.Column( sa.String(36), sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'), nullable=True ) TaskExecution.workflow_executions = relationship( WorkflowExecution, backref=backref('task_execution', remote_side=[TaskExecution.id]), cascade='all, delete-orphan', foreign_keys=WorkflowExecution.task_execution_id, lazy='select', passive_deletes=True ) sa.Index( '%s_task_execution_id' % WorkflowExecution.__tablename__, 'task_execution_id' ) # Many-to-one for 'WorkflowExecution' and 'WorkflowExecution' WorkflowExecution.root_execution_id = sa.Column( sa.String(36), sa.ForeignKey(WorkflowExecution.id, ondelete='SET NULL'), nullable=True ) WorkflowExecution.root_execution = relationship( WorkflowExecution, remote_side=WorkflowExecution.id, lazy='select' ) # Many-to-one for 'TaskExecution' and 'WorkflowExecution'. TaskExecution.workflow_execution_id = sa.Column( sa.String(36), sa.ForeignKey(WorkflowExecution.id, ondelete='CASCADE') ) WorkflowExecution.task_executions = relationship( TaskExecution, backref=backref('workflow_execution', remote_side=[WorkflowExecution.id]), cascade='all, delete-orphan', foreign_keys=TaskExecution.workflow_execution_id, lazy='select', passive_deletes=True ) sa.Index( '%s_workflow_execution_id' % TaskExecution.__tablename__, TaskExecution.workflow_execution_id ) sa.Index( '%s_workflow_execution_id_name' % TaskExecution.__tablename__, TaskExecution.workflow_execution_id, TaskExecution.name ) # Other objects. 
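# NOTE: The models below (DelayedCall, ScheduledJob, Environment, etc.) are
# standalone bookkeeping objects that don't participate in the execution
# object hierarchy above. As an illustrative sketch, a ScheduledJob row
# typically carries either a bare function path or a factory/method pair
# (hypothetical values, based on the column docs below):
#
#     ScheduledJob(
#         run_after=30,
#         func_name='mistral.utils.random_sleep',
#         func_args={'seconds': 1},
#         execute_at=utils.utc_now_sec() + datetime.timedelta(seconds=30)
#     )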
class DelayedCall(mb.MistralModelBase): """Contains info about delayed calls.""" __tablename__ = 'delayed_calls_v2' id = mb.id_column() factory_method_path = sa.Column(sa.String(200), nullable=True) target_method_name = sa.Column(sa.String(80), nullable=False) method_arguments = sa.Column(st.JsonDictType()) serializers = sa.Column(st.JsonDictType()) key = sa.Column(sa.String(250), nullable=True) auth_context = sa.Column(st.JsonMediumDictType()) execution_time = sa.Column(sa.DateTime, nullable=False) processing = sa.Column(sa.Boolean, default=False, nullable=False) sa.Index( '%s_execution_time' % DelayedCall.__tablename__, DelayedCall.execution_time ) class ScheduledJob(mb.MistralModelBase): """Contains info about scheduled jobs.""" __tablename__ = 'scheduled_jobs_v2' id = mb.id_column() run_after = sa.Column(sa.Integer) # The full name of the factory function that returns/builds a Python # (target) object whose method should be called. Optional. target_factory_func_name = sa.Column(sa.String(200), nullable=True) # May take two different forms: # 1. Full path of a target function that should be called. For example, # "mistral.utils.random_sleep". # 2. Name of a method to call on a target object, if # "target_factory_func_name" is specified. func_name = sa.Column(sa.String(80), nullable=False) func_args = sa.Column(st.JsonDictType()) func_arg_serializers = sa.Column(st.JsonDictType()) auth_ctx = sa.Column(st.JsonDictType()) execute_at = sa.Column(sa.DateTime, nullable=False) captured_at = sa.Column(sa.DateTime, nullable=True) key = sa.Column(sa.String(250), nullable=True) class Environment(mb.MistralSecureModelBase): """Contains environment variables for workflow execution.""" __tablename__ = 'environments_v2' __table_args__ = ( sa.UniqueConstraint('name', 'project_id'), sa.Index('%s_name' % __tablename__, 'name'), sa.Index('%s_project_id' % __tablename__, 'project_id'), sa.Index('%s_scope' % __tablename__, 'scope'), ) # Main properties. id = mb.id_column() name = sa.Column(sa.String(200)) description = sa.Column(sa.Text()) variables = sa.Column(st.JsonLongDictType()) class CronTrigger(mb.MistralSecureModelBase): """Contains info about cron triggers.""" __tablename__ = 'cron_triggers_v2' __table_args__ = ( sa.UniqueConstraint('name', 'project_id'), sa.UniqueConstraint( 'workflow_input_hash', 'workflow_name', 'pattern', 'project_id', 'workflow_params_hash', 'remaining_executions', 'first_execution_time' ), sa.Index( '%s_next_execution_time' % __tablename__, 'next_execution_time' ), sa.Index('%s_project_id' % __tablename__, 'project_id'), sa.Index('%s_scope' % __tablename__, 'scope'), sa.Index('%s_workflow_name' % __tablename__, 'workflow_name'), ) id = mb.id_column() name = sa.Column(sa.String(200)) pattern = sa.Column( sa.String(100), nullable=True, default='0 0 30 2 0' # Set default to 'never'. 
    )
    first_execution_time = sa.Column(sa.DateTime, nullable=True)
    next_execution_time = sa.Column(sa.DateTime, nullable=False)
    workflow_name = sa.Column(sa.String(255))
    remaining_executions = sa.Column(sa.Integer)

    workflow_id = sa.Column(
        sa.String(36),
        sa.ForeignKey(WorkflowDefinition.id)
    )
    workflow = relationship('WorkflowDefinition', lazy='joined')

    workflow_params = sa.Column(st.JsonDictType())
    workflow_params_hash = sa.Column(
        sa.CHAR(64),
        default=_get_hash_function_by('workflow_params')
    )
    workflow_input = sa.Column(st.JsonDictType())
    workflow_input_hash = sa.Column(
        sa.CHAR(64),
        default=_get_hash_function_by('workflow_input')
    )

    trust_id = sa.Column(sa.String(80))

    def to_dict(self):
        d = super(CronTrigger, self).to_dict()

        utils.datetime_to_str_in_dict(d, 'first_execution_time')
        utils.datetime_to_str_in_dict(d, 'next_execution_time')

        return d


# Register all hooks related to secure models.
mb.register_secure_model_hooks()

# TODO(rakhmerov): This is a bad solution. It's hard to find in the code,
# configure flexibly etc. Fix it.
# Register an event listener to verify that the sizes of all the long
# columns affected by the user do not exceed the configured limit.
for attr_name in ['input', 'output', 'params', 'published']:
    register_length_validator(attr_name)

register_name_validator()


class ResourceMember(mb.MistralModelBase):
    """Contains info about resource members."""

    __tablename__ = 'resource_members_v2'
    __table_args__ = (
        sa.UniqueConstraint(
            'resource_id',
            'resource_type',
            'member_id'
        ),
    )

    id = mb.id_column()
    resource_id = sa.Column(sa.String(80), nullable=False)
    resource_type = sa.Column(
        sa.String(50),
        nullable=False,
        default='workflow'
    )
    project_id = sa.Column(sa.String(80), default=security.get_project_id)
    member_id = sa.Column(sa.String(80), nullable=False)
    status = sa.Column(sa.String(20), nullable=False, default="pending")


class EventTrigger(mb.MistralSecureModelBase):
    """Contains info about event triggers."""

    __tablename__ = 'event_triggers_v2'

    __table_args__ = (
        sa.UniqueConstraint('exchange', 'topic', 'event', 'workflow_id',
                            'project_id'),
        sa.Index('%s_project_id_workflow_id' % __tablename__, 'project_id',
                 'workflow_id'),
    )

    id = mb.id_column()
    name = sa.Column(sa.String(200))

    workflow_id = sa.Column(
        sa.String(36),
        sa.ForeignKey(WorkflowDefinition.id)
    )
    workflow = relationship('WorkflowDefinition', lazy='joined')
    workflow_params = sa.Column(st.JsonDictType())
    workflow_input = sa.Column(st.JsonDictType())

    exchange = sa.Column(sa.String(80), nullable=False)
    topic = sa.Column(sa.String(80), nullable=False)
    event = sa.Column(sa.String(80), nullable=False)

    trust_id = sa.Column(sa.String(80))


class NamedLock(mb.MistralModelBase):
    """Contains info about named locks.

    Usage of named locks is based on properties of READ COMMITTED
    transactions of the most generally used SQL databases such as
    Postgres, MySQL, Oracle etc.

    The locking scenario is as follows:

    1. Transaction A (TX-A) inserts a row with a unique 'id' and some
       value that identifies a locked object stored in 'name'.
    2. Transaction B (TX-B) and any subsequent transactions try to insert
       a row with a unique 'id' and the same value of the 'name' field,
       and they wait until TX-A completes, due to the transactional
       properties of READ COMMITTED.
    3. If TX-A then immediately deletes the record and commits, TX-B or
       one of the subsequent transactions is released and its 'insert'
       completes.
    4. The scenario then repeats from step #2, with the transaction that
       just did the insert playing the role of TX-A.
    In practice, this table should never contain any committed rows. Its
    entire purpose lies in this interplay of concurrent transactions.
    """

    __tablename__ = 'named_locks'

    __table_args__ = (
        # A bare class-level sa.UniqueConstraint('name') expression is a
        # no-op in SQLAlchemy; the constraint must be declared in
        # __table_args__ to take effect.
        sa.UniqueConstraint('name'),
    )

    id = mb.id_column()
    name = sa.Column(sa.String(255))


# File: mistral-10.0.0.0b3/mistral/engine/__init__.py

# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_utils import importutils

# NOTE(xylan): import modules for WorkflowHandler subclasses iteration
importutils.import_module('mistral.workflow.direct_workflow')
importutils.import_module('mistral.workflow.reverse_workflow')


# File: mistral-10.0.0.0b3/mistral/engine/action_handler.py

# Copyright 2015 - Mirantis, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
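# NOTE: _build_action() below dispatches on the kind of execution object it
# receives: a WorkflowExecution yields a WorkflowAction, an action execution
# whose runtime context carries 'adhoc_action_name' yields an AdHocAction,
# and anything else yields a PythonAction. An illustrative sketch of the
# typical call sequence (hypothetical variables):
#
#     action = _build_action(action_ex)
#     action.complete(result)    # or action.update(state)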
from oslo_log import log as logging
from osprofiler import profiler
import traceback as tb

from mistral.db.v2.sqlalchemy import models
from mistral.engine import actions
from mistral.engine import task_handler
from mistral import exceptions as exc


LOG = logging.getLogger(__name__)


@profiler.trace('action-handler-on-action-complete', hide_args=True)
def on_action_complete(action_ex, result):
    task_ex = action_ex.task_execution

    action = _build_action(action_ex)

    try:
        action.complete(result)
    except exc.MistralException as e:
        msg = (
            "Failed to complete action [error=%s, action=%s, task=%s]:\n%s"
            % (e, action_ex.name, task_ex.name, tb.format_exc())
        )

        LOG.error(msg)

        action.fail(msg)

        if task_ex:
            task_handler.force_fail_task(task_ex, msg)

        return

    if task_ex:
        task_handler.schedule_on_action_complete(action_ex)


@profiler.trace('action-handler-on-action-update', hide_args=True)
def on_action_update(action_ex, state):
    task_ex = action_ex.task_execution

    action = _build_action(action_ex)

    try:
        action.update(state)
    except exc.MistralException as e:
        # If updating the action execution fails, we don't mark the action
        # execution itself as failed. Log the exception and re-raise it to
        # the caller.
        msg = (
            "Failed to update action [error=%s, action=%s, task=%s]:\n%s"
            % (e, action_ex.name, task_ex.name, tb.format_exc())
        )

        LOG.error(msg)

        raise

    if task_ex:
        task_handler.schedule_on_action_update(action_ex)


@profiler.trace('action-handler-build-action', hide_args=True)
def _build_action(action_ex):
    if isinstance(action_ex, models.WorkflowExecution):
        return actions.WorkflowAction(wf_name=action_ex.name,
                                      action_ex=action_ex)

    adhoc_action_name = action_ex.runtime_context.get('adhoc_action_name')

    if adhoc_action_name:
        action_def = actions.resolve_action_definition(
            adhoc_action_name,
            namespace=action_ex.workflow_namespace
        )

        return actions.AdHocAction(action_def, action_ex=action_ex)

    action_def = actions.resolve_action_definition(action_ex.name)

    return actions.PythonAction(action_def, action_ex=action_ex)


def build_action_by_name(action_name, namespace=''):
    action_def = actions.resolve_action_definition(action_name,
                                                   namespace=namespace)

    action_cls = (actions.PythonAction if not action_def.spec
                  else actions.AdHocAction)

    return action_cls(action_def)


# File: mistral-10.0.0.0b3/mistral/engine/actions.py

# Copyright 2016 - Nokia Networks.
# Copyright 2016 - Brocade Communications Systems, Inc.
# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
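# NOTE: This module defines the engine-side Action class hierarchy:
#
#     Action (abstract)
#     +-- PythonAction     - a regular Python action run on an executor
#     |   +-- AdHocAction  - a DSL-defined wrapper around a base action
#     +-- WorkflowAction   - runs a (sub-)workflow instead of an action
#
# An illustrative sketch of the scheduling flow (hypothetical variables):
#
#     action = PythonAction(action_def, task_ex=task_ex)
#     action.validate_input(input_dict)
#     action.schedule(input_dict, target=None)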
import abc

from oslo_config import cfg
from oslo_log import log as logging
from osprofiler import profiler
import six

from mistral.db.v2 import api as db_api
from mistral.engine import post_tx_queue
from mistral.engine import utils as engine_utils
from mistral.engine import workflow_handler as wf_handler
from mistral import exceptions as exc
from mistral.executors import base as exe
from mistral import expressions as expr
from mistral.lang import parser as spec_parser
from mistral.rpc import clients as rpc
from mistral.services import action_manager as a_m
from mistral.services import security
from mistral.utils import wf_trace
from mistral.workflow import data_flow
from mistral.workflow import states
from mistral_lib import actions as ml_actions
from mistral_lib import utils


LOG = logging.getLogger(__name__)

CONF = cfg.CONF


@six.add_metaclass(abc.ABCMeta)
class Action(object):
    """Action.

    Represents a workflow action and defines the interface that the Mistral
    engine and its components can use to manipulate actions.
    """

    def __init__(self, action_def, action_ex=None, task_ex=None):
        self.action_def = action_def
        self.action_ex = action_ex
        self.namespace = action_def.namespace if action_def else None
        self.task_ex = action_ex.task_execution if action_ex else task_ex

    @abc.abstractmethod
    def complete(self, result):
        """Complete action and process its result.

        :param result: Action result.
        """
        raise NotImplementedError

    def fail(self, msg):
        assert self.action_ex

        # When we set an ERROR state we should set the output value safely,
        # without exceptions caused by the field size limitations.
        msg = utils.cut_by_kb(
            msg,
            cfg.CONF.engine.execution_field_size_limit_kb
        )

        self.action_ex.state = states.ERROR
        self.action_ex.output = {'result': msg}

    def update(self, state):
        assert self.action_ex

        if state == states.PAUSED and self.is_sync(self.action_ex.input):
            raise exc.InvalidStateTransitionException(
                'Transition to the PAUSED state is only supported '
                'for asynchronous action execution.'
            )

        if not states.is_valid_transition(self.action_ex.state, state):
            raise exc.InvalidStateTransitionException(
                'Invalid state transition from %s to %s.' %
                (self.action_ex.state, state)
            )

        self.action_ex.state = state

    @abc.abstractmethod
    def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False,
                 timeout=None):
        """Schedule action run.

        This method is needed to schedule an action run so that its result
        can be received later by the engine. In this sense it runs in
        asynchronous mode from the engine's perspective (not to be confused
        with executor asynchrony, where the executor doesn't immediately
        send a result).

        :param timeout: a period of time in seconds after which execution
            of the action will be interrupted
        :param input_dict: Action input.
        :param target: Target (group of action executors).
        :param index: Action execution index. Makes sense for some types.
        :param desc: Action execution description.
        :param safe_rerun: If true, the action will be re-run if the
            executor dies during execution.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def run(self, input_dict, target, index=0, desc='', save=True,
            safe_rerun=False, timeout=None):
        """Immediately run action.

        This method runs the action w/o scheduling its run for a later time.
        From the engine's perspective the action will be processed in
        synchronous mode.

        :param timeout: a period of time in seconds after which execution
            of the action will be interrupted
        :param input_dict: Action input.
        :param target: Target (group of action executors).
        :param index: Action execution index. Makes sense for some types.
:param desc: Action execution description. :param save: True if action execution object needs to be saved. :param safe_rerun: If true, action would be re-run if executor dies during execution. :return: Action output. """ raise NotImplementedError def validate_input(self, input_dict): """Validates action input parameters. :param input_dict: Dictionary with input parameters. """ raise NotImplementedError def is_sync(self, input_dict): """Determines if action is synchronous. :param input_dict: Dictionary with input parameters. """ return True def _create_action_execution(self, input_dict, runtime_ctx, is_sync, desc='', action_ex_id=None): action_ex_id = action_ex_id or utils.generate_unicode_uuid() values = { 'id': action_ex_id, 'name': self.action_def.name, 'spec': self.action_def.spec, 'state': states.RUNNING, 'input': input_dict, 'runtime_context': runtime_ctx, 'workflow_namespace': self.namespace, 'description': desc, 'is_sync': is_sync } if self.task_ex: values.update({ 'task_execution_id': self.task_ex.id, 'workflow_name': self.task_ex.workflow_name, 'workflow_namespace': self.task_ex.workflow_namespace, 'workflow_id': self.task_ex.workflow_id, 'project_id': self.task_ex.project_id, }) else: values.update({ 'project_id': security.get_project_id(), }) self.action_ex = db_api.create_action_execution(values) if self.task_ex: # Add to collection explicitly so that it's in a proper # state within the current session. self.task_ex.action_executions.append(self.action_ex) @profiler.trace('action-log-result', hide_args=True) def _log_result(self, prev_state, result): state = self.action_ex.state if prev_state != state: wf_trace.info( None, "Action '%s' (%s)(task=%s) [%s -> %s, %s]" % (self.action_ex.name, self.action_ex.id, self.task_ex.name if self.task_ex else None, prev_state, state, result.cut_repr()) ) class PythonAction(Action): """Regular Python action.""" def __init__(self, action_def, action_ex=None, task_ex=None): super(PythonAction, self).__init__(action_def, action_ex, task_ex) self._prepared_input = None @profiler.trace('action-complete', hide_args=True) def complete(self, result): assert self.action_ex if states.is_completed(self.action_ex.state): raise ValueError( "Action {} is already completed".format(self.action_ex.id)) prev_state = self.action_ex.state if result.is_success(): self.action_ex.state = states.SUCCESS elif result.is_cancel(): self.action_ex.state = states.CANCELLED else: self.action_ex.state = states.ERROR self.action_ex.output = self._prepare_output(result).to_dict() self.action_ex.accepted = True self._log_result(prev_state, result) @profiler.trace('action-schedule', hide_args=True) def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False, timeout=None): assert not self.action_ex self.validate_input(input_dict) # Assign the action execution ID here to minimize database calls. # Otherwise, the input property of the action execution DB object needs # to be updated with the action execution ID after the action execution # DB object is created. action_ex_id = utils.generate_unicode_uuid() self._create_action_execution( self._prepare_input(input_dict), self._prepare_runtime_context(index, safe_rerun), self.is_sync(input_dict), desc=desc, action_ex_id=action_ex_id ) action_ex_ctx = self._prepare_execution_context() # Register an asynchronous command to send the action to # run on an executor outside of the main DB transaction. 
def _run_action(): executor = exe.get_executor(cfg.CONF.executor.type) executor.run_action( self.action_ex.id, self.action_def.action_class, self.action_def.attributes or {}, self.action_ex.input, self.action_ex.runtime_context.get('safe_rerun', False), action_ex_ctx, target=target, timeout=timeout ) post_tx_queue.register_operation(_run_action) @profiler.trace('action-run', hide_args=True) def run(self, input_dict, target, index=0, desc='', save=True, safe_rerun=False, timeout=None): assert not self.action_ex self.validate_input(input_dict) prepared_input_dict = self._prepare_input(input_dict) # Assign the action execution ID here to minimize database calls. # Otherwise, the input property of the action execution DB object needs # to be updated with the action execution ID after the action execution # DB object is created. action_ex_id = utils.generate_unicode_uuid() if save: self._create_action_execution( prepared_input_dict, self._prepare_runtime_context(index, safe_rerun), self.is_sync(input_dict), desc=desc, action_ex_id=action_ex_id ) executor = exe.get_executor(cfg.CONF.executor.type) execution_context = self._prepare_execution_context() result = executor.run_action( self.action_ex.id if self.action_ex else None, self.action_def.action_class, self.action_def.attributes or {}, prepared_input_dict, safe_rerun, execution_context, target=target, async_=False, timeout=timeout ) return self._prepare_output(result) def is_sync(self, input_dict): try: prepared_input_dict = self._prepare_input(input_dict) a = a_m.get_action_class(self.action_def.name, self.action_def.namespace)( **prepared_input_dict ) return a.is_sync() except BaseException as e: LOG.exception(e) raise exc.InputException(str(e)) def validate_input(self, input_dict): # NOTE(kong): Don't validate action input if action initialization # method contains ** argument. if '**' in self.action_def.input: return expected_input = utils.get_dict_from_string(self.action_def.input) engine_utils.validate_input( expected_input, input_dict, self.action_def.name, self.action_def.action_class ) def _prepare_execution_context(self): exc_ctx = {} if self.task_ex: wf_ex = self.task_ex.workflow_execution exc_ctx['workflow_execution_id'] = wf_ex.id exc_ctx['task_execution_id'] = self.task_ex.id exc_ctx['workflow_name'] = wf_ex.name if self.action_ex: exc_ctx['action_execution_id'] = self.action_ex.id exc_ctx['callback_url'] = ( '/v2/action_executions/%s' % self.action_ex.id ) return exc_ctx def _prepare_input(self, input_dict): """Template method to do manipulations with input parameters. Python action doesn't do anything specific with initial input. """ return input_dict def _prepare_output(self, result): """Template method to do manipulations with action result. Python action doesn't do anything specific with result. """ return result def _prepare_runtime_context(self, index, safe_rerun): """Template method to prepare action runtime context. Python action inserts index into runtime context and information if given action is safe_rerun. 
""" return {'index': index, 'safe_rerun': safe_rerun} class AdHocAction(PythonAction): """Ad-hoc action.""" @profiler.trace('ad-hoc-action-init', hide_args=True) def __init__(self, action_def, action_ex=None, task_ex=None, task_ctx=None, wf_ctx=None): self.action_spec = spec_parser.get_action_spec(action_def.spec) base_action_def = db_api.load_action_definition( self.action_spec.get_base(), namespace=action_def.namespace ) if not base_action_def: raise exc.InvalidActionException( "Failed to find action [action_name=%s]" % self.action_spec.get_base() ) base_action_def = self._gather_base_actions( action_def, base_action_def ) super(AdHocAction, self).__init__( base_action_def, action_ex, task_ex ) self.adhoc_action_def = action_def self.namespace = action_def.namespace self.task_ctx = task_ctx or {} self.wf_ctx = wf_ctx or {} @profiler.trace('ad-hoc-action-validate-input', hide_args=True) def validate_input(self, input_dict): expected_input = self.action_spec.get_input() engine_utils.validate_input( expected_input, input_dict, self.adhoc_action_def.name, self.action_spec.__class__.__name__ ) super(AdHocAction, self).validate_input( self._prepare_input(input_dict) ) @profiler.trace('ad-hoc-action-prepare-input', hide_args=True) def _prepare_input(self, input_dict): if self._prepared_input is not None: return self._prepared_input base_input_dict = input_dict for action_def in self.adhoc_action_defs: action_spec = spec_parser.get_action_spec(action_def.spec) for k, v in action_spec.get_input().items(): if (k not in base_input_dict or base_input_dict[k] is utils.NotDefined): base_input_dict[k] = v base_input_expr = action_spec.get_base_input() if base_input_expr: wf_ex = ( self.task_ex.workflow_execution if self.task_ex else None ) ctx_view = data_flow.ContextView( base_input_dict, self.task_ctx, data_flow.get_workflow_environment_dict(wf_ex), self.wf_ctx ) base_input_dict = expr.evaluate_recursively( base_input_expr, ctx_view ) else: base_input_dict = {} self._prepared_input = super(AdHocAction, self)._prepare_input( base_input_dict ) return self._prepared_input @profiler.trace('ad-hoc-action-prepare-output', hide_args=True) def _prepare_output(self, result): # In case of error, we don't transform a result. if not result.is_error(): for action_def in reversed(self.adhoc_action_defs): adhoc_action_spec = spec_parser.get_action_spec( action_def.spec ) transformer = adhoc_action_spec.get_output() if transformer is not None: result = ml_actions.Result( data=expr.evaluate_recursively( transformer, result.data ), error=result.error ) return result @profiler.trace('ad-hoc-action-prepare-runtime-context', hide_args=True) def _prepare_runtime_context(self, index, safe_rerun): ctx = super(AdHocAction, self)._prepare_runtime_context( index, safe_rerun ) # Insert special field into runtime context so that we track # a relationship between python action and adhoc action. return utils.merge_dicts( ctx, {'adhoc_action_name': self.adhoc_action_def.name} ) @profiler.trace('ad-hoc-action-gather-base-actions', hide_args=True) def _gather_base_actions(self, action_def, base_action_def): """Find all base ad-hoc actions and store them. An ad-hoc action may be based on another ad-hoc action and this works recursively, so that the base action can also be based on an ad-hoc action. Using the same base action more than once in this action hierarchy is not allowed to avoid infinite loops. The method stores the list of ad-hoc actions. 
:param action_def: Action definition :type action_def: ActionDefinition :param base_action_def: Original base action definition :type base_action_def: ActionDefinition :return: The definition of the base system action :rtype: ActionDefinition """ self.adhoc_action_defs = [action_def] original_base_name = self.action_spec.get_name() action_names = set([original_base_name]) base = base_action_def while not base.is_system and base.name not in action_names: action_names.add(base.name) self.adhoc_action_defs.append(base) base_name = base.spec['base'] try: base = db_api.get_action_definition(base_name, namespace=base.namespace) except exc.DBEntityNotFoundError: raise exc.InvalidActionException( "Failed to find action [action_name=%s namespace=%s] " % (base_name, base.namespace) ) # if the action is repeated if base.name in action_names: raise ValueError( 'An ad-hoc action cannot use twice the same action, %s is ' 'used at least twice' % base.name ) return base class WorkflowAction(Action): """Workflow action.""" def __init__(self, wf_name, **kwargs): super(WorkflowAction, self).__init__(None, **kwargs) self.wf_name = wf_name @profiler.trace('workflow-action-complete', hide_args=True) def complete(self, result): # No-op because in case of workflow result is already processed. pass @profiler.trace('workflow-action-schedule', hide_args=True) def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False, timeout=None): assert not self.action_ex self.validate_input(input_dict) parent_wf_ex = self.task_ex.workflow_execution parent_wf_spec = spec_parser.get_workflow_spec_by_execution_id( parent_wf_ex.id ) wf_def = engine_utils.resolve_workflow_definition( parent_wf_ex.workflow_name, parent_wf_spec.get_name(), namespace=parent_wf_ex.params['namespace'], wf_spec_name=self.wf_name ) wf_spec = spec_parser.get_workflow_spec_by_definition_id( wf_def.id, wf_def.updated_at ) # If the parent has a root_execution_id, it must be a sub-workflow. So # we should propagate that ID down. Otherwise the parent must be the # root execution and we should use the parents ID. root_execution_id = parent_wf_ex.root_execution_id or parent_wf_ex.id wf_params = { 'root_execution_id': root_execution_id, 'task_execution_id': self.task_ex.id, 'index': index, 'namespace': parent_wf_ex.params['namespace'] } if 'notify' in parent_wf_ex.params: wf_params['notify'] = parent_wf_ex.params['notify'] for k, v in list(input_dict.items()): if k not in wf_spec.get_input(): wf_params[k] = v del input_dict[k] if cfg.CONF.engine.start_subworkflows_via_rpc: def _start_subworkflow(): rpc.get_engine_client().start_workflow( wf_def.id, wf_def.namespace, None, input_dict, "sub-workflow execution", async_=True, **wf_params ) post_tx_queue.register_operation(_start_subworkflow) else: wf_handler.start_workflow( wf_def.id, wf_def.namespace, None, input_dict, "sub-workflow execution", wf_params ) @profiler.trace('workflow-action-run', hide_args=True) def run(self, input_dict, target, index=0, desc='', save=True, safe_rerun=True, timeout=None): raise NotImplementedError('Does not apply to this WorkflowAction.') def is_sync(self, input_dict): # Workflow action is always asynchronous. return False def validate_input(self, input_dict): # TODO(rakhmerov): Implement. pass def resolve_action_definition(action_spec_name, wf_name=None, wf_spec_name=None, namespace=''): """Resolve action definition accounting for ad-hoc action namespacing. :param action_spec_name: Action name according to a spec. :param wf_name: Workflow name. 
:param wf_spec_name: Workflow name according to a spec. :param namespace: The namespace of the action. :return: Action definition (python or ad-hoc). """ action_db = None if wf_name and wf_name != wf_spec_name: # If the workflow belongs to a workbook then check # the action within the same workbook (to be able to # use short names within workbooks). # If it doesn't exist then use a name from the spec # to find the action in DB. wb_name = wf_name.rstrip(wf_spec_name)[:-1] action_full_name = "%s.%s" % (wb_name, action_spec_name) action_db = db_api.load_action_definition(action_full_name, namespace=namespace) if not action_db: action_db = db_api.load_action_definition(action_spec_name, namespace=namespace) if not action_db: raise exc.InvalidActionException( "Failed to find action [action_name=%s] in [namespace=%s]" % (action_spec_name, namespace) ) return action_db ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/base.py0000644000175000017500000001574100000000000020327 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2017 - Brocade Communications Systems, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import jsonschema import six from mistral import exceptions as exc from mistral.workflow import data_flow from mistral_lib.utils import inspect_utils @six.add_metaclass(abc.ABCMeta) class Engine(object): """Engine interface.""" @abc.abstractmethod def start_workflow(self, wf_identifier, wf_namespace='', wf_ex_id=None, wf_input=None, description='', async_=False, **params): """Starts the specified workflow. :param wf_identifier: Workflow ID or name. Workflow ID is recommended; workflow name lookup has been deprecated since Mitaka. :param wf_namespace: Workflow namespace. :param wf_input: Workflow input data as a dictionary. :param wf_ex_id: Workflow execution id. If passed, it will be set in the new execution object. :param description: Execution description. :param async_: If True, start workflow in asynchronous mode (w/o waiting for completion). :param params: Additional workflow type specific parameters. :return: Workflow execution object. """ raise NotImplementedError @abc.abstractmethod def start_action(self, action_name, action_input, description=None, namespace='', **params): """Starts the specified action. :param action_name: Action name. :param action_input: Action input data as a dictionary. :param description: Execution description. :param namespace: The namespace of the action. :param params: Additional options for action running. :return: Action execution object. """ raise NotImplementedError @abc.abstractmethod def on_action_complete(self, action_ex_id, result, wf_action=False, async_=False): """Accepts action result and continues the workflow. Action execution result here is a result which comes from an action/workflow associated with the task. :param action_ex_id: Action execution id. :param result: Action/workflow result.
Instance of mistral.workflow.base.Result :param wf_action: If True it means that the given id points to a workflow execution rather than action execution. It happens when a nested workflow execution sends its result to a parent workflow. :param async: If True, run action in asynchronous mode (w/o waiting for completion). :return: Action(or workflow if wf_action=True) execution object. """ raise NotImplementedError @abc.abstractmethod def pause_workflow(self, wf_ex_id): """Pauses workflow. :param wf_ex_id: Execution id. :return: Workflow execution object. """ raise NotImplementedError @abc.abstractmethod def resume_workflow(self, wf_ex_id, env=None): """Resumes workflow. :param wf_ex_id: Execution id. :param env: Workflow environment. :return: Workflow execution object. """ raise NotImplementedError @abc.abstractmethod def rerun_workflow(self, task_ex_id, reset=True, env=None): """Rerun workflow from the specified task. :param task_ex_id: Task execution id. :param reset: If True, reset task state including deleting its action executions. :param env: Workflow environment. :return: Workflow execution object. """ raise NotImplementedError @abc.abstractmethod def stop_workflow(self, wf_ex_id, state, message): """Stops workflow. :param wf_ex_id: Workflow execution id. :param state: State assigned to the workflow. Permitted states are SUCCESS or ERROR. :param message: Optional information string. :return: Workflow execution. """ raise NotImplementedError @abc.abstractmethod def rollback_workflow(self, wf_ex_id): """Rolls back workflow execution. :param wf_ex_id: Execution id. :return: Workflow execution object. """ raise NotImplementedError @abc.abstractmethod def process_action_heartbeats(self, action_ex_ids): """Receives the heartbeat about the running actions. :param action_ex_ids: The action execution ids. """ raise NotImplementedError @six.add_metaclass(abc.ABCMeta) class TaskPolicy(object): """Task policy. Provides interface to perform any work after a task has completed. An example of task policy may be 'retry' policy that makes engine to run a task repeatedly if it finishes with a failure. """ _schema = {} def before_task_start(self, task_ex, task_spec): """Called right before task start. :param task_ex: DB model for task that is about to start. :param task_spec: Task specification. """ wf_ex = task_ex.workflow_execution ctx_view = data_flow.ContextView( task_ex.in_context, data_flow.get_current_task_dict(task_ex), data_flow.get_workflow_environment_dict(wf_ex), wf_ex.context, wf_ex.input ) data_flow.evaluate_object_fields(self, ctx_view) self._validate() def after_task_complete(self, task_ex, task_spec): """Called right after task completes. :param task_ex: Completed task DB model. :param task_spec: Completed task specification. """ wf_ex = task_ex.workflow_execution ctx_view = data_flow.ContextView( task_ex.in_context, data_flow.get_current_task_dict(task_ex), data_flow.get_workflow_environment_dict(wf_ex), wf_ex.context, wf_ex.input ) data_flow.evaluate_object_fields(self, ctx_view) self._validate() def _validate(self): """Validation of types after YAQL evaluation.""" props = inspect_utils.get_public_fields(self) try: jsonschema.validate(props, self._schema) except Exception as e: raise exc.InvalidModelException( "Invalid data type in %s: %s. Value(s) can be shown after " "YAQL evaluating. If you use YAQL here, please correct it." 
% (self.__class__.__name__, str(e)) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/default_engine.py0000644000175000017500000002070000000000000022355 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from osprofiler import profiler from mistral_lib import actions as ml_actions from mistral.db import utils as db_utils from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models as db_models from mistral.engine import action_handler from mistral.engine import base from mistral.engine import post_tx_queue from mistral.engine import workflow_handler as wf_handler from mistral import exceptions from mistral.workflow import states from mistral_lib import utils as u # Submodules of mistral.engine will throw NoSuchOptError if configuration # options required at top level of this __init__.py are not imported before # the submodules are referenced. LOG = logging.getLogger(__name__) class DefaultEngine(base.Engine): @db_utils.retry_on_db_error @post_tx_queue.run @profiler.trace('engine-start-workflow', hide_args=True) def start_workflow(self, wf_identifier, wf_namespace='', wf_ex_id=None, wf_input=None, description='', async_=False, **params): if wf_namespace: params['namespace'] = wf_namespace if cfg.CONF.notifier.notify: if 'notify' not in params or not params['notify']: params['notify'] = [] params['notify'].extend(cfg.CONF.notifier.notify) try: with db_api.transaction(): wf_ex = wf_handler.start_workflow( wf_identifier, wf_namespace, wf_ex_id, wf_input or {}, description, params ) # Checking a case when all tasks are completed immediately. wf_handler.check_and_complete(wf_ex.id) return wf_ex.get_clone() except exceptions.DBDuplicateEntryError: # NOTE(akovi): the workflow execution with a provided # wf_ex_id may already exist. In this case, simply # return the existing entity. 
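# This also makes start_workflow() effectively idempotent for clients
# that generate their own wf_ex_id: re-sending the same request after
# an RPC timeout simply returns the execution that was already created.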
with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex_id) return wf_ex.get_clone() @db_utils.retry_on_db_error @post_tx_queue.run def start_action(self, action_name, action_input, description=None, namespace='', **params): with db_api.transaction(): action = action_handler.build_action_by_name(action_name, namespace=namespace) action.validate_input(action_input) sync = params.get('run_sync') save = params.get('save_result') target = params.get('target') timeout = params.get('timeout') is_action_sync = action.is_sync(action_input) if sync and not is_action_sync: raise exceptions.InputException( "Action does not support synchronous execution.") if not sync and (save or not is_action_sync): action.schedule(action_input, target, timeout=timeout) return action.action_ex.get_clone() output = action.run( action_input, target, save=False, timeout=timeout ) state = states.SUCCESS if output.is_success() else states.ERROR if not save: # Action execution is not created but we need to return similar # object to the client anyway. return db_models.ActionExecution( name=action_name, description=description, input=action_input, output=output.to_dict(), state=state, workflow_namespace=namespace ) action_ex_id = u.generate_unicode_uuid() values = { 'id': action_ex_id, 'name': action_name, 'description': description, 'input': action_input, 'output': output.to_dict(), 'state': state, 'is_sync': is_action_sync, 'workflow_namespace': namespace } return db_api.create_action_execution(values) @db_utils.retry_on_db_error @post_tx_queue.run @profiler.trace('engine-on-action-complete', hide_args=True) def on_action_complete(self, action_ex_id, result, wf_action=False, async_=False): with db_api.transaction(): if wf_action: action_ex = db_api.get_workflow_execution(action_ex_id) # If result is None it means that it's a normal subworkflow # output and we just need to fetch it from the model. 
# This is just an optimization to not send data over RPC if result is None: result = ml_actions.Result(data=action_ex.output) else: action_ex = db_api.get_action_execution(action_ex_id) action_handler.on_action_complete(action_ex, result) return action_ex.get_clone() @db_utils.retry_on_db_error @post_tx_queue.run @profiler.trace('engine-on-action-update', hide_args=True) def on_action_update(self, action_ex_id, state, wf_action=False, async_=False): with db_api.transaction(): if wf_action: action_ex = db_api.get_workflow_execution(action_ex_id) else: action_ex = db_api.get_action_execution(action_ex_id) action_handler.on_action_update(action_ex, state) return action_ex.get_clone() @db_utils.retry_on_db_error @post_tx_queue.run def pause_workflow(self, wf_ex_id): with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex_id) wf_handler.pause_workflow(wf_ex) return wf_ex.get_clone() @db_utils.retry_on_db_error @post_tx_queue.run def rerun_workflow(self, task_ex_id, reset=True, env=None): with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex_id) wf_ex = task_ex.workflow_execution wf_handler.rerun_workflow(wf_ex, task_ex, reset=reset, env=env) return wf_ex.get_clone() @db_utils.retry_on_db_error @post_tx_queue.run def resume_workflow(self, wf_ex_id, env=None): with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex_id) wf_handler.resume_workflow(wf_ex, env=env) return wf_ex.get_clone() @db_utils.retry_on_db_error @post_tx_queue.run def stop_workflow(self, wf_ex_id, state, message=None): with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex_id) wf_handler.stop_workflow(wf_ex, state, message) return wf_ex.get_clone() def rollback_workflow(self, wf_ex_id): # TODO(rakhmerov): Implement. raise NotImplementedError @db_utils.retry_on_db_error @post_tx_queue.run def process_action_heartbeats(self, action_ex_ids): with db_api.transaction(): for exec_id in action_ex_ids: try: db_api.update_action_execution_heartbeat(exec_id) except exceptions.DBEntityNotFoundError: LOG.debug( "Action execution heartbeat update failed. {}" .format(exec_id), exc_info=True ) # Ignore this error and continue with the # remaining ids. pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/dispatcher.py0000644000175000017500000001157000000000000021537 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
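# A minimal, self-contained sketch (not used anywhere by the engine) of the
# ordering contract implemented by _rearrange_commands() below: 'waiting'
# task commands are sorted by unique key so that parallel transactions
# always take task locks in the same order. The plain dicts stand in for
# real command objects and are purely hypothetical.
def _sketch_waiting_command_ordering():
    import functools

    def _cmp(a, b):
        # Non-waiting commands keep their place at the front of the list.
        if not a['waiting']:
            return -1
        if not b['waiting']:
            return 1
        # Waiting commands are ordered deterministically by unique key.
        return (a['key'] > b['key']) - (a['key'] < b['key'])

    cmds = [
        {'waiting': True, 'key': 'task-c'},
        {'waiting': True, 'key': 'task-a'},
        {'waiting': False, 'key': 'task-b'},
    ]

    cmds.sort(key=functools.cmp_to_key(_cmp))

    # Result: 'task-b' first (not waiting), then 'task-a', then 'task-c'.
    return [c['key'] for c in cmds]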
import functools from osprofiler import profiler from mistral import exceptions as exc from mistral.workflow import commands from mistral.workflow import states BACKLOG_KEY = 'backlog_commands' def _compare_task_commands(a, b): if not isinstance(a, commands.RunTask) or not a.is_waiting(): return -1 if not isinstance(b, commands.RunTask) or not b.is_waiting(): return 1 if a.unique_key == b.unique_key: return 0 if a.unique_key < b.unique_key: return -1 return 1 def _rearrange_commands(cmds): """Takes workflow commands and does required pre-processing. The main idea of the method is to sort task commands with 'waiting' flag by 'unique_key' property in order to guarantee the same locking order for them in parallel transactions and thereby prevent deadlocks. It also removes commands that don't make sense. For example, if there are some commands after a command that changes a workflow state then they must not be dispatched. """ # Remove all 'noop' commands. cmds = [c for c in cmds if not isinstance(c, commands.Noop)] state_cmd_idx = -1 state_cmd = None for i, cmd in enumerate(cmds): if isinstance(cmd, commands.SetWorkflowState): state_cmd_idx = i state_cmd = cmd break # Find a position of a 'fail|succeed|pause' command # and sort all task commands before it. if state_cmd_idx < 0: cmds.sort(key=functools.cmp_to_key(_compare_task_commands)) return cmds elif (state_cmd_idx == 0 and not isinstance(state_cmd, commands.PauseWorkflow)): return cmds[0:1] res = cmds[0:state_cmd_idx] res.sort(key=functools.cmp_to_key(_compare_task_commands)) res.append(state_cmd) # If the previously found state-changing command is 'pause' then we need # to also add a tail of the initial command list to the result so that # we can save them to the command backlog. if isinstance(state_cmd, commands.PauseWorkflow): res.extend(cmds[state_cmd_idx + 1:]) return res def _save_command_to_backlog(wf_ex, cmd): backlog_cmds = wf_ex.runtime_context.get(BACKLOG_KEY, []) if not backlog_cmds: wf_ex.runtime_context[BACKLOG_KEY] = backlog_cmds backlog_cmds.append(cmd.to_dict()) def _poll_commands_from_backlog(wf_ex): # NOTE: We need to always use a guard condition that checks # if a persistent structure is empty and, as in this case, # return immediately w/o doing any further manipulations. # Otherwise, if we do a pop() operation with a default value # then the ORM framework will consider it a modification of # the persistent object and generate a corresponding SQL # UPDATE operation. In this particular case it will increase # contention for the workflow executions table drastically and # decrease performance. if not wf_ex.runtime_context.get(BACKLOG_KEY): return [] backlog_cmds = wf_ex.runtime_context.pop(BACKLOG_KEY) return [ commands.restore_command_from_dict(wf_ex, cmd_dict) for cmd_dict in backlog_cmds ] @profiler.trace('dispatcher-dispatch-commands', hide_args=True) def dispatch_workflow_commands(wf_ex, wf_cmds): # Run commands from the backlog. _process_commands(wf_ex, _poll_commands_from_backlog(wf_ex)) # Run new commands. _process_commands(wf_ex, wf_cmds) def _process_commands(wf_ex, cmds): if not cmds: return from mistral.engine import task_handler from mistral.engine import workflow_handler as wf_handler for cmd in _rearrange_commands(cmds): if states.is_completed(wf_ex.state): break if wf_ex.state == states.PAUSED: # Save all commands after 'pause' to the backlog so that # they can be processed after the workflow is resumed.
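# The backlog is drained on the next call to dispatch_workflow_commands(),
# which polls saved commands via _poll_commands_from_backlog() before
# processing any new ones.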
_save_command_to_backlog(wf_ex, cmd) continue if isinstance(cmd, (commands.RunTask, commands.RunExistingTask)): task_handler.run_task(cmd) elif isinstance(cmd, commands.SetWorkflowState): wf_handler.set_workflow_state(wf_ex, cmd.new_state, cmd.msg) else: raise exc.MistralError('Unsupported workflow command: %s' % cmd) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/engine_server.py0000644000175000017500000002716500000000000022253 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from mistral import config as cfg from mistral.db.v2 import api as db_api from mistral.engine import default_engine from mistral import exceptions as exc from mistral.rpc import base as rpc from mistral.scheduler import base as sched_base from mistral.service import base as service_base from mistral.services import action_heartbeat_checker from mistral.services import action_heartbeat_sender from mistral.services import expiration_policy from mistral.utils import profiler as profiler_utils from mistral_lib import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF def _validate_config(): if not CONF.yaql.convert_output_data and CONF.yaql.convert_input_data: raise exc.MistralError( "The config property 'yaql.convert_output_data' is set to False " "so 'yaql.convert_input_data' must also be set to False." ) class EngineServer(service_base.MistralService): """Engine server. This class manages engine life-cycle and gets registered as an RPC endpoint to process engine specific calls. It also registers a cluster member associated with this instance of engine. """ def __init__(self, engine, setup_profiler=True): super(EngineServer, self).__init__('engine_group', setup_profiler) self.engine = engine self._rpc_server = None self._scheduler = None self._expiration_policy_tg = None def start(self): super(EngineServer, self).start() _validate_config() db_api.setup_db() self._scheduler = sched_base.get_system_scheduler() self._scheduler.start() self._expiration_policy_tg = expiration_policy.setup() action_heartbeat_checker.start() # If the current engine instance uses a local action executor # then we also need to initialize a heartbeat reporter for it. # Heartbeats will be sent to the engine tier in the same way as # with a remote executor. So if the current cluster node crashes # in the middle of executing an action then one of the remaining # engine instances will expire the action in a configured period # of time. if CONF.executor.type == 'local': action_heartbeat_sender.start() if self._setup_profiler: profiler_utils.setup('mistral-engine', CONF.engine.host) # Initialize and start RPC server. 
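# This EngineServer instance is registered as the endpoint, so incoming
# RPC method names map directly onto the public methods defined below.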
self._rpc_server = rpc.get_rpc_server_driver()(CONF.engine) self._rpc_server.register_endpoint(self) self._rpc_server.run(executor=CONF.oslo_rpc_executor) self._notify_started('Engine server started.') def stop(self, graceful=False): # NOTE(rakhmerov): Unfortunately, oslo.service doesn't pass the # 'graceful' parameter with a correct value. It's simply ignored # in the corresponding call chain leading to a concrete service. # The only workaround for now is to check the 'graceful_shutdown_timeout' # configuration option. If it's not empty (not None or 0) then we # should treat it as a graceful shutdown. graceful = bool(CONF.graceful_shutdown_timeout) LOG.info( 'Stopping an engine server [graceful=%s, timeout=%s]', graceful, CONF.graceful_shutdown_timeout ) super(EngineServer, self).stop(graceful) # The rpc server needs to be stopped first so that the engine # server stops receiving new RPC calls. Under load, this operation # may take a long time in case of graceful shutdown because there # may still be RPC messages already polled from the queue and # waiting for processing. So the underlying RPC server has to wait # until they are processed. if self._rpc_server: self._rpc_server.stop(graceful) action_heartbeat_checker.stop(graceful) if CONF.executor.type == 'local': action_heartbeat_sender.stop(graceful) if self._scheduler: self._scheduler.stop(graceful) sched_base.destroy_system_scheduler() if self._expiration_policy_tg: self._expiration_policy_tg.stop(graceful) def wait(self): LOG.info("Waiting for an engine server to exit...") def start_workflow(self, rpc_ctx, wf_identifier, wf_namespace, wf_ex_id, wf_input, description, params): """Receives calls over RPC to start workflows on engine. :param rpc_ctx: RPC request context. :param wf_identifier: Workflow definition identifier. :param wf_namespace: Workflow namespace. :param wf_input: Workflow input. :param wf_ex_id: Workflow execution id. If passed, it will be set in the new execution object. :param description: Workflow execution description. :param params: Additional workflow type specific parameters. :return: Workflow execution. """ LOG.info( "Received RPC request 'start_workflow'[workflow_identifier=%s, " "workflow_input=%s, description=%s, params=%s]", wf_identifier, utils.cut(wf_input), description, params ) return self.engine.start_workflow( wf_identifier, wf_namespace, wf_ex_id, wf_input, description, **params ) def start_action(self, rpc_ctx, action_name, action_input, description, namespace, params): """Receives calls over RPC to start actions on engine. :param rpc_ctx: RPC request context. :param action_name: Name of the action. :param action_input: Input dictionary for the action. :param description: Description of the new action execution. :param namespace: The namespace of the action. :param params: Extra parameters to run the action. :return: Action execution. """ LOG.info( "Received RPC request 'start_action'[name=%s, input=%s, " "description=%s, namespace=%s, params=%s]", action_name, utils.cut(action_input), description, namespace, params ) return self.engine.start_action( action_name, action_input, description, namespace=namespace, **params ) def on_action_complete(self, rpc_ctx, action_ex_id, result, wf_action): """Receives RPC calls to communicate action result to engine. :param rpc_ctx: RPC request context. :param action_ex_id: Action execution id. :param result: Action result data. :param wf_action: True if given id points to a workflow execution. :return: Action execution.
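Note that 'result' may be None when a completed sub-workflow reports back; in that case the engine reads the output already stored in the workflow execution object (see DefaultEngine.on_action_complete()).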
""" LOG.info( "Received RPC request 'on_action_complete'[action_ex_id=%s, " "result=%s]", action_ex_id, result.cut_repr() if result else '' ) return self.engine.on_action_complete(action_ex_id, result, wf_action) def on_action_update(self, rpc_ctx, action_ex_id, state, wf_action): """Receives RPC calls to communicate action execution state to engine. :param rpc_ctx: RPC request context. :param action_ex_id: Action execution id. :param state: Action execution state. :param wf_action: True if given id points to a workflow execution. :return: Action execution. """ LOG.info( "Received RPC request 'on_action_update'" "[action_ex_id=%s, state=%s]", action_ex_id, state ) return self.engine.on_action_update(action_ex_id, state, wf_action) def pause_workflow(self, rpc_ctx, wf_ex_id): """Receives calls over RPC to pause workflows on engine. :param rpc_ctx: Request context. :param wf_ex_id: Workflow execution id. :return: Workflow execution. """ LOG.info( "Received RPC request 'pause_workflow'[execution_id=%s]", wf_ex_id ) return self.engine.pause_workflow(wf_ex_id) def rerun_workflow(self, rpc_ctx, task_ex_id, reset=True, env=None): """Receives calls over RPC to rerun workflows on engine. :param rpc_ctx: RPC request context. :param task_ex_id: Task execution id. :param reset: If true, then purge action execution for the task. :param env: Environment variables to update. :return: Workflow execution. """ LOG.info( "Received RPC request 'rerun_workflow'[task_ex_id=%s]", task_ex_id ) return self.engine.rerun_workflow(task_ex_id, reset, env) def resume_workflow(self, rpc_ctx, wf_ex_id, env=None): """Receives calls over RPC to resume workflows on engine. :param rpc_ctx: RPC request context. :param wf_ex_id: Workflow execution id. :param env: Environment variables to update. :return: Workflow execution. """ LOG.info( "Received RPC request 'resume_workflow'[wf_ex_id=%s]", wf_ex_id ) return self.engine.resume_workflow(wf_ex_id, env) def stop_workflow(self, rpc_ctx, wf_ex_id, state, message=None): """Receives calls over RPC to stop workflows on engine. Sets execution state to SUCCESS or ERROR. No more tasks will be scheduled. Running tasks won't be killed, but their results will be ignored. :param rpc_ctx: RPC request context. :param wf_ex_id: Workflow execution id. :param state: State assigned to the workflow. Permitted states are SUCCESS or ERROR. :param message: Optional information string. :return: Workflow execution. """ LOG.info( "Received RPC request 'stop_workflow'[execution_id=%s," " state=%s, message=%s]", wf_ex_id, state, message ) return self.engine.stop_workflow(wf_ex_id, state, message) def rollback_workflow(self, rpc_ctx, wf_ex_id): """Receives calls over RPC to rollback workflows on engine. :param rpc_ctx: RPC request context. :param wf_ex_id Workflow execution id. :return: Workflow execution. """ LOG.info( "Received RPC request 'rollback_workflow'[execution_id=%s]", wf_ex_id ) return self.engine.rollback_workflow(wf_ex_id) def report_running_actions(self, rpc_ctx, action_ex_ids): """Receives calls over RPC to receive action execution heartbeats. :param rpc_ctx: RPC request context. :param action_ex_ids: Action execution ids. 
""" LOG.info( "Received RPC request 'report_running_actions'[action_ex_ids=%s]", action_ex_ids ) return self.engine.process_action_heartbeats(action_ex_ids) def get_oslo_service(setup_profiler=True): return EngineServer( default_engine.DefaultEngine(), setup_profiler=setup_profiler ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/policies.py0000644000175000017500000004061400000000000021221 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.db import utils as db_utils from mistral.db.v2 import api as db_api from mistral.engine import base from mistral.engine import post_tx_queue from mistral.engine import workflow_handler as wf_handler from mistral import expressions from mistral.scheduler import base as sched_base from mistral.utils import wf_trace from mistral.workflow import data_flow from mistral.workflow import states import six _CONTINUE_TASK_PATH = 'mistral.engine.policies._continue_task' _COMPLETE_TASK_PATH = 'mistral.engine.policies._complete_task' _FAIL_IF_INCOMPLETE_TASK_PATH = ( 'mistral.engine.policies._fail_task_if_incomplete' ) def _log_task_delay(task_ex, delay_sec, state=states.RUNNING_DELAYED): wf_trace.info( task_ex, "Task '%s' [%s -> %s, delay = %s sec]" % (task_ex.name, task_ex.state, state, delay_sec) ) def build_policies(policies_spec, wf_spec): task_defaults = wf_spec.get_task_defaults() wf_policies = task_defaults.get_policies() if task_defaults else None if not (policies_spec or wf_policies): return [] return construct_policies_list(policies_spec, wf_policies) def get_policy_factories(): return [ build_pause_before_policy, build_wait_before_policy, build_wait_after_policy, build_fail_on_policy, build_retry_policy, build_timeout_policy, build_concurrency_policy ] def construct_policies_list(policies_spec, wf_policies): policies = [] for factory in get_policy_factories(): policy = factory(policies_spec) if wf_policies and not policy: policy = factory(wf_policies) if policy: policies.append(policy) return policies def build_wait_before_policy(policies_spec): if not policies_spec: return None wait_before = policies_spec.get_wait_before() if isinstance(wait_before, six.string_types) or wait_before > 0: return WaitBeforePolicy(wait_before) else: return None def build_wait_after_policy(policies_spec): if not policies_spec: return None wait_after = policies_spec.get_wait_after() if isinstance(wait_after, six.string_types) or wait_after > 0: return WaitAfterPolicy(wait_after) else: return None def build_retry_policy(policies_spec): if not policies_spec: return None retry = policies_spec.get_retry() if not retry: return None return RetryPolicy( retry.get_count(), retry.get_delay(), retry.get_break_on(), retry.get_continue_on() ) def build_timeout_policy(policies_spec): if not policies_spec: return None timeout_policy = policies_spec.get_timeout() if isinstance(timeout_policy, 
six.string_types) or timeout_policy > 0: return TimeoutPolicy(timeout_policy) else: return None def build_pause_before_policy(policies_spec): if not policies_spec: return None pause_before_policy = policies_spec.get_pause_before() return (PauseBeforePolicy(pause_before_policy) if pause_before_policy else None) def build_concurrency_policy(policies_spec): if not policies_spec: return None concurrency_policy = policies_spec.get_concurrency() return (ConcurrencyPolicy(concurrency_policy) if concurrency_policy else None) def build_fail_on_policy(policies_spec): if not policies_spec: return None fail_on_policy = policies_spec.get_fail_on() return (FailOnPolicy(fail_on_policy) if fail_on_policy else None) def _ensure_context_has_key(runtime_context, key): if not runtime_context: runtime_context = {} if key not in runtime_context: runtime_context.update({key: {}}) return runtime_context class WaitBeforePolicy(base.TaskPolicy): _schema = { "properties": { "delay": { "type": "integer", "minimum": 0 } } } def __init__(self, delay): self.delay = delay def before_task_start(self, task_ex, task_spec): super(WaitBeforePolicy, self).before_task_start(task_ex, task_spec) # No need to wait for a task if delay is 0 if self.delay == 0: return context_key = 'wait_before_policy' runtime_context = _ensure_context_has_key( task_ex.runtime_context, context_key ) task_ex.runtime_context = runtime_context policy_context = runtime_context[context_key] if policy_context.get('skip'): # Unset state 'RUNNING_DELAYED'. wf_trace.info( task_ex, "Task '%s' [%s -> %s]" % (task_ex.name, states.RUNNING_DELAYED, states.RUNNING) ) task_ex.state = states.RUNNING return if task_ex.state != states.IDLE: policy_context.update({'skip': True}) _log_task_delay(task_ex, self.delay) task_ex.state = states.RUNNING_DELAYED sched = sched_base.get_system_scheduler() job = sched_base.SchedulerJob( run_after=self.delay, func_name=_CONTINUE_TASK_PATH, func_args={ 'task_ex_id': task_ex.id } ) sched.schedule(job) class WaitAfterPolicy(base.TaskPolicy): _schema = { "properties": { "delay": { "type": "integer", "minimum": 0 } } } def __init__(self, delay): self.delay = delay def after_task_complete(self, task_ex, task_spec): super(WaitAfterPolicy, self).after_task_complete(task_ex, task_spec) # No need to postpone a task if delay is 0 if self.delay == 0: return context_key = 'wait_after_policy' runtime_context = _ensure_context_has_key( task_ex.runtime_context, context_key ) task_ex.runtime_context = runtime_context policy_context = runtime_context[context_key] if policy_context.get('skip'): # Skip, already processed. return policy_context.update({'skip': True}) _log_task_delay(task_ex, self.delay) end_state = task_ex.state end_state_info = task_ex.state_info # TODO(rakhmerov): Policies probably need to have tasks.Task # interface in order to manage task state safely. # Set task state to 'RUNNING_DELAYED'. task_ex.state = states.RUNNING_DELAYED task_ex.state_info = ( 'Suspended by wait-after policy for %s seconds' % self.delay ) # Schedule to change task state to RUNNING again. 
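# More precisely, the scheduled _complete_task() call re-applies the
# end state and state info that were saved above before the delay.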
sched = sched_base.get_system_scheduler() job = sched_base.SchedulerJob( run_after=self.delay, func_name=_COMPLETE_TASK_PATH, func_args={ 'task_ex_id': task_ex.id, 'state': end_state, 'state_info': end_state_info } ) sched.schedule(job) class RetryPolicy(base.TaskPolicy): _schema = { "properties": { "delay": { "type": "integer", "minimum": 0 }, "count": { "type": "integer", "minimum": 0 }, } } def __init__(self, count, delay, break_on, continue_on): self.count = count self.delay = delay self._break_on_clause = break_on self._continue_on_clause = continue_on def after_task_complete(self, task_ex, task_spec): """Possible Cases: 1. state = SUCCESS if continue_on is not specified, no need to move to next iteration; if current:count achieve retry:count then policy breaks the loop (regardless on continue-on condition); otherwise - check continue_on condition and if it is True - schedule the next iteration, otherwise policy breaks the loop. 2. retry:count = 5, current:count = 2, state = ERROR, state = IDLE/DELAYED, current:count = 3 3. retry:count = 5, current:count = 4, state = ERROR Iterations complete therefore state = #{state}, current:count = 4. """ super(RetryPolicy, self).after_task_complete(task_ex, task_spec) # There is nothing to repeat if self.count == 0: return # TODO(m4dcoder): If the task_ex.action_executions and # task_ex.workflow_executions collection are not called, # then the retry_no in the runtime_context of the task_ex will not # be updated accurately. To be exact, the retry_no will be one # iteration behind. ex = task_ex.executions # noqa context_key = 'retry_task_policy' runtime_context = _ensure_context_has_key( task_ex.runtime_context, context_key ) wf_ex = task_ex.workflow_execution ctx_view = data_flow.ContextView( data_flow.get_current_task_dict(task_ex), data_flow.evaluate_task_outbound_context(task_ex), wf_ex.context, wf_ex.input ) continue_on_evaluation = expressions.evaluate( self._continue_on_clause, ctx_view ) break_on_evaluation = expressions.evaluate( self._break_on_clause, ctx_view ) task_ex.runtime_context = runtime_context state = task_ex.state if not states.is_completed(state) or states.is_cancelled(state): return policy_context = runtime_context[context_key] retry_no = 0 if 'retry_no' in policy_context: retry_no = policy_context['retry_no'] del policy_context['retry_no'] retries_remain = retry_no < self.count stop_continue_flag = ( task_ex.state == states.SUCCESS and not self._continue_on_clause ) stop_continue_flag = ( stop_continue_flag or (self._continue_on_clause and not continue_on_evaluation) ) break_triggered = ( task_ex.state == states.ERROR and break_on_evaluation ) if not retries_remain or break_triggered or stop_continue_flag: return data_flow.invalidate_task_execution_result(task_ex) policy_context['retry_no'] = retry_no + 1 runtime_context[context_key] = policy_context # NOTE(vgvoleg): join tasks in direct workflows can't be # retried as-is, because these tasks can't start without # a correct logical state. 
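# Instead, such a task is put back into WAITING and its logical state
# is re-evaluated after the delay via _schedule_refresh_task_state().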
if hasattr(task_spec, "get_join") and task_spec.get_join(): from mistral.engine import task_handler as t_h _log_task_delay(task_ex, self.delay, states.WAITING) task_ex.state = states.WAITING t_h._schedule_refresh_task_state(task_ex.id, self.delay) return _log_task_delay(task_ex, self.delay) task_ex.state = states.RUNNING_DELAYED sched = sched_base.get_system_scheduler() job = sched_base.SchedulerJob( run_after=self.delay, func_name=_CONTINUE_TASK_PATH, func_args={'task_ex_id': task_ex.id} ) sched.schedule(job) @staticmethod def refresh_runtime_context(task_ex): runtime_context = task_ex.runtime_context or {} retry_task_policy = runtime_context.get('retry_task_policy') if retry_task_policy: retry_task_policy['retry_no'] = 0 task_ex.runtime_context['retry_task_policy'] = retry_task_policy class TimeoutPolicy(base.TaskPolicy): _schema = { "properties": { "delay": { "type": "integer", "minimum": 0 } } } def __init__(self, timeout_sec): self.delay = timeout_sec def before_task_start(self, task_ex, task_spec): super(TimeoutPolicy, self).before_task_start(task_ex, task_spec) # No timeout if delay is 0 if self.delay == 0: return sched = sched_base.get_system_scheduler() job = sched_base.SchedulerJob( run_after=self.delay, func_name=_FAIL_IF_INCOMPLETE_TASK_PATH, func_args={ 'task_ex_id': task_ex.id, 'timeout': self.delay } ) sched.schedule(job) wf_trace.info( task_ex, "Timeout check scheduled [task=%s, timeout(s)=%s]." % (task_ex.id, self.delay) ) class PauseBeforePolicy(base.TaskPolicy): _schema = { "properties": { "expr": {"type": "boolean"} } } def __init__(self, expression): self.expr = expression def before_task_start(self, task_ex, task_spec): super(PauseBeforePolicy, self).before_task_start(task_ex, task_spec) if not self.expr: return wf_trace.info( task_ex, "Workflow paused before task '%s' [%s -> %s]" % (task_ex.name, task_ex.workflow_execution.state, states.PAUSED) ) task_ex.state = states.IDLE wf_handler.pause_workflow(task_ex.workflow_execution) class ConcurrencyPolicy(base.TaskPolicy): _schema = { "properties": { "concurrency": { "type": "integer", "minimum": 0 } } } def __init__(self, concurrency): self.concurrency = concurrency def before_task_start(self, task_ex, task_spec): super(ConcurrencyPolicy, self).before_task_start(task_ex, task_spec) if self.concurrency == 0: return # This policy doesn't do anything except validating "concurrency" # property value and setting a variable into task runtime context. # This variable is then used to define how many action executions # may be started in parallel. 
context_key = 'concurrency' runtime_context = _ensure_context_has_key( task_ex.runtime_context, context_key ) runtime_context[context_key] = self.concurrency task_ex.runtime_context = runtime_context class FailOnPolicy(base.TaskPolicy): _schema = { "properties": { "fail-on": {"type": "boolean"}, } } def __init__(self, fail_on): self.fail_on = fail_on def before_task_start(self, task_ex, task_spec): pass def after_task_complete(self, task_ex, task_spec): if task_ex.state != states.SUCCESS: return super(FailOnPolicy, self).after_task_complete(task_ex, task_spec) if self.fail_on: task_ex.state = states.ERROR task_ex.state_info = 'Failed by fail-on policy' @db_utils.retry_on_db_error @post_tx_queue.run def _continue_task(task_ex_id): from mistral.engine import task_handler with db_api.transaction(): task_ex = db_api.load_task_execution(task_ex_id) task_handler.continue_task(task_ex) @db_utils.retry_on_db_error @post_tx_queue.run def _complete_task(task_ex_id, state, state_info): from mistral.engine import task_handler with db_api.transaction(): task_ex = db_api.load_task_execution(task_ex_id) task_handler.complete_task(task_ex, state, state_info) @db_utils.retry_on_db_error @post_tx_queue.run def _fail_task_if_incomplete(task_ex_id, timeout): from mistral.engine import task_handler with db_api.transaction(): task_ex = db_api.load_task_execution(task_ex_id) if not states.is_completed(task_ex.state): msg = 'Task timed out [timeout(s)=%s].' % timeout task_handler.complete_task(task_ex, states.ERROR, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/post_tx_queue.py0000644000175000017500000000744600000000000022324 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet import functools from oslo_config import cfg from oslo_log import log as logging from osprofiler import profiler from mistral import context from mistral.db import utils as db_utils from mistral.db.v2 import api as db_api from mistral_lib import utils """ This module contains a mini framework for scheduling operations while performing transactional processing of a workflow event such as completing a workflow action. The scheduled operations will run after the main DB transaction, in a new transaction, if needed. """ LOG = logging.getLogger(__name__) _THREAD_LOCAL_NAME = "__operation_queue_thread_local" def _prepare(): # Register two queues: transactional and non transactional operations. utils.set_thread_local(_THREAD_LOCAL_NAME, (list(), list())) def _clear(): utils.set_thread_local(_THREAD_LOCAL_NAME, None) def register_operation(func, args=None, in_tx=False): """Register an operation.""" _get_queues()[0 if in_tx else 1].append((func, args or [])) def _get_queues(): queues = utils.get_thread_local(_THREAD_LOCAL_NAME) if queues is None: raise RuntimeError( 'Operation queue is not initialized for the current thread.' 
' Most likely some engine method is not decorated with' ' post_tx_queue.run()' ) return queues def run(func): """Decorator that runs all operations registered in the operation queue. Various engine methods may register such operations. All such methods must be decorated with this decorator. """ @functools.wraps(func) def decorate(*args, **kw): _prepare() try: res = func(*args, **kw) queues = _get_queues() tx_queue = queues[0] non_tx_queue = queues[1] if not tx_queue and not non_tx_queue: return res auth_ctx = context.ctx() if context.has_ctx() else None def _within_new_thread(): # This is a new thread so we need to init a profiler again. if cfg.CONF.profiler.enabled: profiler.init(cfg.CONF.profiler.hmac_keys) old_auth_ctx = context.ctx() if context.has_ctx() else None context.set_ctx(auth_ctx) try: if tx_queue: _process_tx_queue(tx_queue) if non_tx_queue: _process_non_tx_queue(non_tx_queue) finally: context.set_ctx(old_auth_ctx) eventlet.spawn(_within_new_thread) finally: _clear() return res return decorate @db_utils.retry_on_db_error @run def _process_tx_queue(queue): with db_api.transaction(): for func, args in queue: try: func(*args) except Exception: LOG.exception("Failed to run transactional engine operation.") raise def _process_non_tx_queue(queue): for func, args in queue: try: func(*args) except Exception: LOG.exception("Failed to run non-transactional engine operation.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/task_handler.py0000644000175000017500000004222400000000000022050 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Nokia Networks. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from osprofiler import profiler import traceback as tb from mistral.db import utils as db_utils from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral.engine import post_tx_queue from mistral.engine import tasks from mistral.engine import workflow_handler as wf_handler from mistral import exceptions as exc from mistral.lang import parser as spec_parser from mistral.scheduler import base as sched_base from mistral.workflow import base as wf_base from mistral.workflow import commands as wf_cmds from mistral.workflow import states """Responsible for running tasks and handling results.""" LOG = logging.getLogger(__name__) _REFRESH_TASK_STATE_PATH = ( 'mistral.engine.task_handler._refresh_task_state' ) _SCHEDULED_ON_ACTION_COMPLETE_PATH = ( 'mistral.engine.task_handler._scheduled_on_action_complete' ) _SCHEDULED_ON_ACTION_UPDATE_PATH = ( 'mistral.engine.task_handler._scheduled_on_action_update' ) @profiler.trace('task-handler-run-task', hide_args=True) def run_task(wf_cmd): """Runs a workflow task. :param wf_cmd: Workflow command.
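If the task fails to start with a Mistral error, the task and its workflow execution are force-failed via force_fail_task().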
""" task = _build_task_from_command(wf_cmd) try: if task.waiting and task.rerun: task.set_state(states.WAITING, 'Task is waiting.') _schedule_refresh_task_state(task.task_ex.id) task.run() except exc.MistralException as e: wf_ex = wf_cmd.wf_ex task_spec = wf_cmd.task_spec msg = ( "Failed to run task [error=%s, wf=%s, task=%s]:\n%s" % (e, wf_ex.name, task_spec.get_name(), tb.format_exc()) ) force_fail_task(task.task_ex, msg, task=task) return _check_affected_tasks(task) def mark_task_running(task_ex, wf_spec): task = _build_task_from_execution(wf_spec, task_ex) old_task_state = task_ex.state task.set_state(states.RUNNING, None, False) task.notify(old_task_state, states.RUNNING) @profiler.trace('task-handler-on-action-complete', hide_args=True) def _on_action_complete(action_ex): """Handles action completion event. :param action_ex: Action execution. """ task_ex = action_ex.task_execution if not task_ex: return task_spec = spec_parser.get_task_spec(task_ex.spec) wf_ex = task_ex.workflow_execution task = _create_task( wf_ex, spec_parser.get_workflow_spec_by_execution_id(wf_ex.id), task_spec, task_ex.in_context, task_ex ) try: task.on_action_complete(action_ex) except exc.MistralException as e: wf_ex = task_ex.workflow_execution msg = ("Failed to handle action completion [error=%s, wf=%s, task=%s," " action=%s]:\n%s" % (e, wf_ex.name, task_ex.name, action_ex.name, tb.format_exc())) force_fail_task(task_ex, msg, task=task) return _check_affected_tasks(task) @profiler.trace('task-handler-on-action-update', hide_args=True) def _on_action_update(action_ex): """Handles action update event. :param action_ex: Action execution. """ task_ex = action_ex.task_execution if not task_ex: return task_spec = spec_parser.get_task_spec(task_ex.spec) wf_ex = task_ex.workflow_execution task = _create_task( wf_ex, spec_parser.get_workflow_spec_by_execution_id(wf_ex.id), task_spec, task_ex.in_context, task_ex ) try: task.on_action_update(action_ex) if states.is_paused(action_ex.state): wf_handler.pause_workflow(wf_ex) if states.is_running(action_ex.state): # If any subworkflow of the parent workflow is paused, # then keep the parent workflow execution paused. for task_ex in wf_ex.task_executions: if states.is_paused(task_ex.state): return # Otherwise if no other subworkflow is paused, # then resume the parent workflow execution. wf_handler.resume_workflow(wf_ex) except exc.MistralException as e: wf_ex = task_ex.workflow_execution msg = ("Failed to handle action update [error=%s, wf=%s, task=%s," " action=%s]:\n%s" % (e, wf_ex.name, task_ex.name, action_ex.name, tb.format_exc())) force_fail_task(task_ex, msg, task=task) return _check_affected_tasks(task) def force_fail_task(task_ex, msg, task=None): """Forces the given task to fail. This method implements the 'forced' task fail without giving a chance to a workflow controller to handle the error. Its main purpose is to reflect errors caused by workflow structure (errors 'publish', 'on-xxx' clauses etc.) rather than failed actions. If such an error happens we should also force the entire workflow to fail. I.e., this kind of error must be propagated to a higher level, to the workflow. :param task_ex: Task execution. :param msg: Error message. :param task: Task object. Optional. 
""" LOG.error(msg) if not task: wf_spec = spec_parser.get_workflow_spec_by_execution_id( task_ex.workflow_execution_id ) task = _build_task_from_execution(wf_spec, task_ex) old_task_state = task_ex.state task.set_state(states.ERROR, msg) task.notify(old_task_state, states.ERROR) task.save_finished_time() wf_handler.force_fail_workflow(task_ex.workflow_execution, msg) def continue_task(task_ex): if not task_ex: return wf_spec = spec_parser.get_workflow_spec_by_execution_id( task_ex.workflow_execution_id ) task = _build_task_from_execution(wf_spec, task_ex) try: task.set_state(states.RUNNING, None) task.run() except exc.MistralException as e: wf_ex = task_ex.workflow_execution msg = ( "Failed to run task [error=%s, wf=%s, task=%s]:\n%s" % (e, wf_ex.name, task_ex.name, tb.format_exc()) ) force_fail_task(task_ex, msg, task=task) return _check_affected_tasks(task) def complete_task(task_ex, state, state_info): if not task_ex: return wf_spec = spec_parser.get_workflow_spec_by_execution_id( task_ex.workflow_execution_id ) task = _build_task_from_execution(wf_spec, task_ex) try: task.complete(state, state_info) except exc.MistralException as e: wf_ex = task_ex.workflow_execution msg = ( "Failed to complete task [error=%s, wf=%s, task=%s]:\n%s" % (e, wf_ex.name, task_ex.name, tb.format_exc()) ) force_fail_task(task_ex, msg, task=task) return _check_affected_tasks(task) @profiler.trace('task-handler-check-affected-tasks', hide_args=True) def _check_affected_tasks(task): if not task.is_completed(): return task_ex = task.task_ex wf_ex = task_ex.workflow_execution if states.is_completed(wf_ex.state): return wf_spec = spec_parser.get_workflow_spec_by_execution_id( task_ex.workflow_execution_id ) wf_ctrl = wf_base.get_controller(wf_ex, wf_spec) affected_task_execs = wf_ctrl.find_indirectly_affected_task_executions( task_ex.name ) def _schedule_if_needed(t_ex_id): # NOTE(rakhmerov): we need to minimize the number of scheduled jobs # that refresh state of "join" tasks. We'll check if corresponding # jobs are already scheduled. Note that we must ignore scheduled jobs # that are currently being processed because of a possible race with # the transaction that deletes scheduled jobs, i.e. the job may still # exist in DB (the deleting transaction didn't commit yet) but it has # already been processed and the task state hasn't changed. 
sched = sched_base.get_system_scheduler() jobs_exist = sched.has_scheduled_jobs( key=_get_refresh_state_job_key(t_ex_id), processing=False ) if not jobs_exist: _schedule_refresh_task_state(t_ex_id) for t_ex in affected_task_execs: post_tx_queue.register_operation( _schedule_if_needed, args=[t_ex.id], in_tx=True ) def _build_task_from_execution(wf_spec, task_ex): return _create_task( task_ex.workflow_execution, wf_spec, wf_spec.get_task(task_ex.name), task_ex.in_context, task_ex ) @profiler.trace('task-handler-build-task-from-command', hide_args=True) def _build_task_from_command(cmd): if isinstance(cmd, wf_cmds.RunExistingTask): task = _create_task( cmd.wf_ex, cmd.wf_spec, spec_parser.get_task_spec(cmd.task_ex.spec), cmd.ctx, task_ex=cmd.task_ex, unique_key=cmd.task_ex.unique_key, waiting=cmd.task_ex.state == states.WAITING, triggered_by=cmd.triggered_by, rerun=cmd.rerun ) if cmd.reset: task.reset() return task if isinstance(cmd, wf_cmds.RunTask): task = _create_task( cmd.wf_ex, cmd.wf_spec, cmd.task_spec, cmd.ctx, unique_key=cmd.unique_key, waiting=cmd.is_waiting(), triggered_by=cmd.triggered_by ) return task raise exc.MistralError('Unsupported workflow command: %s' % cmd) def _create_task(wf_ex, wf_spec, task_spec, ctx, task_ex=None, unique_key=None, waiting=False, triggered_by=None, rerun=False): if task_spec.get_with_items(): cls = tasks.WithItemsTask else: cls = tasks.RegularTask return cls( wf_ex, wf_spec, task_spec, ctx, task_ex=task_ex, unique_key=unique_key, waiting=waiting, triggered_by=triggered_by, rerun=rerun ) @db_utils.retry_on_db_error @post_tx_queue.run @profiler.trace('task-handler-refresh-task-state', hide_args=True) def _refresh_task_state(task_ex_id): with db_api.transaction(): task_ex = db_api.load_task_execution(task_ex_id) if not task_ex: return if (states.is_completed(task_ex.state) or task_ex.state == states.RUNNING): return wf_ex = task_ex.workflow_execution if states.is_completed(wf_ex.state): return wf_spec = spec_parser.get_workflow_spec_by_execution_id( task_ex.workflow_execution_id ) wf_ctrl = wf_base.get_controller(wf_ex, wf_spec) with db_api.named_lock(task_ex.id): # NOTE: we have to use this lock to prevent two (or more) such # methods from changing task state and starting its action or # workflow. Checking task state outside of this section is a # performance optimization because locking is pretty expensive. db_api.refresh(task_ex) if (states.is_completed(task_ex.state) or task_ex.state == states.RUNNING): return log_state = wf_ctrl.get_logical_task_state(task_ex) state = log_state.state state_info = log_state.state_info # Update 'triggered_by' because it could have changed. task_ex.runtime_context['triggered_by'] = log_state.triggered_by if state == states.RUNNING: continue_task(task_ex) elif state == states.ERROR: complete_task(task_ex, state, state_info) elif state == states.WAITING: LOG.info( "Task execution is still in WAITING state" " [task_ex_id=%s, task_name=%s]", task_ex_id, task_ex.name ) else: # Must never get here. raise RuntimeError( 'Unexpected logical task state [task_ex_id=%s, ' 'task_name=%s, state=%s]' % (task_ex_id, task_ex.name, state) ) def _schedule_refresh_task_state(task_ex_id, delay=0): """Schedules task preconditions check. This method provides transactional decoupling of task preconditions check from events that can potentially satisfy those preconditions. It's needed in non-locking model in order to avoid 'phantom read' phenomena when reading state of multiple tasks to see if a task that depends on them can start. 
Just starting a separate transaction without using scheduler is not safe due to concurrency window that we'll have in this case (time between transactions) whereas scheduler is a special component that is designed to be resistant to failures. :param task_ex_id: Task execution ID. :param delay: Delay. """ sched = sched_base.get_system_scheduler() job = sched_base.SchedulerJob( run_after=delay, func_name=_REFRESH_TASK_STATE_PATH, func_args={'task_ex_id': task_ex_id}, key=_get_refresh_state_job_key(task_ex_id) ) sched.schedule(job) def _get_refresh_state_job_key(task_ex_id): return 'th_r_t_s-%s' % task_ex_id @db_utils.retry_on_db_error @post_tx_queue.run def _scheduled_on_action_complete(action_ex_id, wf_action): with db_api.transaction(): if wf_action: action_ex = db_api.load_workflow_execution(action_ex_id) else: action_ex = db_api.load_action_execution(action_ex_id) if action_ex: _on_action_complete(action_ex) def schedule_on_action_complete(action_ex, delay=0): """Schedules task completion check. This method provides transactional decoupling of action completion from task completion check. It's needed in non-locking model in order to avoid 'phantom read' phenomena when reading state of multiple actions to see if a task is completed. Just starting a separate transaction without using scheduler is not safe due to concurrency window that we'll have in this case (time between transactions) whereas scheduler is a special component that is designed to be resistant to failures. :param action_ex: Action execution. :param delay: Minimum amount of time before task completion check should be made. """ # Optimization to avoid opening a new transaction if it's not needed. if not action_ex.task_execution.spec.get('with-items'): _on_action_complete(action_ex) return sched = sched_base.get_system_scheduler() job = sched_base.SchedulerJob( run_after=delay, func_name=_SCHEDULED_ON_ACTION_COMPLETE_PATH, func_args={ 'action_ex_id': action_ex.id, 'wf_action': isinstance(action_ex, models.WorkflowExecution) }, key='th_on_a_c-%s' % action_ex.task_execution_id ) sched.schedule(job) @db_utils.retry_on_db_error @post_tx_queue.run def _scheduled_on_action_update(action_ex_id, wf_action): with db_api.transaction(): if wf_action: action_ex = db_api.load_workflow_execution(action_ex_id) else: action_ex = db_api.load_action_execution(action_ex_id) if action_ex: _on_action_update(action_ex) def schedule_on_action_update(action_ex, delay=0): """Schedules task update check. This method provides transactional decoupling of action update from task update check. It's needed in non-locking model in order to avoid 'phantom read' phenomena when reading state of multiple actions to see if a task is updated. Just starting a separate transaction without using scheduler is not safe due to concurrency window that we'll have in this case (time between transactions) whereas scheduler is a special component that is designed to be resistant to failures. :param action_ex: Action execution. :param delay: Minimum amount of time before task update check should be made. """ # Optimization to avoid opening a new transaction if it's not needed. 
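# Only 'with-items' tasks are routed through the scheduler here because
# they aggregate results of several concurrent action executions and are
# therefore subject to the phantom read problem described above.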
if not action_ex.task_execution.spec.get('with-items'): _on_action_update(action_ex) return sched = sched_base.get_system_scheduler() job = sched_base.SchedulerJob( run_after=delay, func_name=_SCHEDULED_ON_ACTION_UPDATE_PATH, func_args={ 'action_ex_id': action_ex.id, 'wf_action': isinstance(action_ex, models.WorkflowExecution) }, key='th_on_a_u-%s' % action_ex.task_execution_id ) sched.schedule(job) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/tasks.py0000644000175000017500000010131700000000000020535 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks. # Copyright 2016 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. # Copyright 2019 - NetCracker Technology Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import collections import copy import json from oslo_config import cfg from oslo_log import log as logging from osprofiler import profiler import six from mistral.db.v2 import api as db_api from mistral.engine import actions from mistral.engine import dispatcher from mistral.engine import policies from mistral.engine import post_tx_queue from mistral.engine import workflow_handler as wf_handler from mistral import exceptions as exc from mistral import expressions as expr from mistral.notifiers import base as notif from mistral.notifiers import notification_events as events from mistral.utils import wf_trace from mistral.workflow import base as wf_base from mistral.workflow import commands from mistral.workflow import data_flow from mistral.workflow import states from mistral_lib import utils LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class Task(object): """Task. Represents a workflow task and defines interface that can be used by Mistral engine or its components in order to manipulate with tasks. 
""" def __init__(self, wf_ex, wf_spec, task_spec, ctx, task_ex=None, unique_key=None, waiting=False, triggered_by=None, rerun=False): self.wf_ex = wf_ex self.task_spec = task_spec self.ctx = ctx self.task_ex = task_ex self.wf_spec = wf_spec self.unique_key = unique_key self.waiting = waiting self.triggered_by = triggered_by self.rerun = rerun self.reset_flag = False self.created = False self.state_changed = False def notify(self, old_task_state, new_task_state): publishers = self.wf_ex.params.get('notify') if not publishers and not isinstance(publishers, list): return notifier = notif.get_notifier(cfg.CONF.notifier.type) event = events.identify_task_event(old_task_state, new_task_state) filtered_publishers = [] for publisher in publishers: if not isinstance(publisher, dict): continue target_events = publisher.get('event_types', []) if not target_events or event in target_events: filtered_publishers.append(publisher) if not filtered_publishers: return def _convert_to_notification_data(): return { "id": self.task_ex.id, "name": self.task_ex.name, "workflow_execution_id": self.task_ex.workflow_execution_id, "workflow_name": self.task_ex.workflow_name, "workflow_namespace": self.task_ex.workflow_namespace, "workflow_id": self.task_ex.workflow_id, "state": self.task_ex.state, "state_info": self.task_ex.state_info, "type": self.task_ex.type, "project_id": self.task_ex.project_id, "created_at": utils.datetime_to_str(self.task_ex.created_at), "updated_at": utils.datetime_to_str(self.task_ex.updated_at), "started_at": utils.datetime_to_str(self.task_ex.started_at), "finished_at": utils.datetime_to_str(self.task_ex.finished_at) } def _send_notification(): notifier.notify( self.task_ex.id, _convert_to_notification_data(), event, self.task_ex.updated_at, filtered_publishers ) post_tx_queue.register_operation(_send_notification) def is_completed(self): return self.task_ex and states.is_completed(self.task_ex.state) def is_waiting(self): return self.waiting def is_created(self): return self.created def is_state_changed(self): return self.state_changed @abc.abstractmethod def on_action_complete(self, action_ex): """Handle action completion. :param action_ex: Action execution. """ raise NotImplementedError @abc.abstractmethod def on_action_update(self, action_ex): """Handle action update. :param action_ex: Action execution. """ raise NotImplementedError @abc.abstractmethod def run(self): """Runs task.""" raise NotImplementedError @profiler.trace('task-defer') def defer(self): """Defers task. This method puts task to a waiting state. """ # NOTE(rakhmerov): using named locks may cause problems under load # with MySQL that raises a lot of deadlocks in case of high # parallelism so it makes sense to do a fast check if the object # already exists in DB outside of the lock. if not self.task_ex: t_execs = db_api.get_task_executions( workflow_execution_id=self.wf_ex.id, unique_key=self.unique_key, state=states.WAITING ) self.task_ex = t_execs[0] if t_execs else None if self.task_ex: return with db_api.named_lock(self.unique_key): if not self.task_ex: t_execs = db_api.get_task_executions( workflow_execution_id=self.wf_ex.id, unique_key=self.unique_key ) self.task_ex = t_execs[0] if t_execs else None msg = 'Task is waiting.' 
if not self.task_ex: self._create_task_execution( state=states.WAITING, state_info=msg ) elif self.task_ex.state != states.WAITING: self.set_state(states.WAITING, msg) def reset(self): self.reset_flag = True @profiler.trace('task-set-state') def set_state(self, state, state_info, processed=None): """Sets task state without executing post completion logic. :param state: New task state. :param state_info: New state information (i.e. error message). :param processed: New "processed" flag value. :return: True if the state was changed as a result of this call, False otherwise. """ assert self.task_ex cur_state = self.task_ex.state # Set initial started_at in case of waiting => running. # We can't set this just in run_existing, because task retries # will update started_at, which is incorrect. if cur_state == states.WAITING and state == states.RUNNING: self.save_started_time() if cur_state != state or self.task_ex.state_info != state_info: task_ex = db_api.update_task_execution_state( id=self.task_ex.id, cur_state=cur_state, state=state ) if task_ex is None: # Do nothing because the update query did not change the DB. return False self.task_ex = task_ex self.task_ex.state_info = json.dumps(state_info) \ if isinstance(state_info, dict) else state_info self.state_changed = True if processed is not None: self.task_ex.processed = processed wf_trace.info( self.task_ex.workflow_execution, "Task '%s' (%s) [%s -> %s, msg=%s]" % (self.task_ex.name, self.task_ex.id, cur_state, state, self.task_ex.state_info) ) return True @profiler.trace('task-complete') def complete(self, state, state_info=None): """Complete task and set specified state. Method sets specified task state and runs all necessary post completion logic such as publishing workflow variables and scheduling new workflow commands. :param state: New task state. :param state_info: New state information (i.e. error message). """ assert self.task_ex # Record the current task state. old_task_state = self.task_ex.state # Ignore if task already completed. if self.is_completed(): # Publish task event again so subscribers know # task completed state is being processed again. self.notify(old_task_state, self.task_ex.state) return # If we were unable to change the task state it means that it was # already changed by a concurrent process. In this case we need to # skip all regular completion logic like scheduling new tasks, # running engine commands and publishing. if not self.set_state(state, state_info): return data_flow.publish_variables(self.task_ex, self.task_spec) if not self.task_spec.get_keep_result(): # Destroy task result. for ex in self.task_ex.action_executions: if hasattr(ex, 'output'): ex.output = {} self._after_task_complete() # Ignore DELAYED state. if self.task_ex.state == states.RUNNING_DELAYED: return wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec) # Calculate commands to process next. cmds = wf_ctrl.continue_workflow(task_ex=self.task_ex) # Save next task names in DB to avoid evaluating them again # in the future. self.task_ex.next_tasks = [] for c in cmds: if commands.is_engine_command(c): continue event = c.triggered_by[0]['event'] if c.triggered_by else None self.task_ex.next_tasks.append((c.task_spec.get_name(), event)) self.task_ex.has_next_tasks = bool(self.task_ex.next_tasks) # Check whether the error is handled. if self.task_ex.state == states.ERROR: self.task_ex.error_handled = any([c.handles_error for c in cmds]) # If workflow is paused we shouldn't schedule new commands # and mark task as processed. 
if states.is_paused(self.wf_ex.state): # Publish task event even if the workflow is paused. self.notify(old_task_state, self.task_ex.state) return # Mark task as processed after all decisions have been made # upon its completion. self.task_ex.processed = True self.register_workflow_completion_check() self.save_finished_time() # Publish task event. self.notify(old_task_state, self.task_ex.state) dispatcher.dispatch_workflow_commands(self.wf_ex, cmds) def register_workflow_completion_check(self): wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec) # Register an asynchronous command to check workflow completion # in a separate transaction if the task may potentially lead to # workflow completion. def _check(): wf_handler.check_and_complete(self.wf_ex.id) if wf_ctrl.may_complete_workflow(self.task_ex): post_tx_queue.register_operation(_check, in_tx=True) @profiler.trace('task-update') def update(self, state, state_info=None): """Update task and set specified state. Method sets specified task state. :param state: New task state. :param state_info: New state information (i.e. error message). """ assert self.task_ex # Record the current task state. old_task_state = self.task_ex.state # Ignore if task already completed. if states.is_completed(self.task_ex.state): # Publish task event again so subscribers know # task completed state is being processed again. self.notify(old_task_state, self.task_ex.state) return # Update only if state transition is valid. if not states.is_valid_transition(self.task_ex.state, state): return # We can't set the task state to RUNNING if some other # child executions are paused. child_states = [a_ex.state for a_ex in self.task_ex.executions] if state == states.RUNNING and states.PAUSED in child_states: return self.set_state(state, state_info) if states.is_completed(self.task_ex.state): self.register_workflow_completion_check() # Publish event. 
self.notify(old_task_state, self.task_ex.state) def _before_task_start(self): policies_spec = self.task_spec.get_policies() for p in policies.build_policies(policies_spec, self.wf_spec): p.before_task_start(self.task_ex, self.task_spec) def _after_task_complete(self): policies_spec = self.task_spec.get_policies() for p in policies.build_policies(policies_spec, self.wf_spec): p.after_task_complete(self.task_ex, self.task_spec) @profiler.trace('task-create-task-execution') def _create_task_execution(self, state=states.RUNNING, state_info=None): task_id = utils.generate_unicode_uuid() task_name = self.task_spec.get_name() task_type = self.task_spec.get_type() task_tags = self.task_spec.get_tags() values = { 'id': task_id, 'name': task_name, 'workflow_execution_id': self.wf_ex.id, 'workflow_name': self.wf_ex.workflow_name, 'workflow_namespace': self.wf_ex.workflow_namespace, 'workflow_id': self.wf_ex.workflow_id, 'tags': task_tags, 'state': state, 'state_info': state_info, 'spec': self.task_spec.to_dict(), 'unique_key': self.unique_key, 'in_context': self.ctx, 'published': {}, 'runtime_context': {}, 'project_id': self.wf_ex.project_id, 'type': task_type } if self.triggered_by: values['runtime_context']['triggered_by'] = self.triggered_by self.task_ex = db_api.create_task_execution(values) self.created = True def _get_safe_rerun(self): safe_rerun = self.task_spec.get_safe_rerun() if safe_rerun is not None: return safe_rerun task_defaults = self.wf_spec.get_task_defaults() if task_defaults: default_safe_rerun = task_defaults.get_safe_rerun() if default_safe_rerun is not None: return default_safe_rerun return False def _get_action_defaults(self): action_name = self.task_spec.get_action_name() if not action_name: return {} env = self.wf_ex.params['env'] return env.get('__actions', {}).get(action_name, {}) def save_started_time(self, value='default'): if not self.task_ex: return time = value if value != 'default' else utils.utc_now_sec() self.task_ex.started_at = time def save_finished_time(self, value='default'): if not self.task_ex: return time = value if value != 'default' else utils.utc_now_sec() self.task_ex.finished_at = time class RegularTask(Task): """Regular task. Takes care of processing regular tasks with one action. """ @profiler.trace('regular-task-on-action-complete', hide_args=True) def on_action_complete(self, action_ex): state = action_ex.state # TODO(rakhmerov): Here we can define more informative messages for # cases when action is successful and when it's not. For example, # in state_info we can specify the cause action. if state == states.SUCCESS: state_info = None else: action_result = action_ex.output.get('result') state_info = str(action_result) if action_result else None self.complete(state, state_info) @profiler.trace('regular-task-on-action-update', hide_args=True) def on_action_update(self, action_ex): self.update(action_ex.state) @profiler.trace('task-run') def run(self): if not self.task_ex: self._run_new() else: self._run_existing() @profiler.trace('task-run-new') def _run_new(self): if self.waiting: self.defer() return self._create_task_execution() self.save_started_time() # Publish event. self.notify(None, self.task_ex.state) LOG.debug( 'Starting task [name=%s, init_state=%s, workflow_name=%s,' ' execution_id=%s]', self.task_spec.get_name(), self.task_ex.state, self.wf_ex.name, self.wf_ex.id ) self._before_task_start() # Policies could possibly change task state. 
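        # (For example, a 'pause-before' policy may have moved the task
        # out of RUNNING, in which case no action must be scheduled.)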
if self.task_ex.state != states.RUNNING: return self._schedule_actions() @profiler.trace('task-run-existing') def _run_existing(self): if self.waiting: return # Explicitly change task state to RUNNING. # Throw exception if the existing task already succeeded. if self.task_ex.state == states.SUCCESS: raise exc.MistralError( 'Rerunning succeeded tasks is not supported.' ) # Record the current task state. old_task_state = self.task_ex.state self.set_state(states.RUNNING, None, processed=False) # Publish event. self.notify(old_task_state, self.task_ex.state) if self.rerun: self.save_started_time() self.save_finished_time(value=None) self._before_task_start() # Policies could possibly change task state. if self.task_ex.state != states.RUNNING: return self._update_inbound_context() self._update_triggered_by() self._reset_actions() self._schedule_actions() def _update_inbound_context(self): assert self.task_ex wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec) self.ctx = wf_ctrl.get_task_inbound_context(self.task_spec) utils.update_dict(self.task_ex.in_context, self.ctx) def _update_triggered_by(self): assert self.task_ex if not self.triggered_by: return self.task_ex.runtime_context['triggered_by'] = self.triggered_by def _reset_actions(self): """Resets task state. Depending on task type this method may reset task state. For example, delete all task actions etc. """ # Reset state of processed task and related action executions. if self.reset_flag: execs = self.task_ex.executions else: execs = [e for e in self.task_ex.executions if (e.accepted and e.state in [states.ERROR, states.CANCELLED])] for ex in execs: ex.accepted = False def _schedule_actions(self): # Regular task schedules just one action. input_dict = self._get_action_input() target = self._get_target(input_dict) action = self._build_action() action.validate_input(input_dict) action.schedule( input_dict, target, safe_rerun=self._get_safe_rerun(), timeout=self._get_timeout() ) @profiler.trace('regular-task-get-target', hide_args=True) def _get_target(self, input_dict): if not self.task_spec.get_target(): return None ctx_view = data_flow.ContextView( input_dict, self.ctx, data_flow.get_workflow_environment_dict(self.wf_ex), self.wf_ex.context, self.wf_ex.input ) return expr.evaluate_recursively( self.task_spec.get_target(), ctx_view ) @profiler.trace('regular-task-get-action-input', hide_args=True) def _get_action_input(self, ctx=None): input_spec = self.task_spec.get_input() input_dict = ( self._evaluate_expression(input_spec, ctx) if input_spec else {} ) if not isinstance(input_dict, dict): raise exc.InputException( "Wrong dynamic input for task: %s. Dict type is expected. " "Actual type: %s. Actual value: %s" % (self.task_spec.get_name(), type(input_dict), str(input_dict)) ) return utils.merge_dicts( input_dict, self._get_action_defaults(), overwrite=False ) def _evaluate_expression(self, expression, ctx=None): ctx_view = data_flow.ContextView( data_flow.get_current_task_dict(self.task_ex), data_flow.get_workflow_environment_dict(self.wf_ex), ctx or {}, self.task_ex.in_context, self.wf_ex.context, self.wf_ex.input, ) return expr.evaluate_recursively(expression, ctx_view) def _build_action(self): action_name = self.task_spec.get_action_name() wf_name = self.task_spec.get_workflow_name() # For dynamic workflow evaluation we regenerate the action. if wf_name: return actions.WorkflowAction( wf_name=self._evaluate_expression(wf_name), task_ex=self.task_ex ) # For dynamic action evaluation we just regenerate the name. 
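        # E.g. for a task defined with "action: <% $.action_name %>" the
        # expression is evaluated against the task context here, and we
        # fall back to 'std.noop' below if it yields nothing.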
if action_name: action_name = self._evaluate_expression(action_name) if not action_name: action_name = 'std.noop' action_def = actions.resolve_action_definition( action_name, self.wf_ex.name, self.wf_spec.get_name(), namespace=self.wf_ex.workflow_namespace ) if action_def.spec: return actions.AdHocAction(action_def, task_ex=self.task_ex, task_ctx=self.ctx, wf_ctx=self.wf_ex.context) return actions.PythonAction(action_def, task_ex=self.task_ex) def _get_timeout(self): timeout = self.task_spec.get_policies().get_timeout() if not isinstance(timeout, (int, float)): wf_ex = self.task_ex.workflow_execution ctx_view = data_flow.ContextView( self.task_ex.in_context, wf_ex.context, wf_ex.input ) timeout = expr.evaluate_recursively(data=timeout, context=ctx_view) return timeout if timeout > 0 else None class WithItemsTask(RegularTask): """With-items task. Takes care of processing "with-items" tasks. """ _CONCURRENCY = 'concurrency' _CAPACITY = 'capacity' _COUNT = 'count' _WITH_ITEMS = 'with_items' _DEFAULT_WITH_ITEMS = { _COUNT: 0, _CONCURRENCY: 0, _CAPACITY: 0 } @profiler.trace('with-items-task-on-action-complete', hide_args=True) def on_action_complete(self, action_ex): assert self.task_ex with db_api.named_lock('with-items-%s' % self.task_ex.id): # NOTE: We need to refresh task execution object right # after the lock is acquired to make sure that we're # working with a fresh state of its runtime context. # Otherwise, SQLAlchemy session can contain a stale # cached version of it so that we don't modify actual # values (i.e. capacity). db_api.refresh(self.task_ex) if self.is_completed(): return self._increase_capacity() if self.is_with_items_completed(): state = self._get_final_state() # TODO(rakhmerov): Here we can define more informative messages # in cases when action is successful and when it's not. # For example, in state_info we can specify the cause action. # The use of action_ex.output.get('result') for state_info is # not accurate because there could be action executions that # had failed or was cancelled prior to this action execution. state_info = { states.SUCCESS: None, states.ERROR: 'One or more actions had failed.', states.CANCELLED: 'One or more actions was cancelled.' } self.complete(state, state_info[state]) return if self._has_more_iterations() and self._get_concurrency(): self._schedule_actions() def _schedule_actions(self): with_items_values = self._get_with_items_values() if self._is_new(): action_count = len(six.next(iter(with_items_values.values()))) self._prepare_runtime_context(action_count) input_dicts = self._get_input_dicts(with_items_values) if not input_dicts: self.complete(states.SUCCESS) return for i, input_dict in input_dicts: target = self._get_target(input_dict) action = self._build_action() action.validate_input(input_dict) action.schedule( input_dict, target, index=i, safe_rerun=self._get_safe_rerun(), timeout=self._get_timeout() ) self._decrease_capacity(1) def _get_with_items_values(self): """Returns all values evaluated from 'with-items' expression. Example: DSL: with-items: - var1 in <% $.arrayI %> - var2 in <% $.arrayJ %> where arrayI = [1,2,3] and arrayJ = [a,b,c] The result of the method in this case will be: { 'var1': [1,2,3], 'var2': [a,b,c] } :return: Evaluated 'with-items' expression values. """ exp_res = self._evaluate_expression(self.task_spec.get_with_items()) # Expression result may contain iterables instead of lists in the # dictionary values. So we need to convert them into lists and # perform all needed checks. 
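        # E.g. a value of range(3) is normalized to [0, 1, 2], and values
        # of different lengths raise InputException below.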
        result = {}

        required_len = -1

        for var, items in exp_res.items():
            if not isinstance(items, collections.Iterable):
                raise exc.InputException(
                    "Wrong input format for: %s. Iterable type is"
                    " expected for each value." % exp_res
                )

            items_list = list(items)

            result[var] = items_list

            if required_len < 0:
                required_len = len(items_list)
            elif len(items_list) != required_len:
                raise exc.InputException(
                    "Wrong input format for: %s. All arrays must"
                    " have the same length." % exp_res
                )

        return result

    def _get_input_dicts(self, with_items_values):
        """Calculate input dictionaries for another portion of actions.

        :return: a list of tuples containing indexes and corresponding
            input dicts.
        """
        result = []

        for i in self._get_next_indexes():
            ctx = {}

            for k, v in with_items_values.items():
                ctx.update({k: v[i]})

            ctx = utils.merge_dicts(ctx, self.ctx)

            result.append((i, self._get_action_input(ctx)))

        return result

    def _get_with_items_context(self):
        return self.task_ex.runtime_context.get(
            self._WITH_ITEMS,
            self._DEFAULT_WITH_ITEMS
        )

    def _get_with_items_count(self):
        return self._get_with_items_context()[self._COUNT]

    def _get_with_items_capacity(self):
        return self._get_with_items_context()[self._CAPACITY]

    def _get_concurrency(self):
        return self.task_ex.runtime_context.get(self._CONCURRENCY)

    def is_with_items_completed(self):
        find_cancelled = lambda x: x.accepted and x.state == states.CANCELLED

        if list(filter(find_cancelled, self.task_ex.executions)):
            return True

        execs = list([t for t in self.task_ex.executions if t.accepted])
        count = self._get_with_items_count() or 1

        # We need to make sure that the method on_action_complete() has been
        # called for every action. Just looking at the number of actions and
        # their 'accepted' flag is not enough because an action gets accepted
        # before on_action_complete() is called for it. This call is
        # mandatory in order to do all needed processing from the task's
        # perspective. So we can simply check if capacity is fully reset
        # to its initial state.
        full_capacity = (
            not self._get_concurrency() or
            self._get_with_items_capacity() == self._get_concurrency()
        )

        return count == len(execs) and full_capacity

    def _get_final_state(self):
        find_cancelled = lambda x: x.accepted and x.state == states.CANCELLED
        find_error = lambda x: x.accepted and x.state == states.ERROR

        if list(filter(find_cancelled, self.task_ex.executions)):
            return states.CANCELLED
        elif list(filter(find_error, self.task_ex.executions)):
            return states.ERROR
        else:
            return states.SUCCESS

    def _get_accepted_executions(self):
        # Choose only executions that are accepted and completed.
        return list(
            [x for x in self.task_ex.executions
             if x.accepted and states.is_completed(x.state)]
        )

    def _get_unaccepted_executions(self):
        # Choose only executions that are not accepted but completed.
return list( filter( lambda x: not x.accepted and states.is_completed(x.state), self.task_ex.executions ) ) def _get_next_start_index(self): f = lambda x: ( x.accepted or states.is_running(x.state) or states.is_idle(x.state) ) return len(list(filter(f, self.task_ex.executions))) def _get_next_indexes(self): capacity = self._get_with_items_capacity() count = self._get_with_items_count() def _get_indexes(exs): return sorted(set([ex.runtime_context['index'] for ex in exs])) accepted = _get_indexes(self._get_accepted_executions()) unaccepted = _get_indexes(self._get_unaccepted_executions()) candidates = sorted(list(set(unaccepted) - set(accepted))) if candidates: indices = copy.copy(candidates) if max(candidates) < count - 1: indices += list(six.moves.range(max(candidates) + 1, count)) else: i = self._get_next_start_index() indices = list(six.moves.range(i, count)) return indices[:capacity] def _increase_capacity(self): ctx = self._get_with_items_context() concurrency = self._get_concurrency() if concurrency and ctx[self._CAPACITY] < concurrency: ctx[self._CAPACITY] += 1 self.task_ex.runtime_context.update({self._WITH_ITEMS: ctx}) def _decrease_capacity(self, count): ctx = self._get_with_items_context() capacity = ctx[self._CAPACITY] if capacity is not None: if capacity >= count: ctx[self._CAPACITY] -= count else: raise RuntimeError( "Can't decrease with-items capacity" " [capacity=%s, count=%s]" % (capacity, count) ) self.task_ex.runtime_context.update({self._WITH_ITEMS: ctx}) def _is_new(self): return not self.task_ex.runtime_context.get(self._WITH_ITEMS) def _prepare_runtime_context(self, action_count): runtime_ctx = self.task_ex.runtime_context if not runtime_ctx.get(self._WITH_ITEMS): # Prepare current indexes and parallel limitation. runtime_ctx[self._WITH_ITEMS] = { self._CAPACITY: self._get_concurrency(), self._COUNT: action_count } def _has_more_iterations(self): # See action executions which have been already # accepted or are still running. action_exs = list(filter( lambda x: x.accepted or x.state == states.RUNNING, self.task_ex.executions )) return self._get_with_items_count() > len(action_exs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/utils.py0000644000175000017500000000574100000000000020554 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - Huawei Technologies Co. Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral_lib import utils def _compare_parameters(expected_input, actual_input): """Compares the expected parameters with the actual parameters. :param expected_input: Expected dict of parameters. :param actual_input: Actual dict of parameters. 
    :return: Tuple {missing parameter names, unexpected parameter names}
    """
    missing_params = []
    unexpected_params = copy.deepcopy(list((actual_input or {}).keys()))

    for p_name, p_value in expected_input.items():
        if p_value is utils.NotDefined and p_name not in unexpected_params:
            missing_params.append(str(p_name))

        if p_name in unexpected_params:
            unexpected_params.remove(p_name)

    return missing_params, unexpected_params


def validate_input(expected_input, actual_input, obj_name, obj_class):
    actual_input = actual_input or {}

    missing, unexpected = _compare_parameters(
        expected_input,
        actual_input
    )

    if missing or unexpected:
        msg = 'Invalid input [name=%s, class=%s'
        msg_props = [obj_name, obj_class]

        if missing:
            msg += ', missing=%s'
            msg_props.append(missing)

        if unexpected:
            msg += ', unexpected=%s'
            msg_props.append(unexpected)

        msg += ']'

        raise exc.InputException(msg % tuple(msg_props))


def resolve_workflow_definition(parent_wf_name, parent_wf_spec_name,
                                namespace, wf_spec_name):
    wf_def = None

    if parent_wf_name != parent_wf_spec_name:
        # If parent workflow belongs to a workbook then
        # check child workflow within the same workbook
        # (to be able to use short names within workbooks).
        # If it doesn't exist then use a name from spec
        # to find a workflow in DB.
        # The parent name has the form "<workbook>.<workflow>", so cut off
        # the spec name and the separating dot to get the workbook name.
        # (str.rstrip() is the wrong tool for this: it treats its argument
        # as a set of characters and may strip too much.)
        wb_name = parent_wf_name[:-(len(parent_wf_spec_name) + 1)]

        wf_full_name = "%s.%s" % (wb_name, wf_spec_name)

        wf_def = db_api.load_workflow_definition(wf_full_name, namespace)

    if not wf_def:
        wf_def = db_api.load_workflow_definition(wf_spec_name, namespace)

        if not wf_def:
            raise exc.WorkflowException(
                "Failed to find workflow [name=%s] [namespace=%s]"
                % (wf_spec_name, namespace)
            )

    return wf_def
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/workflow_handler.py0000644000175000017500000002174500000000000022765 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from osprofiler import profiler import traceback as tb from mistral.db.v2 import api as db_api from mistral.engine import post_tx_queue from mistral.engine import workflows from mistral import exceptions as exc from mistral.scheduler import base as sched_base from mistral.workflow import states LOG = logging.getLogger(__name__) CONF = cfg.CONF _CHECK_AND_FIX_INTEGRITY_PATH = ( 'mistral.engine.workflow_handler._check_and_fix_integrity' ) @profiler.trace('workflow-handler-start-workflow', hide_args=True) def start_workflow(wf_identifier, wf_namespace, wf_ex_id, wf_input, desc, params): wf = workflows.Workflow() wf_def = db_api.get_workflow_definition(wf_identifier, wf_namespace) if 'namespace' not in params: params['namespace'] = wf_def.namespace wf.start( wf_def=wf_def, wf_ex_id=wf_ex_id, input_dict=wf_input, desc=desc, params=params ) _schedule_check_and_fix_integrity(wf.wf_ex, delay=10) return wf.wf_ex def stop_workflow(wf_ex, state, msg=None): wf = workflows.Workflow(wf_ex=wf_ex) # In this case we should not try to handle possible errors. Instead, # we need to let them pop up since the typical way of failing objects # doesn't work here. Failing a workflow is the same as stopping it # with ERROR state. wf.stop(state, msg) # Cancels subworkflows. if state == states.CANCELLED: for task_ex in wf_ex.task_executions: sub_wf_exs = db_api.get_workflow_executions( task_execution_id=task_ex.id ) for sub_wf_ex in sub_wf_exs: if not states.is_completed(sub_wf_ex.state): stop_workflow(sub_wf_ex, state, msg=msg) def force_fail_workflow(wf_ex, msg=None): stop_workflow(wf_ex, states.ERROR, msg) def cancel_workflow(wf_ex, msg=None): stop_workflow(wf_ex, states.CANCELLED, msg) @profiler.trace('workflow-handler-check-and-complete', hide_args=True) def check_and_complete(wf_ex_id): wf_ex = db_api.load_workflow_execution(wf_ex_id) if not wf_ex or states.is_completed(wf_ex.state): return wf = workflows.Workflow(wf_ex=wf_ex) try: wf.check_and_complete() except exc.MistralException as e: msg = ( "Failed to check and complete [wf_ex_id=%s, wf_name=%s]:" " %s\n%s" % (wf_ex_id, wf_ex.name, e, tb.format_exc()) ) LOG.error(msg) force_fail_workflow(wf.wf_ex, msg) @post_tx_queue.run @profiler.trace('workflow-handler-check-and-fix-integrity') def _check_and_fix_integrity(wf_ex_id): check_after_seconds = CONF.engine.execution_integrity_check_delay if check_after_seconds < 0: # Never check integrity if it's a negative value. return # To break cyclic dependency. from mistral.engine import task_handler with db_api.transaction(): wf_ex = db_api.load_workflow_execution(wf_ex_id) if not wf_ex: return if states.is_completed(wf_ex.state): return _schedule_check_and_fix_integrity(wf_ex, delay=120) running_task_execs = db_api.get_task_executions( workflow_execution_id=wf_ex.id, state=states.RUNNING, limit=CONF.engine.execution_integrity_check_batch_size ) for t_ex in running_task_execs: # The idea is that we take the latest known timestamp of the task # execution and consider it eligible for checking and fixing only # if some minimum period of time elapsed since the last update. 
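            # E.g. with execution_integrity_check_delay = 20 (an option in
            # the [engine] config group), a task last updated 5 seconds ago
            # is skipped on this pass and re-examined on the next run.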
timestamp = t_ex.updated_at or t_ex.created_at delta = timeutils.delta_seconds(timestamp, timeutils.utcnow()) if delta < check_after_seconds: continue child_executions = t_ex.executions if not child_executions: continue all_finished = all( [states.is_completed(c_ex.state) for c_ex in child_executions] ) if all_finished: # Find the timestamp of the most recently finished child. most_recent_child_timestamp = max( [c_ex.updated_at or c_ex.created_at for c_ex in child_executions] ) interval = timeutils.delta_seconds( most_recent_child_timestamp, timeutils.utcnow() ) if interval > check_after_seconds: # We found a task execution in RUNNING state for which all # child executions are finished. We need to call # "schedule_on_action_complete" on the task handler for # any of the child executions so that the task state is # calculated and updated properly. LOG.warning( "Found a task execution that is likely stuck in" " RUNNING state because all child executions are" " finished, will try to recover [task_execution=%s]", t_ex.id ) task_handler.schedule_on_action_complete( child_executions[-1] ) def pause_workflow(wf_ex, msg=None): # Pause subworkflows first. for task_ex in wf_ex.task_executions: sub_wf_exs = db_api.get_workflow_executions( task_execution_id=task_ex.id ) for sub_wf_ex in sub_wf_exs: if not states.is_completed(sub_wf_ex.state): pause_workflow(sub_wf_ex, msg=msg) # If all subworkflows paused successfully, pause the main workflow. # If any subworkflows failed to pause for temporary reason, this # allows pause to be executed again on the main workflow. wf = workflows.Workflow(wf_ex=wf_ex) wf.pause(msg=msg) def rerun_workflow(wf_ex, task_ex, reset=True, env=None): if wf_ex.state == states.PAUSED: return wf_ex.get_clone() wf = workflows.Workflow(wf_ex=wf_ex) wf.rerun(task_ex, reset=reset, env=env) _schedule_check_and_fix_integrity( wf_ex, delay=CONF.engine.execution_integrity_check_delay ) if wf_ex.task_execution_id: _schedule_check_and_fix_integrity( wf_ex.task_execution.workflow_execution, delay=CONF.engine.execution_integrity_check_delay ) def resume_workflow(wf_ex, env=None): if not states.is_paused_or_idle(wf_ex.state): return wf_ex.get_clone() # Resume subworkflows first. for task_ex in wf_ex.task_executions: sub_wf_exs = db_api.get_workflow_executions( task_execution_id=task_ex.id ) for sub_wf_ex in sub_wf_exs: if not states.is_completed(sub_wf_ex.state): resume_workflow(sub_wf_ex) # Resume current workflow here so to trigger continue workflow only # after all other subworkflows are placed back in running state. wf = workflows.Workflow(wf_ex=wf_ex) wf.resume(env=env) @profiler.trace('workflow-handler-set-state', hide_args=True) def set_workflow_state(wf_ex, state, msg=None): if states.is_completed(state): stop_workflow(wf_ex, state, msg) elif states.is_paused(state): pause_workflow(wf_ex, msg) else: raise exc.MistralError( 'Invalid workflow execution state [wf_ex_id=%s, wf_name=%s, ' 'state=%s]' % (wf_ex.id, wf_ex.name, state) ) def _get_integrity_check_key(wf_ex): return 'wfh_c_a_f_i-%s' % wf_ex.id @profiler.trace( 'workflow-handler-schedule-check-and-fix-integrity', hide_args=True ) def _schedule_check_and_fix_integrity(wf_ex, delay=0): """Schedules workflow integrity check. :param wf_ex: Workflow execution. :param delay: Minimum amount of time before the check should be made. """ if CONF.engine.execution_integrity_check_delay < 0: # Never check integrity if it's a negative value. 
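        # (This also gives operators a way to disable the integrity
        # checker entirely.)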
return sched = sched_base.get_system_scheduler() job = sched_base.SchedulerJob( run_after=delay, func_name=_CHECK_AND_FIX_INTEGRITY_PATH, func_args={'wf_ex_id': wf_ex.id}, key=_get_integrity_check_key(wf_ex) ) sched.schedule(job) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/engine/workflows.py0000644000175000017500000005426000000000000021451 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks. # Copyright 2016 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. # Copyright 2019 - NetCracker Technology Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import json from oslo_config import cfg from oslo_log import log as logging from osprofiler import profiler import six from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models as db_models from mistral.engine import dispatcher from mistral.engine import post_tx_queue from mistral.engine import utils as engine_utils from mistral import exceptions as exc from mistral import expressions as expr from mistral.lang import parser as spec_parser from mistral.notifiers import base as notif from mistral.notifiers import notification_events as events from mistral.rpc import clients as rpc from mistral.services import triggers from mistral.services import workflows as wf_service from mistral.utils import wf_trace from mistral.workflow import base as wf_base from mistral.workflow import commands from mistral.workflow import data_flow from mistral.workflow import states from mistral_lib import actions as ml_actions from mistral_lib import utils LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class Workflow(object): """Workflow. Represents a workflow and defines interface that can be used by Mistral engine or its components in order to manipulate with workflows. """ def __init__(self, wf_ex=None): self.wf_ex = wf_ex if wf_ex: # We're processing a workflow that's already in progress. 
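            # The spec is cached by execution id (see
            # cache_workflow_spec_by_execution_id() in _create_execution()),
            # so this lookup doesn't re-parse the workflow definition.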
self.wf_spec = spec_parser.get_workflow_spec_by_execution_id( wf_ex.id ) else: self.wf_spec = None def notify(self, event): publishers = self.wf_ex.params.get('notify') if not publishers and not isinstance(publishers, list): return notifier = notif.get_notifier(cfg.CONF.notifier.type) filtered_publishers = [] for publisher in publishers: if not isinstance(publisher, dict): continue target_events = publisher.get('event_types', []) if not target_events or event in target_events: filtered_publishers.append(publisher) if not filtered_publishers: return def _convert_to_notification_data(): return { "id": self.wf_ex.id, "name": self.wf_ex.name, "workflow_name": self.wf_ex.workflow_name, "workflow_namespace": self.wf_ex.workflow_namespace, "workflow_id": self.wf_ex.workflow_id, "state": self.wf_ex.state, "state_info": self.wf_ex.state_info, "project_id": self.wf_ex.project_id, "task_execution_id": self.wf_ex.task_execution_id, "root_execution_id": self.wf_ex.root_execution_id, "created_at": utils.datetime_to_str(self.wf_ex.created_at), "updated_at": utils.datetime_to_str(self.wf_ex.updated_at) } def _send_notification(): notifier.notify( self.wf_ex.id, _convert_to_notification_data(), event, self.wf_ex.updated_at, filtered_publishers ) post_tx_queue.register_operation(_send_notification) @profiler.trace('workflow-start') def start(self, wf_def, wf_ex_id, input_dict, desc='', params=None): """Start workflow. :param wf_def: Workflow definition. :param wf_ex_id: Workflow execution id. :param input_dict: Workflow input. :param desc: Workflow execution description. :param params: Workflow type specific parameters. :raises """ assert not self.wf_ex # New workflow execution. self.wf_spec = spec_parser.get_workflow_spec_by_definition_id( wf_def.id, wf_def.updated_at ) wf_trace.info( self.wf_ex, 'Starting workflow [name=%s, input=%s]' % (wf_def.name, utils.cut(input_dict)) ) self.validate_input(input_dict) self._create_execution( wf_def, wf_ex_id, self.prepare_input(input_dict), desc, params ) self.set_state(states.RUNNING) # Publish event as soon as state is set to running. self.notify(events.WORKFLOW_LAUNCHED) wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec) dispatcher.dispatch_workflow_commands( self.wf_ex, wf_ctrl.continue_workflow() ) def stop(self, state, msg=None): """Stop workflow. :param state: New workflow state. :param msg: Additional explaining message. """ assert self.wf_ex if state == states.SUCCESS: self._succeed_workflow(self._get_final_context(), msg) elif state == states.ERROR: self._fail_workflow(self._get_final_context(), msg) elif state == states.CANCELLED: self._cancel_workflow(msg) def pause(self, msg=None): """Pause workflow. :param msg: Additional explaining message. """ assert self.wf_ex if states.is_paused(self.wf_ex.state): return # Set the state of this workflow to paused. self.set_state(states.PAUSED, state_info=msg) # Publish event. self.notify(events.WORKFLOW_PAUSED) # If workflow execution is a subworkflow, # schedule update to the task execution. if self.wf_ex.task_execution_id: # Import the task_handler module here to avoid circular reference. from mistral.engine import task_handler task_handler.schedule_on_action_update(self.wf_ex) def resume(self, env=None): """Resume workflow. :param env: Environment. """ assert self.wf_ex wf_service.update_workflow_execution_env(self.wf_ex, env) self.set_state(states.RUNNING) # Publish event. self.notify(events.WORKFLOW_RESUMED) wf_ctrl = wf_base.get_controller(self.wf_ex) # Calculate commands to process next. 
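        # The controller returns commands only for tasks that completed
        # while the workflow was paused; _continue_workflow() below also
        # filters out any leftover 'pause' commands.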
cmds = wf_ctrl.continue_workflow() self._continue_workflow(cmds) # If workflow execution is a subworkflow, # schedule update to the task execution. if self.wf_ex.task_execution_id: # Import the task_handler module here to avoid circular reference. from mistral.engine import task_handler task_handler.schedule_on_action_update(self.wf_ex) def prepare_input(self, input_dict): for k, v in self.wf_spec.get_input().items(): if k not in input_dict or input_dict[k] is utils.NotDefined: input_dict[k] = v return input_dict def validate_input(self, input_dict): engine_utils.validate_input( self.wf_spec.get_input(), input_dict, self.wf_spec.get_name(), self.wf_spec.__class__.__name__ ) def rerun(self, task_ex, reset=True, env=None): """Rerun workflow from the given task. :param task_ex: Task execution that the workflow needs to rerun from. :param reset: If True, reset task state including deleting its action executions. :param env: Environment. """ assert self.wf_ex wf_service.update_workflow_execution_env(self.wf_ex, env) self._recursive_rerun() wf_ctrl = wf_base.get_controller(self.wf_ex) # Calculate commands to process next. cmds = wf_ctrl.rerun_tasks([task_ex], reset=reset) if cmds: # Import the task_handler module here to avoid circular reference. from mistral.engine import policies policies.RetryPolicy.refresh_runtime_context(task_ex) self._continue_workflow(cmds) def _recursive_rerun(self): """Rerun all parent workflow executions recursively. If there is a parent execution then it reruns as well. """ from mistral.engine import workflow_handler self.set_state(states.RUNNING) self.notify(events.WORKFLOW_RERUN) # TODO(rakhmerov): We call an internal method of a module here. # The simplest way is to make it public, however, I believe # it's another "bad smell" that tells that some refactoring # of the architecture is needed. workflow_handler._schedule_check_and_fix_integrity(self.wf_ex) if self.wf_ex.task_execution_id: parent_task_ex = db_api.get_task_execution( self.wf_ex.task_execution_id ) parent_wf = Workflow(wf_ex=parent_task_ex.workflow_execution) parent_wf.lock() parent_wf._recursive_rerun() from mistral.engine import task_handler task_handler.mark_task_running(parent_task_ex, parent_wf.wf_spec) def _get_backlog(self): return self.wf_ex.runtime_context.get(dispatcher.BACKLOG_KEY) def _continue_workflow(self, cmds): # When resuming a workflow we need to ignore all 'pause' # commands because workflow controller takes tasks that # completed within the period when the workflow was paused. cmds = list( [c for c in cmds if not isinstance(c, commands.PauseWorkflow)] ) # Since there's no explicit task causing the operation # we need to mark all not processed tasks as processed # because workflow controller takes only completed tasks # with flag 'processed' equal to False. for t_ex in self.wf_ex.task_executions: if states.is_completed(t_ex.state) and not t_ex.processed: t_ex.processed = True if cmds or self._get_backlog(): dispatcher.dispatch_workflow_commands(self.wf_ex, cmds) else: self.check_and_complete() @profiler.trace('workflow-lock') def lock(self): assert self.wf_ex return db_api.acquire_lock(db_models.WorkflowExecution, self.wf_ex.id) def _get_final_context(self): final_ctx = {} wf_ctrl = wf_base.get_controller(self.wf_ex) try: final_ctx = wf_ctrl.evaluate_workflow_final_context() except Exception as e: LOG.warning( 'Failed to get final context for workflow execution. 
' '[wf_ex_id: %s, wf_name: %s, error: %s]', self.wf_ex.id, self.wf_ex.name, str(e) ) return final_ctx def _create_execution(self, wf_def, wf_ex_id, input_dict, desc, params): self.wf_ex = db_api.create_workflow_execution({ 'id': wf_ex_id, 'name': wf_def.name, 'description': desc, 'tags': wf_def.tags, 'workflow_name': wf_def.name, 'workflow_namespace': wf_def.namespace, 'workflow_id': wf_def.id, 'spec': self.wf_spec.to_dict(), 'state': states.IDLE, 'output': {}, 'task_execution_id': params.get('task_execution_id'), 'root_execution_id': params.get('root_execution_id'), 'runtime_context': { 'index': params.get('index', 0) }, }) self.wf_ex.input = input_dict or {} params['env'] = _get_environment(params) self.wf_ex.params = params data_flow.add_openstack_data_to_context(self.wf_ex) data_flow.add_execution_to_context(self.wf_ex) data_flow.add_workflow_variables_to_context(self.wf_ex, self.wf_spec) spec_parser.cache_workflow_spec_by_execution_id( self.wf_ex.id, self.wf_spec ) @profiler.trace('workflow-set-state') def set_state(self, state, state_info=None): assert self.wf_ex cur_state = self.wf_ex.state if states.is_valid_transition(cur_state, state): wf_ex = db_api.update_workflow_execution_state( id=self.wf_ex.id, cur_state=cur_state, state=state ) if wf_ex is None: # Do nothing because the state was updated previously. return False self.wf_ex = wf_ex self.wf_ex.state_info = json.dumps(state_info) \ if isinstance(state_info, dict) else state_info wf_trace.info( self.wf_ex, "Workflow '%s' [%s -> %s, msg=%s]" % (self.wf_ex.workflow_name, cur_state, state, self.wf_ex.state_info) ) else: msg = ("Can't change workflow execution state from %s to %s. " "[workflow=%s, execution_id=%s]" % (cur_state, state, self.wf_ex.name, self.wf_ex.id)) raise exc.WorkflowException(msg) # Workflow result should be accepted by parent workflows (if any) # only if it completed successfully or failed. self.wf_ex.accepted = states.is_completed(state) if states.is_completed(state): triggers.on_workflow_complete(self.wf_ex) return True @profiler.trace('workflow-check-and-complete') def check_and_complete(self): """Completes the workflow if it needs to be completed. The method simply checks if there are any tasks that are not in a terminal state. If there aren't any then it performs all necessary logic to finalize the workflow (calculate output etc.). :return: Number of incomplete tasks. """ if states.is_paused_or_completed(self.wf_ex.state): return 0 # Workflow is not completed if there are any incomplete task # executions. incomplete_tasks_count = db_api.get_incomplete_task_executions_count( workflow_execution_id=self.wf_ex.id, ) if incomplete_tasks_count > 0: return incomplete_tasks_count LOG.debug("Workflow completed [id=%s]", self.wf_ex.id) # NOTE(rakhmerov): Once we know that the workflow has completed, # we need to expire all the objects in the DB session to make sure # to read the most relevant data from the DB (that's already been # committed in parallel transactions). Otherwise, some data like # workflow context may be stale and decisions made upon it will be # wrong. 
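        # expire_all() marks every object in the current session as stale
        # so that the next attribute access reloads it from the database.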
db_api.expire_all() wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec) if wf_ctrl.any_cancels(): msg = _build_cancel_info_message(wf_ctrl, self.wf_ex) self._cancel_workflow(msg) elif wf_ctrl.all_errors_handled(): ctx = wf_ctrl.evaluate_workflow_final_context() self._succeed_workflow(ctx) else: msg = _build_fail_info_message(wf_ctrl, self.wf_ex) final_context = wf_ctrl.evaluate_workflow_final_context() self._fail_workflow(final_context, msg) return 0 def _succeed_workflow(self, final_context, msg=None): output = data_flow.evaluate_workflow_output( self.wf_ex, self.wf_spec.get_output(), final_context ) # Set workflow execution to success after output is evaluated. if not self.set_state(states.SUCCESS, msg): return self.wf_ex.output = output # Publish event. self.notify(events.WORKFLOW_SUCCEEDED) if self.wf_ex.task_execution_id: self._send_result_to_parent_workflow() def _fail_workflow(self, final_context, msg): if states.is_paused_or_completed(self.wf_ex.state): return output_on_error = {} try: output_on_error = data_flow.evaluate_workflow_output( self.wf_ex, self.wf_spec.get_output_on_error(), final_context ) except exc.MistralException as e: msg = ( "Failed to evaluate expression in output-on-error! " "(output-on-error: '%s', exception: '%s' Cause: '%s'" % (self.wf_spec.get_output_on_error(), e, msg) ) LOG.error(msg) if not self.set_state(states.ERROR, state_info=msg): return # When we set an ERROR state we should safely set output value getting # w/o exceptions due to field size limitations. length_output_on_error = len(str(output_on_error).encode("utf-8")) total_output_length = utils.get_number_of_chars_from_kilobytes( cfg.CONF.engine.execution_field_size_limit_kb) if length_output_on_error < total_output_length: msg = utils.cut_by_char( msg, total_output_length - length_output_on_error ) else: msg = utils.cut_by_kb( msg, cfg.CONF.engine.execution_field_size_limit_kb ) self.wf_ex.output = utils.merge_dicts({'result': msg}, output_on_error) # Publish event. self.notify(events.WORKFLOW_FAILED) if self.wf_ex.task_execution_id: self._send_result_to_parent_workflow() def _cancel_workflow(self, msg): if states.is_completed(self.wf_ex.state): return if not self.set_state(states.CANCELLED, state_info=msg): return # When we set an ERROR state we should safely set output value getting # w/o exceptions due to field size limitations. msg = utils.cut_by_kb( msg, cfg.CONF.engine.execution_field_size_limit_kb ) self.wf_ex.output = {'result': msg} # Publish event. self.notify(events.WORKFLOW_CANCELLED) if self.wf_ex.task_execution_id: self._send_result_to_parent_workflow() def _send_result_to_parent_workflow(self): if self.wf_ex.state == states.SUCCESS: # The result of the sub workflow is already saved # so there's no need to send it over RPC. result = None elif self.wf_ex.state == states.ERROR: err_msg = ( self.wf_ex.state_info or 'Failed subworkflow [execution_id=%s]' % self.wf_ex.id ) result = ml_actions.Result(error=err_msg) elif self.wf_ex.state == states.CANCELLED: err_msg = ( self.wf_ex.state_info or 'Cancelled subworkflow [execution_id=%s]' % self.wf_ex.id ) result = ml_actions.Result(error=err_msg, cancel=True) else: raise RuntimeError( "Method _send_result_to_parent_workflow() must never be called" " if a workflow is not in SUCCESS, ERROR or CANCELLED state." ) # Register a command executed in a separate thread to send the result # to the parent workflow outside of the main DB transaction. 
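        # Since the operation runs from the post transaction queue, the
        # RPC call is made only after the current transaction commits, so
        # the parent engine never sees a result for a child execution that
        # isn't visible in the database yet.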
def _send_result(): rpc.get_engine_client().on_action_complete( self.wf_ex.id, result, wf_action=True ) post_tx_queue.register_operation(_send_result) def _get_environment(params): env = params.get('env', {}) if not env: return {} if isinstance(env, dict): env_dict = env elif isinstance(env, six.string_types): env_db = db_api.load_environment(env) if not env_db: raise exc.InputException( 'Environment is not found: %s' % env ) env_dict = env_db.variables else: raise exc.InputException( 'Unexpected value type for environment [env=%s, type=%s]' % (env, type(env)) ) if ('evaluate_env' in params and not params['evaluate_env']): return env_dict else: return expr.evaluate_recursively(env_dict, {'__env': env_dict}) def _build_fail_info_message(wf_ctrl, wf_ex): # Try to find where error is exactly. failed_tasks = [ t_ex for t_ex in db_api.get_task_executions( workflow_execution_id=wf_ex.id, state=states.ERROR, sort_keys=['name'] ) if not wf_ctrl.is_error_handled_for(t_ex) ] msg = ('Failure caused by error in tasks: %s\n' % ', '.join([t.name for t in failed_tasks])) for t in failed_tasks: msg += '\n %s [task_ex_id=%s] -> %s\n' % (t.name, t.id, t.state_info) for i, ex in enumerate(t.action_executions): if ex.state == states.ERROR: output = (ex.output or dict()).get('result', 'Unknown') msg += ( ' [action_ex_id=%s, idx=%s]: %s\n' % ( ex.id, i, str(output) ) ) for i, ex in enumerate(t.workflow_executions): if ex.state == states.ERROR: output = (ex.output or dict()).get('result', 'Unknown') msg += ( ' [wf_ex_id=%s, idx=%s]: %s\n' % ( ex.id, i, str(output) ) ) return msg def _build_cancel_info_message(wf_ctrl, wf_ex): # Try to find where cancel is exactly. cancelled_tasks = [ t_ex for t_ex in db_api.get_task_executions( workflow_execution_id=wf_ex.id, state=states.CANCELLED, sort_keys=['name'] ) ] return ( 'Cancelled tasks: %s' % ', '.join([t.name for t in cancelled_tasks]) ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1175673 mistral-10.0.0.0b3/mistral/event_engine/0000755000175000017500000000000000000000000020234 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/event_engine/__init__.py0000644000175000017500000000000000000000000022333 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/event_engine/base.py0000644000175000017500000000214300000000000021520 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2017 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc import six @six.add_metaclass(abc.ABCMeta) class EventEngine(object): """Action event trigger interface.""" @abc.abstractmethod def create_event_trigger(self, trigger, events): raise NotImplementedError() @abc.abstractmethod def update_event_trigger(self, trigger): raise NotImplementedError() @abc.abstractmethod def delete_event_trigger(self, trigger, events): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/event_engine/default_event_engine.py0000644000175000017500000003357100000000000024771 0ustar00coreycorey00000000000000# Copyright 2016 Catalyst IT Ltd # Copyright 2017 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import defaultdict import json import os import threading from oslo_config import cfg from oslo_log import log as logging from oslo_service import threadgroup from oslo_utils import fnmatch import six from mistral import context as auth_ctx from mistral.db.v2 import api as db_api from mistral.event_engine import base from mistral import exceptions from mistral import expressions from mistral import messaging as mistral_messaging from mistral.rpc import clients as rpc from mistral.services import security from mistral.utils import safe_yaml LOG = logging.getLogger(__name__) CONF = cfg.CONF DEFAULT_PROPERTIES = { 'service': '<% $.publisher %>', 'project_id': '<% $.context.project_id %>', 'user_id': '<% $.context.user_id %>', 'timestamp': '<% $.timestamp %>' } class EventDefinition(object): def __init__(self, definition_cfg): self.cfg = definition_cfg try: self.event_types = self.cfg['event_types'] self.properties = self.cfg['properties'] except KeyError as err: raise exceptions.MistralException( "Required field %s not specified" % err.args[0] ) if isinstance(self.event_types, six.string_types): self.event_types = [self.event_types] def match_type(self, event_type): for t in self.event_types: if fnmatch.fnmatch(event_type, t): return True return False def convert(self, event): return expressions.evaluate_recursively(self.properties, event) class NotificationsConverter(object): def __init__(self): config_file = CONF.event_engine.event_definitions_cfg_file definition_cfg = [] if os.path.exists(config_file): with open(config_file) as cf: config = cf.read() try: definition_cfg = safe_yaml.load(config) except safe_yaml.YAMLError as err: if hasattr(err, 'problem_mark'): mark = err.problem_mark errmsg = ( "Invalid YAML syntax in Definitions file " "%(file)s at line: %(line)s, column: %(column)s." % dict(file=config_file, line=mark.line + 1, column=mark.column + 1) ) else: errmsg = ( "YAML error reading Definitions file %s" % CONF.event_engine.event_definitions_cfg_file ) LOG.error(errmsg) raise exceptions.MistralError( 'Invalid event definition configuration file. 
%s' % config_file ) self.definitions = [EventDefinition(event_def) for event_def in reversed(definition_cfg)] def get_event_definition(self, event_type): for d in self.definitions: if d.match_type(event_type): return d return None def convert(self, event_type, event): edef = self.get_event_definition(event_type) if edef is None: LOG.debug('No event definition found for type: %s, using default ' 'settings instead.', event_type) return expressions.evaluate_recursively(DEFAULT_PROPERTIES, event) return edef.convert(event) class DefaultEventEngine(base.EventEngine): """Event engine server. A separate service responsible for listening to event notifications and triggering workflows defined by end users. """ def __init__(self): self.engine_client = rpc.get_engine_client() self.event_queue = six.moves.queue.Queue() self.handler_tg = threadgroup.ThreadGroup() self.event_triggers_map = defaultdict(list) self.exchange_topic_events_map = defaultdict(set) self.exchange_topic_listener_map = {} self.lock = threading.Lock() LOG.debug('Loading notification definitions.') self.notification_converter = NotificationsConverter() self._start_handler() self._start_listeners() def _get_endpoint_cls(self, events): """Create a messaging endpoint class. The endpoint implements a method named after each priority and only handles notifications that match the NotificationFilter rule set in the filter_rule attribute of the endpoint. """ # Handle each priority of notification messages. event_priorities = ['audit', 'critical', 'debug', 'error', 'info'] attrs = dict.fromkeys( event_priorities, mistral_messaging.handle_event ) attrs['event_types'] = events endpoint_cls = type( 'MistralNotificationEndpoint', (mistral_messaging.NotificationEndpoint,), attrs, ) return endpoint_cls def _add_event_listener(self, exchange, topic, events): """Add or update an event listener for the specified exchange and topic. Create a new event listener for the event trigger if no existing listener relates to (exchange, topic); otherwise, restart the existing event listener with the updated events. """ key = (exchange, topic) if key in self.exchange_topic_listener_map: listener = self.exchange_topic_listener_map[key] listener.stop() listener.wait() endpoint = self._get_endpoint_cls(events)(self) LOG.debug("Starting to listen to AMQP. exchange: %s, topic: %s", exchange, topic) listener = mistral_messaging.start_listener( CONF, exchange, topic, [endpoint] ) self.exchange_topic_listener_map[key] = listener def stop_all_listeners(self): for listener in six.itervalues(self.exchange_topic_listener_map): listener.stop() listener.wait() def _start_listeners(self): triggers = db_api.get_event_triggers(insecure=True) LOG.info('Found %s event triggers.', len(triggers)) for trigger in triggers: exchange_topic = (trigger.exchange, trigger.topic) self.exchange_topic_events_map[exchange_topic].add(trigger.event) trigger_info = trigger.to_dict() trigger_info['workflow_namespace'] = trigger.workflow.namespace self.event_triggers_map[trigger.event].append(trigger_info) for (ex_t, events) in self.exchange_topic_events_map.items(): exchange, topic = ex_t self._add_event_listener(exchange, topic, events) def _start_workflow(self, triggers, event_params): """Start workflows defined in event triggers.""" for t in triggers: LOG.info('Starting to process event trigger: %s', t['id']) workflow_params = t.get('workflow_params', {}) workflow_params.update({'event_params': event_params}) # Set up the context before scheduling triggers.
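# The trust-scoped security context built below makes the workflow # start on behalf of the trigger owner's project rather than on # behalf of the sender of the notification.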
ctx = security.create_context(t['trust_id'], t['project_id']) auth_ctx.set_ctx(ctx) description = { "description": ( "Workflow execution created by event" " trigger '(%s)'." % t['id'] ), "triggered_by": { "type": "event_trigger", "id": t['id'], "name": t['name'] } } try: self.engine_client.start_workflow( t['workflow_id'], t['workflow_namespace'], None, t['workflow_input'], description=json.dumps(description), **workflow_params ) except Exception as e: LOG.exception("Failed to process event trigger %s, " "error: %s", t['id'], str(e)) finally: auth_ctx.set_ctx(None) def _process_event_queue(self, *args, **kwargs): """Process notification events. This function is called in a thread. """ while True: event = self.event_queue.get() context = event.get('context') event_type = event.get('event_type') # NOTE(kong): Use lock here to protect the event_triggers_map # variable from being updated outside the thread. with self.lock: if event_type in self.event_triggers_map: triggers = self.event_triggers_map[event_type] # There may be multiple projects registered for the same # event. project_ids = [t['project_id'] for t in triggers] any_public = any( [t['scope'] == 'public' for t in triggers] ) # Skip events that don't belong to any event trigger owner. if (not any_public and CONF.pecan.auth_enable and context.get('project_id', '') not in project_ids): self.event_queue.task_done() continue # Choose exactly which trigger(s) should be called. triggers_to_call = [] for t in triggers: project_trigger = ( t['project_id'] == context.get('project_id') ) public_trigger = t['scope'] == 'public' if project_trigger or public_trigger: triggers_to_call.append(t) LOG.debug('Starting to handle event: %s, %d trigger(s) ' 'registered.', event_type, len(triggers)) event_params = self.notification_converter.convert( event_type, event ) self._start_workflow(triggers_to_call, event_params) self.event_queue.task_done() def _start_handler(self): """Starts the event queue handler in a thread group.""" LOG.info('Starting event notification task...') self.handler_tg.add_thread(self._process_event_queue) def process_notification_event(self, notification): """Callback function invoked by the event handler. Just puts the notification into a queue. """ LOG.debug("Putting notification event to event queue.") self.event_queue.put(notification) def create_event_trigger(self, trigger, events): """An endpoint method for creating an event trigger. When creating an event trigger in the API layer, we need to create a new listener or update an existing listener. :param trigger: a dict containing event trigger information. :param events: a list of events binding to the (exchange, topic) of the event trigger. """ with self.lock: ids = [t['id'] for t in self.event_triggers_map[trigger['event']]] if trigger['id'] not in ids: self.event_triggers_map[trigger['event']].append(trigger) self._add_event_listener(trigger['exchange'], trigger['topic'], events) def update_event_trigger(self, trigger): """An endpoint method for updating an event trigger. Because only workflow-related information is allowed to be updated, we only need to update event_triggers_map (in a synchronous way). :param trigger: a dict containing event trigger information. """ assert trigger['event'] in self.event_triggers_map with self.lock: for t in self.event_triggers_map[trigger['event']]: if trigger['id'] == t['id']: t.update(trigger) def delete_event_trigger(self, trigger, events): """An endpoint method for deleting an event trigger.
If there is no event binding to (exchange, topic) after deletion, we need to delete the related listener. Otherwise, we need to restart that listener. :param trigger: a dict containing event trigger information. :param events: a list of events binding to the (exchange, topic) of the event trigger. """ assert trigger['event'] in self.event_triggers_map with self.lock: for t in self.event_triggers_map[trigger['event']]: if t['id'] == trigger['id']: self.event_triggers_map[trigger['event']].remove(t) break if not self.event_triggers_map[trigger['event']]: del self.event_triggers_map[trigger['event']] if not events: key = (trigger['exchange'], trigger['topic']) listener = self.exchange_topic_listener_map[key] listener.stop() listener.wait() del self.exchange_topic_listener_map[key] LOG.info( 'Deleted listener for exchange: %s, topic: %s', trigger['exchange'], trigger['topic'] ) return self._add_event_listener(trigger['exchange'], trigger['topic'], events) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/event_engine/event_engine_server.py0000644000175000017500000000572700000000000024655 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks # Copyright 2017 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from mistral import config as cfg from mistral.event_engine import default_event_engine as evt_eng from mistral.rpc import base as rpc from mistral.service import base as service_base from mistral.utils import profiler as profiler_utils LOG = logging.getLogger(__name__) class EventEngineServer(service_base.MistralService): """RPC EventEngine server. This class manages event engine life-cycle and gets registered as an RPC endpoint to process event engine specific calls. It also registers a cluster member associated with this instance of event engine. """ def __init__(self, event_engine): super(EventEngineServer, self).__init__('event-engine_group') self._event_engine = event_engine self._rpc_server = None def start(self): super(EventEngineServer, self).start() profiler_utils.setup( 'mistral-event-engine', cfg.CONF.event_engine.host ) # Initialize and start RPC server. 
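# The server registers itself as the RPC endpoint, so the # create/update/delete_event_trigger methods defined below can be # invoked remotely by the API layer.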
self._rpc_server = rpc.get_rpc_server_driver()(cfg.CONF.event_engine) self._rpc_server.register_endpoint(self) self._rpc_server.run() self._notify_started('Event engine server started.') def stop(self, graceful=False): super(EventEngineServer, self).stop(graceful) if self._rpc_server: self._rpc_server.stop(graceful) def create_event_trigger(self, rpc_ctx, trigger, events): LOG.info( "Received RPC request 'create_event_trigger'[rpc_ctx=%s," " trigger=%s, events=%s]", rpc_ctx, trigger, events ) return self._event_engine.create_event_trigger(trigger, events) def delete_event_trigger(self, rpc_ctx, trigger, events): LOG.info( "Received RPC request 'delete_event_trigger'[rpc_ctx=%s," " trigger=%s, events=%s]", rpc_ctx, trigger, events ) return self._event_engine.delete_event_trigger(trigger, events) def update_event_trigger(self, rpc_ctx, trigger): LOG.info( "Received RPC request 'update_event_trigger'[rpc_ctx=%s," " trigger=%s]", rpc_ctx, trigger ) return self._event_engine.update_event_trigger(trigger) def get_oslo_service(): return EventEngineServer(evt_eng.DefaultEventEngine()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/exceptions.py0000644000175000017500000001222500000000000020323 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class MistralFailuresBase(Exception): """Base class for mistral errors and exceptions""" message = "An unknown failure occurred" http_code = 500 def __init__(self, message=None): if message is not None: self.message = message super(MistralFailuresBase, self).__init__( '%d: %s' % (self.http_code, self.message)) @property def code(self): """This is here for webob to read. https://github.com/Pylons/webob/blob/master/webob/exc.py """ return self.http_code def __str__(self): return self.message class MistralError(MistralFailuresBase): """Mistral specific error. Reserved for situations that can't be automatically handled. When it occurs it signals that there is a major environmental problem like an invalid startup configuration or an implementation problem (e.g. some code doesn't take care of certain corner cases). From an architectural perspective it's pointless to try to handle this type of problem except doing some finalization work like transaction rollback, deleting temporary files etc. """ message = "An unknown error occurred" class MistralException(MistralFailuresBase): """Mistral specific exception. Reserved for situations that are not critical for program continuation. It is possible to recover from this type of problem automatically and continue program execution. Such problems may be related to invalid user input (such as invalid syntax) or temporary environmental problems.
If an instance of a certain exception type bubbles up to the API layer, then this type of exception must be associated with an HTTP code so it's clear how to represent it to a client. To correctly use this class, inherit from it and define 'message' and 'http_code' properties. """ message = "An unknown exception occurred" # Database errors. class DBError(MistralError): http_code = 400 class DBDuplicateEntryError(DBError): http_code = 409 message = "Database object already exists" class DBEntityNotFoundError(DBError): http_code = 404 message = "Object not found" # DSL exceptions. class DSLParsingException(MistralException): http_code = 400 class ExpressionGrammarException(DSLParsingException): http_code = 400 class JinjaGrammarException(ExpressionGrammarException): message = "Invalid grammar of Jinja expression" class YaqlGrammarException(ExpressionGrammarException): message = "Invalid grammar of YAQL expression" class InvalidModelException(DSLParsingException): http_code = 400 message = "Wrong entity definition" # Various common exceptions and errors. class EvaluationException(MistralException): http_code = 400 class JinjaEvaluationException(EvaluationException): message = "Can not evaluate Jinja expression" class YaqlEvaluationException(EvaluationException): message = "Can not evaluate YAQL expression" class DataAccessException(MistralException): http_code = 400 class ActionException(MistralException): http_code = 400 class InvalidActionException(MistralException): http_code = 400 class ActionRegistrationException(MistralException): message = "Failed to register action" class EngineException(MistralException): http_code = 500 class WorkflowException(MistralException): http_code = 400 class EventTriggerException(MistralException): http_code = 400 class InputException(MistralException): http_code = 400 class ApplicationContextNotFoundException(MistralException): http_code = 400 message = "Application context not found" class InvalidResultException(MistralException): http_code = 400 message = "Unable to parse result" class SizeLimitExceededException(MistralException): http_code = 400 class CoordinationException(MistralException): http_code = 500 class CoordinationNotSupportedException(MistralException): http_code = 406 class NotAllowedException(MistralException): http_code = 403 message = "Operation forbidden (insufficient permissions)" class UnauthorizedException(MistralException): http_code = 401 message = "Unauthorized" class KombuException(Exception): def __init__(self, e): super(KombuException, self).__init__(e) self.exc_type = e.__class__.__name__ self.value = str(e) class InvalidStateTransitionException(MistralException): http_code = 400 message = 'Invalid state transition' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1175673 mistral-10.0.0.0b3/mistral/executors/0000755000175000017500000000000000000000000017607 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/executors/__init__.py0000644000175000017500000000000000000000000021706 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/executors/base.py0000644000175000017500000000443100000000000021075 0ustar00coreycorey00000000000000# Copyright 2017 - Brocade Communications Systems, Inc.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from stevedore import driver _EXECUTORS = {} def cleanup(): global _EXECUTORS _EXECUTORS = {} def get_executor(exec_type): global _EXECUTORS if not _EXECUTORS.get(exec_type): mgr = driver.DriverManager( 'mistral.executors', exec_type, invoke_on_load=True ) _EXECUTORS[exec_type] = mgr.driver return _EXECUTORS[exec_type] @six.add_metaclass(abc.ABCMeta) class Executor(object): """Action executor interface.""" @abc.abstractmethod def run_action(self, action_ex_id, action_cls_str, action_cls_attrs, params, safe_rerun, execution_context, redelivered=False, target=None, async_=True, timeout=None): """Runs action. :param timeout: a period of time in seconds after which execution of action will be interrupted :param action_ex_id: Corresponding action execution id. :param action_cls_str: Path to action class in dot notation. :param action_cls_attrs: Attributes which will be set on the action class. :param params: Action parameters. :param safe_rerun: Tells if given action can be safely rerun. :param execution_context: A dict of values providing information about the current execution. :param redelivered: Tells if given action was run before on another executor. :param target: Target (group of action executors). :param async_: If True, run action in asynchronous mode (w/o waiting for completion). :return: Action result. """ raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/executors/default_executor.py0000644000175000017500000001636200000000000023531 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
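# NOTE: DefaultExecutor (defined below) runs actions in-process. A minimal # usage sketch follows; the action execution id is illustrative and # EchoAction is assumed to be available among the standard actions: # # executor = DefaultExecutor() # executor.run_action( # action_ex_id='123e4567-e89b-12d3-a456-426614174000', # illustrative # action_cls_str='mistral.actions.std_actions.EchoAction', # action_cls_attrs={}, # params={'output': 'hello'}, # safe_rerun=True, # execution_context={} # )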
from eventlet import timeout as ev_timeout from mistral_lib import actions as mistral_lib from oslo_log import log as logging from osprofiler import profiler from mistral.actions import action_factory as a_f from mistral import context from mistral import exceptions as exc from mistral.executors import base from mistral.rpc import clients as rpc from mistral.services import action_heartbeat_sender from mistral_lib.utils import inspect_utils as i_u LOG = logging.getLogger(__name__) class DefaultExecutor(base.Executor): def __init__(self): self._engine_client = rpc.get_engine_client() @profiler.trace('default-executor-run-action', hide_args=True) def run_action(self, action_ex_id, action_cls_str, action_cls_attrs, params, safe_rerun, execution_context, redelivered=False, target=None, async_=True, timeout=None): """Runs action. :param action_ex_id: Action execution id. :param action_cls_str: Path to action class in dot notation. :param action_cls_attrs: Attributes which will be set on the action class. :param params: Action parameters. :param safe_rerun: Tells if given action can be safely rerun. :param execution_context: A dict of values providing information about the current execution. :param redelivered: Tells if given action was run before on another executor. :param target: Target (group of action executors). :param async_: If True, run action in asynchronous mode (w/o waiting for completion). :param timeout: a period of time in seconds after which execution of action will be interrupted :return: Action result. """ try: action_heartbeat_sender.add_action(action_ex_id) return self._do_run_action( action_cls_attrs, action_cls_str, action_ex_id, execution_context, params, redelivered, safe_rerun, timeout ) finally: action_heartbeat_sender.remove_action(action_ex_id) def _do_run_action(self, action_cls_attrs, action_cls_str, action_ex_id, execution_context, params, redelivered, safe_rerun, timeout): def send_error_back(error_msg): error_result = mistral_lib.Result(error=error_msg) if action_ex_id: self._engine_client.on_action_complete( action_ex_id, error_result ) return None return error_result if redelivered and not safe_rerun: msg = ( "Request to run action %s was redelivered, but action %s " "cannot be re-run safely. The only safe thing to do is to " "fail the action." % (action_cls_str, action_cls_str) ) return send_error_back(msg) # Load action module. action_cls = a_f.construct_action_class( action_cls_str, action_cls_attrs ) # Instantiate action. try: action = action_cls(**params) except Exception as e: msg = ( "Failed to initialize action %s. Action init params = %s. " "Actual init params = %s. More info: %s" % ( action_cls_str, i_u.get_arg_list(action_cls.__init__), params.keys(), e ) ) LOG.warning(msg) return send_error_back(msg) # Run action. try: with ev_timeout.Timeout(seconds=timeout): # NOTE(d0ugal): If the action is a subclass of mistral-lib we # know that it expects to be passed the context. if isinstance(action, mistral_lib.Action): action_ctx = context.create_action_context( execution_context) result = action.run(action_ctx) else: result = action.run() # NOTE: this is done for backwards compatibility with already # existing Mistral actions which don't return the result as an # instance of workflow.utils.Result.
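# Wrapping the raw value keeps the contract with the engine uniform: # every action result is delivered as a mistral_lib Result object # carrying either a 'data' or an 'error' payload.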
if not isinstance(result, mistral_lib.Result): result = mistral_lib.Result(data=result) except BaseException as e: msg = ( "The action raised an exception [action_ex_id=%s, msg='%s', " "action_cls='%s', attributes='%s', params='%s']" % ( action_ex_id, e, action_cls, action_cls_attrs, params ) ) LOG.warning(msg, exc_info=True) return send_error_back(msg) # Send action result. try: if action_ex_id and (action.is_sync() or result.is_error()): self._engine_client.on_action_complete( action_ex_id, result, async_=True ) except exc.MistralException as e: # In case of a Mistral exception we can try to send error info to # engine because most likely it's not related to the infrastructure # such as message bus or network. One known case is when the action # returns a bad result (e.g. invalid unicode) which can't be # serialized. msg = ( "Failed to complete action due to a Mistral exception " "[action_ex_id=%s, action_cls='%s', " "attributes='%s', params='%s']\n %s" % ( action_ex_id, action_cls, action_cls_attrs, params, e ) ) LOG.exception(msg) return send_error_back(msg) except Exception as e: # If it's not a Mistral exception all we can do is only # log the error. msg = ( "Failed to complete action due to an unexpected exception " "[action_ex_id=%s, action_cls='%s', " "attributes='%s', params='%s']\n %s" % ( action_ex_id, action_cls, action_cls_attrs, params, e ) ) LOG.exception(msg) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/executors/executor_server.py0000644000175000017500000000766100000000000023417 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from mistral import config as cfg from mistral.executors import default_executor as exe from mistral.rpc import base as rpc from mistral.service import base as service_base from mistral.services import action_heartbeat_sender from mistral.utils import profiler as profiler_utils from mistral_lib import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class ExecutorServer(service_base.MistralService): """Executor server. This class manages executor life-cycle and gets registered as an RPC endpoint to process executor specific calls. It also registers a cluster member associated with this instance of executor. """ def __init__(self, executor, setup_profiler=True): super(ExecutorServer, self).__init__('executor_group', setup_profiler) self.executor = executor self._rpc_server = None def start(self): super(ExecutorServer, self).start() action_heartbeat_sender.start() if self._setup_profiler: profiler_utils.setup('mistral-executor', cfg.CONF.executor.host) # Initialize and start RPC server. 
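# The RPC server below is run with a threading executor so that # long-running actions don't block the dispatching of incoming # messages.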
self._rpc_server = rpc.get_rpc_server_driver()(cfg.CONF.executor) self._rpc_server.register_endpoint(self) self._rpc_server.run(executor='threading') self._notify_started('Executor server started.') def stop(self, graceful=False): super(ExecutorServer, self).stop(graceful) action_heartbeat_sender.stop() if self._rpc_server: self._rpc_server.stop(graceful) def run_action(self, rpc_ctx, action_ex_id, action_cls_str, action_cls_attrs, params, safe_rerun, execution_context, timeout): """Receives calls over RPC to run action on executor. :param timeout: a period of time in seconds after which execution of action will be interrupted :param execution_context: A dict of values providing information about the current execution. :param rpc_ctx: RPC request context dictionary. :param action_ex_id: Action execution id. :param action_cls_str: Action class name. :param action_cls_attrs: Action class attributes. :param params: Action input parameters. :param safe_rerun: Tells if given action can be safely rerun. :return: Action result. """ LOG.debug( "Received RPC request 'run_action'[action_ex_id=%s, " "action_cls_str=%s, action_cls_attrs=%s, params=%s, " "timeout=%s]", action_ex_id, action_cls_str, action_cls_attrs, utils.cut(params), timeout ) redelivered = rpc_ctx.redelivered or False res = self.executor.run_action( action_ex_id, action_cls_str, action_cls_attrs, params, safe_rerun, execution_context, redelivered, timeout=timeout ) LOG.debug( "Sending action result to engine" " [action_ex_id=%s, action_cls=%s]", action_ex_id, action_cls_str ) return res def get_oslo_service(setup_profiler=True): return ExecutorServer( exe.DefaultExecutor(), setup_profiler=setup_profiler ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/executors/remote_executor.py0000644000175000017500000000173600000000000023401 0ustar00coreycorey00000000000000# Copyright 2017 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from mistral.rpc import clients as rpc_clients LOG = logging.getLogger(__name__) class RemoteExecutor(rpc_clients.ExecutorClient): """Executor that passes execution request to a remote executor.""" def __init__(self): super(RemoteExecutor, self).__init__(cfg.CONF.executor) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1175673 mistral-10.0.0.0b3/mistral/expressions/0000755000175000017500000000000000000000000020150 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/expressions/__init__.py0000644000175000017500000000621100000000000022261 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from oslo_log import log as logging import six from stevedore import extension from mistral import exceptions as exc LOG = logging.getLogger(__name__) _mgr = extension.ExtensionManager( namespace='mistral.expression.evaluators', invoke_on_load=False ) _evaluators = [] patterns = {} for name in sorted(_mgr.names()): evaluator = _mgr[name].plugin _evaluators.append((name, evaluator)) patterns[name] = evaluator.find_expression_pattern.pattern def validate(expression): LOG.debug("Validating expression [expression='%s']", expression) if not isinstance(expression, six.string_types): return expression_found = None for name, evaluator in _evaluators: if evaluator.is_expression(expression): if expression_found: raise exc.ExpressionGrammarException( "The line already contains an expression of type '%s'. " "Mixing expression types in a single line is not allowed." % expression_found) try: evaluator.validate(expression) except Exception: raise else: expression_found = name def evaluate(expression, context): for name, evaluator in _evaluators: # Check if the passed value is expression so we don't need to do this # every time on a caller side. if (isinstance(expression, six.string_types) and evaluator.is_expression(expression)): return evaluator.evaluate(expression, context) return expression def _evaluate_item(item, context): if isinstance(item, six.string_types): try: return evaluate(item, context) except AttributeError as e: LOG.debug( "Expression %s is not evaluated, [context=%s]: %s", item, context, e ) return item else: return evaluate_recursively(item, context) def evaluate_recursively(data, context): data = copy.deepcopy(data) if not context: return data if isinstance(data, dict): for key in data: data[key] = _evaluate_item(data[key], context) elif isinstance(data, list): for index, item in enumerate(data): data[index] = _evaluate_item(item, context) elif isinstance(data, six.string_types): return _evaluate_item(data, context) return data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/expressions/base.py0000644000175000017500000000413400000000000021436 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from stevedore import extension class Evaluator(object): """Expression evaluator interface. 
Having this interface gives the flexibility to change the actual expression language used in the Mistral DSL for conditions, output calculation etc. """ @classmethod @abc.abstractmethod def validate(cls, expression): """Parses and validates the expression. :param expression: Expression string :return: True if expression is valid """ pass @classmethod @abc.abstractmethod def evaluate(cls, expression, context): """Evaluates the expression against the given data context. :param expression: Expression string :param context: Data context :return: Expression result """ pass @classmethod @abc.abstractmethod def is_expression(cls, expression): """Check the expression string and decide whether it is an expression or not. :param expression: Expression string :return: True if the string is an expression """ pass def get_custom_functions(): """Get custom functions. Retrieves the list of custom functions used in YAQL/Jinja expressions. """ # {name => function object}. result = dict() mgr = extension.ExtensionManager( namespace='mistral.expression.functions', invoke_on_load=False ) for name in mgr.names(): result[name] = mgr[name].plugin return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/expressions/jinja_expression.py0000644000175000017500000001324500000000000024101 0ustar00coreycorey00000000000000# Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial import re import jinja2 from jinja2 import parser as jinja_parse from jinja2.sandbox import SandboxedEnvironment from oslo_db import exception as db_exc from oslo_log import log as logging import six from mistral import exceptions as exc from mistral.expressions import base LOG = logging.getLogger(__name__) ANY_JINJA_REGEXP = "{{.*}}|{%.*%}" JINJA_REGEXP = '({{(.*?)}})' JINJA_BLOCK_REGEXP = '({%(.*?)%})' JINJA_OPTS = {'undefined_to_none': False} _environment = SandboxedEnvironment( undefined=jinja2.StrictUndefined, trim_blocks=True, lstrip_blocks=True ) _filters = base.get_custom_functions() for name in _filters: _environment.filters[name] = _filters[name] def get_jinja_context(data_context): new_ctx = {'_': data_context} _register_jinja_functions(new_ctx) if isinstance(data_context, dict): new_ctx['__env'] = data_context.get('__env') new_ctx['__execution'] = data_context.get('__execution') new_ctx['__task_execution'] = data_context.get('__task_execution') return new_ctx def _register_jinja_functions(jinja_ctx): functions = base.get_custom_functions() for name in functions: jinja_ctx[name] = partial(functions[name], jinja_ctx['_']) class JinjaEvaluator(base.Evaluator): _env = _environment.overlay() @classmethod def validate(cls, expression): if not isinstance(expression, six.string_types): raise exc.JinjaEvaluationException( "Unsupported type '%s'."
% type(expression) ) try: parser = jinja_parse.Parser(cls._env, expression, state='variable') parser.parse_expression() except jinja2.exceptions.TemplateError as e: raise exc.JinjaGrammarException("Syntax error '%s'." % str(e)) @classmethod def evaluate(cls, expression, data_context): ctx = get_jinja_context(data_context) result = cls._env.compile_expression(expression, **JINJA_OPTS)(**ctx) # For StrictUndefined values, UndefinedError only gets raised when # the value is accessed, not when it gets created. The simplest way # to access it is to try and cast it to string. str(result) return result @classmethod def is_expression(cls, s): # The class should only be called from within InlineJinjaEvaluator. The # return value prevents the class from being accidentally added as # Extension return False class InlineJinjaEvaluator(base.Evaluator): # The regular expression for Jinja variables and blocks find_expression_pattern = re.compile(JINJA_REGEXP) find_block_pattern = re.compile(JINJA_BLOCK_REGEXP) _env = _environment.overlay() @classmethod def validate(cls, expression): if not isinstance(expression, six.string_types): raise exc.JinjaEvaluationException( "Unsupported type '%s'." % type(expression) ) try: cls._env.parse(expression) except jinja2.exceptions.TemplateError as e: raise exc.JinjaGrammarException( "Syntax error '%s'." % str(e) ) @classmethod def evaluate(cls, expression, data_context): LOG.debug( "Start to evaluate Jinja expression. " "[expression='%s', context=%s]", expression, data_context ) patterns = cls.find_expression_pattern.findall(expression) try: if patterns[0][0] == expression: result = JinjaEvaluator.evaluate(patterns[0][1], data_context) else: ctx = get_jinja_context(data_context) result = cls._env.from_string(expression).render(**ctx) except Exception as e: # NOTE(rakhmerov): if we hit a database error then we need to # re-raise the initial exception so that upper layers had a # chance to handle it properly (e.g. in case of DB deadlock # the operations needs to retry. Essentially, such situation # indicates a problem with DB rather than with the expression # syntax or values. if isinstance(e, db_exc.DBError): LOG.error( "Failed to evaluate Jinja expression due to a database" " error, re-raising initial exception [expression=%s," " error=%s, data=%s]", expression, str(e), data_context ) raise e raise exc.JinjaEvaluationException( "Can not evaluate Jinja expression [expression=%s, error=%s" ", data=%s]" % (expression, str(e), data_context) ) LOG.debug( "Finished evaluation. [expression='%s', result: %s]", expression, result ) return result @classmethod def is_expression(cls, s): return (cls.find_expression_pattern.search(s) or cls.find_block_pattern.search(s)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/expressions/std_functions.py0000644000175000017500000002116600000000000023412 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import warnings from oslo_log import log as logging from oslo_serialization import jsonutils import yaml from mistral.db import utils as db_utils from mistral.db.v2 import api as db_api from mistral.utils import filter_utils from mistral_lib import utils # Additional YAQL/Jinja functions provided by Mistral out of the box. # If a function name ends with underscore then it doesn't need to pass # the name of the function when context registers it. LOG = logging.getLogger(__name__) def env_(context): return context['__env'] @db_utils.tx_cached(ignore_args='context') def executions_(context, id=None, root_execution_id=None, state=None, from_time=None, to_time=None): filter_ = {} if id is not None: filter_ = filter_utils.create_or_update_filter('id', id, "eq", filter_) if root_execution_id is not None: filter_ = filter_utils.create_or_update_filter( 'root_execution_id', root_execution_id, 'eq', filter_ ) if state is not None: filter_ = filter_utils.create_or_update_filter( 'state', state, 'eq', filter_ ) if from_time is not None: filter_ = filter_utils.create_or_update_filter( 'created_at', from_time, 'gte', filter_ ) if to_time is not None: filter_ = filter_utils.create_or_update_filter( 'created_at', to_time, 'lt', filter_ ) return db_api.get_workflow_executions(**filter_) @db_utils.tx_cached(ignore_args='context') def execution_(context): wf_ex = db_api.get_workflow_execution(context['__execution']['id']) return { 'id': wf_ex.id, 'name': wf_ex.name, 'spec': wf_ex.spec, 'input': wf_ex.input, 'params': wf_ex.params, 'created_at': wf_ex.created_at.isoformat(' '), 'updated_at': wf_ex.updated_at.isoformat(' '), 'root_execution_id': wf_ex.root_execution_id } def json_pp_(context, data=None): warnings.warn( "json_pp was deprecated in Queens and will be removed in the S cycle. " "The json_dump expression function can be used for outputting JSON", DeprecationWarning ) return jsonutils.dumps( data or context, indent=4 ).replace("\\n", "\n").replace(" \n", "\n") def json_dump_(context, data): return jsonutils.dumps(data, indent=4) def yaml_dump_(context, data): return yaml.safe_dump(data, default_flow_style=False) @db_utils.tx_cached(ignore_args='context') def task_(context, task_name=None): # This section may not exist in a context if it's calculated not in # task scope. cur_task = context['__task_execution'] # 1. If task_name is empty it's 'task()' use case, we need to get the # current task. # 2. if task_name is not empty but it's equal to the current task name # we need to take exactly the current instance of this task. Otherwise # there may be ambiguity if there are many tasks with this name. # 3. In other case we just find a task in DB by the given name. if cur_task and (not task_name or cur_task['name'] == task_name): task_ex = db_api.get_task_execution(cur_task['id']) else: task_execs = db_api.get_task_executions( workflow_execution_id=context['__execution']['id'], name=task_name ) # TODO(rakhmerov): Account for multiple executions (i.e. in case of # cycles). task_ex = task_execs[-1] if len(task_execs) > 0 else None if not task_ex: LOG.warning( "Task '%s' not found by the task() expression function", task_name ) return None # We don't use to_dict() db model method because not all fields # make sense for user. return _convert_to_user_model(task_ex) def _should_pass_filter(t, state, flat): # Start from assuming all is true, check only if needed. 
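# Note that a workflow-type task passes the 'flat' filter only if none # of its nested workflow executions are in the same state as the task # itself, i.e. only the innermost occurrence of a given state is kept.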
state_match = True flat_match = True if state: state_match = t['state'] == state if flat: is_action = t['type'] == utils.ACTION_TASK_TYPE if not is_action: nested_execs = db_api.get_workflow_executions( task_execution_id=t.id ) for n in nested_execs: flat_match = flat_match and n.state != t.state return state_match and flat_match def _get_tasks_from_db(workflow_execution_id=None, recursive=False, state=None, flat=False): task_execs = [] nested_task_exs = [] kwargs = {} if workflow_execution_id: kwargs['workflow_execution_id'] = workflow_execution_id # We can't add state to the query if we want to filter by # workflow_execution_id recursively, because there might be a workflow # execution in one state with a nested workflow execution that has a task # in the desired state. Until we have an optimization for querying all # workflow executions under a given top-level workflow execution, this is # the way to go. if state and not (workflow_execution_id and recursive): kwargs['state'] = state task_execs.extend(db_api.get_task_executions(**kwargs)) # If it is not recursive, there is no need to check nested workflows. # If there is no workflow execution id, we already have all we need, and # doing more queries will just create duplication in the results. if recursive and workflow_execution_id: for t in task_execs: if t.type == utils.WORKFLOW_TASK_TYPE: # Get nested workflow execution that matches the task. nested_workflow_executions = db_api.get_workflow_executions( task_execution_id=t.id ) # There might be zero nested executions. for nested_workflow_execution in nested_workflow_executions: nested_task_exs.extend( _get_tasks_from_db( nested_workflow_execution.id, recursive, state, flat ) ) if state or flat: # Filter by state and flat. task_execs = [ t for t in task_execs if _should_pass_filter(t, state, flat) ] # The nested tasks were already filtered, since this is a recursion. task_execs.extend(nested_task_exs) return task_execs @db_utils.tx_cached(ignore_args='context') def tasks_(context, workflow_execution_id=None, recursive=False, state=None, flat=False): task_execs = _get_tasks_from_db( workflow_execution_id, recursive, state, flat ) # Convert task_execs to user model and return. return [_convert_to_user_model(t) for t in task_execs] def _convert_to_user_model(task_ex): # Importing data_flow here in order to break a cyclic dependency between # modules. from mistral.workflow import data_flow # We don't use to_dict() db model method because not all fields # make sense for user. return { 'id': task_ex.id, 'name': task_ex.name, 'spec': task_ex.spec, 'state': task_ex.state, 'state_info': task_ex.state_info, 'result': data_flow.get_task_execution_result(task_ex), 'published': task_ex.published, 'type': task_ex.type, 'workflow_execution_id': task_ex.workflow_execution_id, 'created_at': task_ex.created_at.isoformat(' '), 'updated_at': task_ex.updated_at.isoformat(' ') if task_ex.updated_at is not None else None } def uuid_(context=None): return utils.generate_unicode_uuid() def global_(context, var_name): wf_ex = db_api.get_workflow_execution(context['__execution']['id']) return wf_ex.context.get(var_name) def json_parse_(context, data): return jsonutils.loads(data) def yaml_parse_(context, data): return yaml.safe_load(data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/expressions/yaql_expression.py0000644000175000017500000001720300000000000023752 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import inspect import re from oslo_db import exception as db_exc from oslo_log import log as logging import six from yaml import representer import yaql from yaql.language import exceptions as yaql_exc from yaql.language import factory from yaql.language import utils as yaql_utils from mistral.config import cfg from mistral import exceptions as exc from mistral.expressions import base from mistral_lib import utils LOG = logging.getLogger(__name__) _YAQL_CONF = cfg.CONF.yaql INLINE_YAQL_REGEXP = '<%.*?%>' YAQL_ENGINE = None ROOT_YAQL_CONTEXT = None # TODO(rakhmerov): it's work around the bug in YAQL. # YAQL shouldn't expose internal types to custom functions. representer.SafeRepresenter.add_representer( yaql_utils.FrozenDict, representer.SafeRepresenter.represent_dict ) def get_yaql_context(data_context): global ROOT_YAQL_CONTEXT if not ROOT_YAQL_CONTEXT: ROOT_YAQL_CONTEXT = yaql.create_context() _register_yaql_functions(ROOT_YAQL_CONTEXT) new_ctx = ROOT_YAQL_CONTEXT.create_child_context() new_ctx['$'] = ( data_context if not cfg.CONF.yaql.convert_input_data else yaql_utils.convert_input_data(data_context) ) if isinstance(data_context, dict): new_ctx['__env'] = data_context.get('__env') new_ctx['__execution'] = data_context.get('__execution') new_ctx['__task_execution'] = data_context.get('__task_execution') return new_ctx def _register_yaql_functions(yaql_ctx): functions = base.get_custom_functions() for name in functions: yaql_ctx.register_function(functions[name], name=name) def get_yaql_engine_options(): return { "yaql.limitIterators": _YAQL_CONF.limit_iterators, "yaql.memoryQuota": _YAQL_CONF.memory_quota, "yaql.convertTuplesToLists": _YAQL_CONF.convert_tuples_to_lists, "yaql.convertSetsToLists": _YAQL_CONF.convert_sets_to_lists, "yaql.iterableDicts": _YAQL_CONF.iterable_dicts, "yaql.convertOutputData": _YAQL_CONF.convert_output_data } def create_yaql_engine_class(keyword_operator, allow_delegates, engine_options): return factory.YaqlFactory( keyword_operator=keyword_operator, allow_delegates=allow_delegates ).create(options=engine_options) def get_yaql_engine_class(): global YAQL_ENGINE if YAQL_ENGINE is not None: return YAQL_ENGINE YAQL_ENGINE = create_yaql_engine_class( _YAQL_CONF.keyword_operator, _YAQL_CONF.allow_delegates, get_yaql_engine_options() ) LOG.info( "YAQL engine has been initialized with the options: \n%s", utils.merge_dicts( get_yaql_engine_options(), { "keyword_operator": _YAQL_CONF.keyword_operator, "allow_delegates": _YAQL_CONF.allow_delegates } ) ) return YAQL_ENGINE def _sanitize_yaql_result(result): # Expression output conversion can be disabled but we can still # do some basic unboxing if we got an internal YAQL type. # TODO(rakhmerov): FrozenDict doesn't provide any public method # or property to access a regular dict that it wraps so ideally # we need to add it to YAQL. Once it's there we need to make a # fix here. 
if isinstance(result, yaql_utils.FrozenDict): return result._d if inspect.isgenerator(result) or isinstance(result, collections.Iterator): return list(result) return result class YAQLEvaluator(base.Evaluator): @classmethod def validate(cls, expression): try: get_yaql_engine_class()(expression) except (yaql_exc.YaqlException, KeyError, ValueError, TypeError) as e: raise exc.YaqlGrammarException(getattr(e, 'message', e)) @classmethod def evaluate(cls, expression, data_context): expression = expression.strip() if expression else expression try: result = get_yaql_engine_class()(expression).evaluate( context=get_yaql_context(data_context) ) except Exception as e: # NOTE(rakhmerov): if we hit a database error then we need to # re-raise the initial exception so that upper layers had a # chance to handle it properly (e.g. in case of DB deadlock # the operations needs to retry. Essentially, such situation # indicates a problem with DB rather than with the expression # syntax or values. if isinstance(e, db_exc.DBError): LOG.error( "Failed to evaluate YAQL expression due to a database" " error, re-raising initial exception [expression=%s," " error=%s, data=%s]", expression, str(e), data_context ) raise e raise exc.YaqlEvaluationException( "Can not evaluate YAQL expression [expression=%s, error=%s" ", data=%s]" % (expression, str(e), data_context) ) return _sanitize_yaql_result(result) @classmethod def is_expression(cls, s): # The class should not be used outside of InlineYAQLEvaluator since by # convention, YAQL expression should always be wrapped in '<% %>'. return False class InlineYAQLEvaluator(YAQLEvaluator): # This regular expression will look for multiple occurrences of YAQL # expressions in '<% %>' (i.e. <% any_symbols %>) within a string. find_expression_pattern = re.compile(INLINE_YAQL_REGEXP) @classmethod def validate(cls, expression): if not isinstance(expression, six.string_types): raise exc.YaqlEvaluationException( "Unsupported type '%s'." % type(expression) ) found_expressions = cls.find_inline_expressions(expression) if found_expressions: [super(InlineYAQLEvaluator, cls).validate(expr.strip("<%>")) for expr in found_expressions] @classmethod def evaluate(cls, expression, data_context): LOG.debug( "Starting to evaluate YAQL expression. 
" "[expression='%s']", expression ) result = expression found_expressions = cls.find_inline_expressions(expression) if found_expressions: for expr in found_expressions: trim_expr = expr.strip("<%>") evaluated = super(InlineYAQLEvaluator, cls).evaluate( trim_expr, data_context ) if len(expression) == len(expr): result = evaluated else: result = result.replace(expr, str(evaluated)) return result @classmethod def is_expression(cls, s): return cls.find_expression_pattern.search(s) @classmethod def find_inline_expressions(cls, s): return cls.find_expression_pattern.findall(s) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1175673 mistral-10.0.0.0b3/mistral/ext/0000755000175000017500000000000000000000000016366 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/ext/__init__.py0000644000175000017500000000000000000000000020465 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/ext/pygmentplugin.py0000644000175000017500000000455000000000000021646 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import re from pygments import lexer from pygments import token class MistralLexer(lexer.RegexLexer): name = 'Mistral' aliases = ['mistral'] flags = re.MULTILINE | re.UNICODE tokens = { "root": [ (r'^(\s)*(workflows|tasks|input|output|type)(\s)*:', token.Keyword), (r'^(\s)*(version|name|description)(\s)*:', token.Keyword), (r'^(\s)*(publish|timeout|retry|with\-items)(\s)*:', token.Keyword), (r'^(\s)*(on\-success|on\-error|on\-complete)(\s)*:', token.Keyword), (r'^(\s)*(action|workflow)(\s)*:', token.Keyword, 'call'), (r'(\-|\:)(\s)*(fail|succeed|pause)(\s)+', token.Operator.Word), (r'<%', token.Name.Entity, 'expression'), (r'\{\{', token.Name.Entity, 'expression'), (r'#.*$', token.Comment), (r'(^|\s|\-)+\d+', token.Number), lexer.include("generic"), ], "expression": [ (r'\$', token.Operator), (r'\s(json_pp|task|tasks|execution|env|uuid)(?!\w)', token.Name.Builtin), lexer.include("generic"), (r'%>', token.Name.Entity, '#pop'), (r'}\\}', token.Name.Entity, '#pop'), ], "call": [ (r'(\s)*[\w\.]+($|\s)', token.Name.Function), lexer.default('#pop'), ], "generic": [ (r'%>', token.Name.Entity, '#pop'), (r'}\\}', token.Name.Entity, '#pop'), (r'(\-|:|=|!|\[|\]|<|>|\/|\*)', token.Operator), (r'(null|None|True|False)', token.Name.Builtin), (r'"(\\\\|\\"|[^"])*"', token.String.Double), (r"'(\\\\|\\'|[^'])*'", token.String.Single), (r'\W|\w|\s|\(|\)|,|\.', token.Text), ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1215675 mistral-10.0.0.0b3/mistral/hacking/0000755000175000017500000000000000000000000017172 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/hacking/__init__.py0000644000175000017500000000000000000000000021271 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/hacking/checks.py0000644000175000017500000002302500000000000021006 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Mistral's pep8 extensions. In order to make the review process faster and easier for core devs we are adding some Mistral specific pep8 checks. This will catch common errors. There are two types of pep8 extensions. One is a function that takes either a physical or logical line. The physical or logical line is the first param in the function definition and can be followed by other parameters supported by pep8. The second type is a class that parses AST trees. For more info please see pep8.py. 
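Each function-based check below is marked with the @core.flake8ext decorator from the hacking library, which is what wires it into flake8.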
""" import ast import re import six from hacking import core oslo_namespace_imports_dot = re.compile(r"import[\s]+oslo[.][^\s]+") oslo_namespace_imports_from_dot = re.compile(r"from[\s]+oslo[.]") oslo_namespace_imports_from_root = re.compile(r"from[\s]+oslo[\s]+import[\s]+") @core.flake8ext def no_assert_equal_true_false(logical_line): """Check for assertTrue/assertFalse sentences M319 """ _start_re = re.compile(r'assert(Not)?Equal\((True|False),') _end_re = re.compile(r'assert(Not)?Equal\(.*,\s+(True|False)\)$') if _start_re.search(logical_line) or _end_re.search(logical_line): yield (0, "M319: assertEqual(A, True|False), " "assertEqual(True|False, A), assertNotEqual(A, True|False), " "or assertEqual(True|False, A) sentences must not be used. " "Use assertTrue(A) or assertFalse(A) instead") @core.flake8ext def no_assert_true_false_is_not(logical_line): """Check for assertIs/assertIsNot sentences M320 """ _re = re.compile(r'assert(True|False)\(.+\s+is\s+(not\s+)?.+\)$') if _re.search(logical_line): yield (0, "M320: assertTrue(A is|is not B) or " "assertFalse(A is|is not B) sentences must not be used. " "Use assertIs(A, B) or assertIsNot(A, B) instead") @core.flake8ext def check_oslo_namespace_imports(logical_line): if re.match(oslo_namespace_imports_from_dot, logical_line): msg = ("O323: '%s' must be used instead of '%s'.") % ( logical_line.replace('oslo.', 'oslo_'), logical_line) yield(0, msg) elif re.match(oslo_namespace_imports_from_root, logical_line): msg = ("O323: '%s' must be used instead of '%s'.") % ( logical_line.replace('from oslo import ', 'import oslo_'), logical_line) yield(0, msg) elif re.match(oslo_namespace_imports_dot, logical_line): msg = ("O323: '%s' must be used instead of '%s'.") % ( logical_line.replace('import', 'from').replace('.', ' import '), logical_line) yield(0, msg) @core.flake8ext def check_python3_xrange(logical_line): if re.search(r"\bxrange\s*\(", logical_line): yield(0, "M327: Do not use xrange(). 'xrange()' is not compatible " "with Python 3. Use range() or six.moves.range() instead.") @core.flake8ext def check_python3_no_iteritems(logical_line): msg = ("M328: Use six.iteritems() instead of dict.iteritems().") if re.search(r".*\.iteritems\(\)", logical_line): yield(0, msg) @core.flake8ext def check_python3_no_iterkeys(logical_line): msg = ("M329: Use six.iterkeys() instead of dict.iterkeys().") if re.search(r".*\.iterkeys\(\)", logical_line): yield(0, msg) @core.flake8ext def check_python3_no_itervalues(logical_line): msg = ("M330: Use six.itervalues() instead of dict.itervalues().") if re.search(r".*\.itervalues\(\)", logical_line): yield(0, msg) class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ def __init__(self, tree, filename): """This object is created automatically by pep8. 
        :param tree: an AST tree
        :param filename: name of the file being analyzed
                         (ignored by our checks)
        """
        self._tree = tree
        self._errors = []

    def run(self):
        """Called automatically by pep8."""
        self.visit(self._tree)

        return self._errors

    def add_error(self, node, message=None):
        """Add an error caused by a node to the list of errors for pep8."""
        message = message or self.CHECK_DESC

        error = (node.lineno, node.col_offset, message, self.__class__)

        self._errors.append(error)


class CheckForLoggingIssues(BaseASTChecker):
    name = "check_for_logging_issues"
    version = "1.0"

    CHECK_DESC = ('M001 Using the deprecated Logger.warn')
    LOG_MODULES = ('logging', 'oslo_log.log')

    def __init__(self, tree, filename):
        super(CheckForLoggingIssues, self).__init__(tree, filename)

        self.logger_names = []
        self.logger_module_names = []

        # NOTE(dstanek): This kinda accounts for scopes when talking
        # about only leaf node in the graph.
        self.assignments = {}

    def _filter_imports(self, module_name, alias):
        """Keeps lists of logging imports."""
        if module_name in self.LOG_MODULES:
            self.logger_module_names.append(alias.asname or alias.name)

    def visit_Import(self, node):
        for alias in node.names:
            self._filter_imports(alias.name, alias)

        return super(CheckForLoggingIssues, self).generic_visit(node)

    def visit_ImportFrom(self, node):
        for alias in node.names:
            full_name = '%s.%s' % (node.module, alias.name)

            self._filter_imports(full_name, alias)

        return super(CheckForLoggingIssues, self).generic_visit(node)

    def _find_name(self, node):
        """Return the fully qualified name of a Name or an Attribute."""
        if isinstance(node, ast.Name):
            return node.id
        elif (isinstance(node, ast.Attribute)
                and isinstance(node.value, (ast.Name, ast.Attribute))):
            obj_name = self._find_name(node.value)

            if obj_name is None:
                return None

            method_name = node.attr

            return obj_name + '.' + method_name
        elif isinstance(node, six.string_types):
            return node
        else:
            # Could be Subscript, Call or many more
            return None

    def visit_Assign(self, node):
        """Look for 'LOG = logging.getLogger'

        This handles the simple case:
          name = [logging_module].getLogger(...)
        """
        attr_node_types = (ast.Name, ast.Attribute)

        if (len(node.targets) != 1
                or not isinstance(node.targets[0], attr_node_types)):
            # Say no to: "x, y = ..."
            return super(CheckForLoggingIssues, self).generic_visit(node)

        target_name = self._find_name(node.targets[0])

        if (isinstance(node.value, ast.BinOp)
                and isinstance(node.value.op, ast.Mod)):
            if (isinstance(node.value.left, ast.Call)
                    and isinstance(node.value.left.func, ast.Name)):
                # NOTE(dstanek): This is done to match cases like:
                # `msg = _('something %s') % x`
                node = ast.Assign(value=node.value.left)

        if not isinstance(node.value, ast.Call):
            # node.value must be a call to getLogger
            self.assignments.pop(target_name, None)

            return super(CheckForLoggingIssues, self).generic_visit(node)

        if (not isinstance(node.value.func, ast.Attribute)
                or not isinstance(node.value.func.value, attr_node_types)):
            # Function must be an attribute on an object like
            # logging.getLogger
            return super(CheckForLoggingIssues, self).generic_visit(node)

        object_name = self._find_name(node.value.func.value)
        func_name = node.value.func.attr

        if (object_name in self.logger_module_names
                and func_name == 'getLogger'):
            self.logger_names.append(target_name)

        return super(CheckForLoggingIssues, self).generic_visit(node)

    def visit_Call(self, node):
        """Look for the 'LOG.*' calls."""
        # obj.method
        if isinstance(node.func, ast.Attribute):
            obj_name = self._find_name(node.func.value)

            if isinstance(node.func.value, ast.Name):
                method_name = node.func.attr
            elif isinstance(node.func.value, ast.Attribute):
                obj_name = self._find_name(node.func.value)
                method_name = node.func.attr
            else:
                # Could be Subscript, Call or many more
                return super(CheckForLoggingIssues, self).generic_visit(node)

            # If dealing with a logger the method can't be "warn".
            if obj_name in self.logger_names and method_name == 'warn':
                self.add_error(node.args[0])

        return super(CheckForLoggingIssues, self).generic_visit(node)


---- mistral-10.0.0.0b3/mistral/lang/__init__.py (empty) ----

---- mistral-10.0.0.0b3/mistral/lang/base.py ----

# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
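# NOTE(editor): Illustrative sketch, not part of the original module. The
# polymorphic lookup implemented by instantiate_spec() below works roughly
# like this (class names here are hypothetical):
#
#     class MySpec(BaseSpec):
#         # Key in the raw data plus its default value.
#         _polymorphic_key = ('type', 'direct')
#
#     class MyDirectSpec(MySpec):
#         _polymorphic_value = 'direct'
#
#     class MyReverseSpec(MySpec):
#         _polymorphic_value = 'reverse'
#
#     # instantiate_spec(MySpec, {'type': 'reverse', ...}) resolves to
#     # MyReverseSpec; without a 'type' key it falls back to 'direct'.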
import copy
import json

import jsonschema
from osprofiler import profiler
import re
import six

from mistral import exceptions as exc
from mistral import expressions as expr
from mistral.expressions.jinja_expression import ANY_JINJA_REGEXP
from mistral.expressions.yaql_expression import INLINE_YAQL_REGEXP
from mistral.lang import types
from mistral_lib import utils

ACTION_PATTERNS = {
    "command": r"[\w\.]+[^=\(\s\"]*",
    "yaql_expression": INLINE_YAQL_REGEXP,
    "jinja_expression": ANY_JINJA_REGEXP,
}

CMD_PTRN = re.compile(
    "^({})".format("|".join(six.itervalues(ACTION_PATTERNS)))
)

EXPRESSION = '|'.join([expr.patterns[name] for name in expr.patterns])

_ALL_IN_BRACKETS = r"\[.*\]\s*"
_ALL_IN_QUOTES = r"\"[^\"]*\"\s*"
_ALL_IN_APOSTROPHES = r"'[^']*'\s*"
_DIGITS = r"\d+"

_TRUE = "true"
_FALSE = "false"
_NULL = "null"

ALL = (
    _ALL_IN_QUOTES, _ALL_IN_APOSTROPHES, EXPRESSION,
    _ALL_IN_BRACKETS, _TRUE, _FALSE, _NULL, _DIGITS
)

PARAMS_PTRN = re.compile(r"([-_\w]+)=(%s)" % "|".join(ALL))

# {(base_spec_cls, polymorphic_value): spec_cls}
_POLYMORPHIC_CACHE = {}


@profiler.trace('lang-base-instantiate-spec', hide_args=True)
def instantiate_spec(spec_cls, data, validate=False):
    """Instantiates specification accounting for specification hierarchies.

    :param spec_cls: Specification concrete or base class. In case a base
        class of a hierarchy is provided this method relies on the
        attributes _polymorphic_key and _polymorphic_value in order to find
        the concrete class that needs to be instantiated.
    :param data: Raw specification data as a dictionary.
    :type data: dict
    :param validate: If it's False then semantics and schema validation
        will be skipped.
    :type validate: bool
    """
    if issubclass(spec_cls, BaseSpecList):
        # Ignore polymorphic search for specification lists because
        # it doesn't make sense for them.
        return spec_cls(data, validate)

    if not hasattr(spec_cls, '_polymorphic_key'):
        spec = spec_cls(data, validate)

        if validate:
            spec.validate_semantics()

        return spec

    # In order to do polymorphic search we need to make sure that
    # a spec is backed by a dictionary. Otherwise we can't extract
    # a polymorphic key.
    if not isinstance(data, dict):
        raise exc.InvalidModelException(
            "A specification with polymorphic key must be backed by"
            " a dictionary [spec_cls=%s, data=%s]" % (spec_cls, data)
        )

    key = spec_cls._polymorphic_key

    if not isinstance(key, tuple):
        key_name = key
        key_default = None
    else:
        key_name = key[0]
        key_default = key[1]

    polymorphic_val = data.get(key_name, key_default)

    global _POLYMORPHIC_CACHE

    cache_key = (spec_cls, polymorphic_val)

    concrete_spec_cls = _POLYMORPHIC_CACHE.get(cache_key)

    if concrete_spec_cls is None:
        for cls in utils.iter_subclasses(spec_cls):
            if not hasattr(cls, '_polymorphic_value'):
                raise exc.DSLParsingException(
                    "Class '%s' is expected to have attribute"
                    " '_polymorphic_value' because it's a part of"
                    " specification hierarchy inherited "
                    "from class '%s'." % (cls, spec_cls)
                )

            if cls._polymorphic_value == polymorphic_val:
                concrete_spec_cls = cls

                _POLYMORPHIC_CACHE[cache_key] = concrete_spec_cls

    if concrete_spec_cls is None:
        raise exc.DSLParsingException(
            'Failed to find a specification class to instantiate '
            '[spec_cls=%s, data=%s]' % (spec_cls, data)
        )

    spec = concrete_spec_cls(data, validate)

    if validate:
        spec.validate_semantics()

    return spec


class BaseSpec(object):
    """Base class for all DSL specifications.

    It represents a DSL entity such as a workflow or a task as a python
    object providing a more convenient API to analyse DSL than just working
    with raw data in the form of a dictionary.
    Specification classes also implement all required validation logic by
    overriding the instance methods 'validate_schema()' and
    'validate_semantics()'.

    Note that the specification mechanism allows polymorphic entities in
    DSL. For example, if we find it more convenient to have separate
    specification classes for different types of workflow (i.e. 'direct'
    and 'reverse') we can do so. In this case, in order to instantiate them
    correctly the function 'instantiate_spec' must always be used, where
    argument 'spec_cls' must be the root class of the specification
    hierarchy containing the class attribute '_polymorphic_key' pointing to
    a key in the raw data relying on which we can find a concrete class.
    Concrete classes then must all have the attribute '_polymorphic_value'
    corresponding to a value in the raw data. The attribute
    '_polymorphic_key' can be either a string or a tuple of size two where
    the first value is the key name itself and the second value is a
    default polymorphic value that must be used if the raw data doesn't
    contain the configured key at all. An example of this situation is when
    we don't specify a workflow type in DSL. In this case, we assume it's
    'direct'.
    """

    # See http://json-schema.org
    _schema = {'type': 'object'}

    _meta_schema = {'type': 'object'}

    _full_schema = None

    _definitions = {}

    _version = '2.0'

    @classmethod
    def get_schema(cls, includes=('meta', 'definitions')):
        if cls._full_schema is not None:
            return cls._full_schema

        schema = copy.deepcopy(cls._schema)

        schema['properties'] = utils.merge_dicts(
            schema.get('properties', {}),
            cls._meta_schema.get('properties', {}),
            overwrite=False
        )

        if includes and 'meta' in includes:
            schema['required'] = list(
                set(schema.get('required', []) +
                    cls._meta_schema.get('required', []))
            )

        if includes and 'definitions' in includes:
            schema['definitions'] = utils.merge_dicts(
                schema.get('definitions', {}),
                cls._definitions,
                overwrite=False
            )

        cls._full_schema = schema

        return schema

    def __init__(self, data, validate):
        self._data = data
        self._validate = validate

        if validate:
            self.validate_schema()

    @profiler.trace('lang-base-spec-validate-schema', hide_args=True)
    def validate_schema(self):
        """Validates DSL entity schema that this specification represents.

        By default, this method just validates the schema of the DSL entity
        that this specification represents using the "_schema" class
        attribute. Additionally, child classes may implement extra logic to
        validate more specific things like YAQL expressions in their fields.

        Note that this method is called before construction of the
        specification fields and validation logic should only rely on raw
        data provided as a dictionary accessible through the '_data'
        instance field.
        """
        try:
            jsonschema.validate(self._data, self.get_schema())
        except jsonschema.ValidationError as e:
            raise exc.InvalidModelException("Invalid DSL: %s" % e)

    def validate_semantics(self):
        """Validates semantics of specification object.

        Child classes may implement validation logic to check things like
        the integrity of corresponding data structures (e.g. task graph) or
        other things that can't be expressed in JSON schema.

        This method is called after the specification has been built (i.e.
        its initializer has finished its work) so that validation logic can
        rely on initialized specification fields.
""" pass def validate_expr(self, dsl_part): if isinstance(dsl_part, six.string_types): expr.validate(dsl_part) elif isinstance(dsl_part, (list, tuple)): for expression in dsl_part: if isinstance(expression, six.string_types): expr.validate(expression) elif isinstance(dsl_part, dict): for expression in dsl_part.values(): if isinstance(expression, six.string_types): expr.validate(expression) def _spec_property(self, prop_name, spec_cls): prop_val = self._data.get(prop_name) if prop_val is None: return None return instantiate_spec(spec_cls, prop_val, self._validate) def _group_spec(self, spec_cls, *prop_names): if not prop_names: return None data = {} for prop_name in prop_names: prop_val = self._data.get(prop_name) if prop_val: data[prop_name] = prop_val return instantiate_spec(spec_cls, data, self._validate) def _inject_version(self, prop_names): for prop_name in prop_names: prop_data = self._data.get(prop_name) if isinstance(prop_data, dict): prop_data['version'] = self._version def _as_dict(self, prop_name): prop_val = self._data.get(prop_name) if not prop_val: return {} if isinstance(prop_val, dict): return prop_val elif isinstance(prop_val, list): result = {} for t in prop_val: result.update(t if isinstance(t, dict) else {t: ''}) return result elif isinstance(prop_val, six.string_types): return {prop_val: ''} @staticmethod @profiler.trace('lang-base-parse-cmd-and-input', hide_args=True) def _parse_cmd_and_input(cmd_str): if ' ' not in cmd_str: return cmd_str, {} # TODO(rakhmerov): Try to find a way with one expression. cmd_matcher = CMD_PTRN.search(cmd_str) if not cmd_matcher: msg = "Invalid action/workflow task property: %s" % cmd_str raise exc.InvalidModelException(msg) cmd = cmd_matcher.group() params = {} for match in re.findall(PARAMS_PTRN, cmd_str): k = match[0] # Remove embracing quotes. v = match[1].strip() if v[0] == '"' or v[0] == "'": v = v[1:-1] else: try: v = json.loads(v) except Exception: pass params[k] = v return cmd, params def to_dict(self): return self._data def get_version(self): return self._version def __repr__(self): return "%s %s" % (self.__class__.__name__, self.to_dict()) class BaseListSpec(BaseSpec): item_class = None _schema = { "type": "object", "properties": { "version": types.VERSION }, "additionalProperties": types.NONEMPTY_DICT, "required": ["version"], } def __init__(self, data, validate): super(BaseListSpec, self).__init__(data, validate) self.items = [] for k, v in data.items(): if k != 'version': v['name'] = k self._inject_version([k]) self.items.append( instantiate_spec(self.item_class, v, validate) ) def validate_schema(self): super(BaseListSpec, self).validate_schema() if len(self._data.keys()) < 2: raise exc.InvalidModelException( 'At least one item must be in the list [data=%s].' % self._data ) def get_items(self): return self.items def __getitem__(self, idx): return self.items[idx] def __len__(self): return len(self.items) class BaseSpecList(object): item_class = None _version = '2.0' def __init__(self, data, validate): self.items = {} for k, v in data.items(): if k != 'version': # At this point, we don't know if item schema is valid, # it may not be even a dictionary. So we should check the # type first before manipulating with it. 
                if isinstance(v, dict):
                    v['name'] = k
                    v['version'] = self._version

                self.items[k] = instantiate_spec(self.item_class, v, validate)

    def item_keys(self):
        return self.items.keys()

    def __iter__(self):
        return six.itervalues(self.items)

    def __getitem__(self, name):
        return self.items.get(name)

    def __len__(self):
        return len(self.items)

    def get(self, name):
        return self.__getitem__(name)


---- mistral-10.0.0.0b3/mistral/lang/parser.py ----

# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cachetools
import threading

from yaml import error
import six

from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.lang import base
from mistral.lang.v2 import actions as actions_v2
from mistral.lang.v2 import tasks as tasks_v2
from mistral.lang.v2 import workbook as wb_v2
from mistral.lang.v2 import workflows as wf_v2
from mistral.utils import safe_yaml

V2_0 = '2.0'

ALL_VERSIONS = [V2_0]

# {workflow execution id => workflow specification}.
_WF_EX_CACHE = cachetools.LRUCache(maxsize=100)
_WF_EX_CACHE_LOCK = threading.RLock()

# {(workflow def id, workflow def updated at) => workflow specification}.
_WF_DEF_CACHE = cachetools.LRUCache(maxsize=100)
_WF_DEF_CACHE_LOCK = threading.RLock()


def parse_yaml(text):
    """Loads a text in YAML format as a dictionary object.

    :param text: YAML text.
    :return: Parsed YAML document as dictionary.
    """
    try:
        return safe_yaml.load(text) or {}
    except error.YAMLError as e:
        raise exc.DSLParsingException(
            "Definition could not be parsed: %s\n" % e
        )


def _get_spec_version(spec_dict):
    # If the version is not specified it will be '2.0' by default.
    ver = V2_0

    if 'version' in spec_dict:
        ver = spec_dict['version']

    def _raise(ver):
        raise exc.DSLParsingException('Unsupported DSL version: %s' % ver)

    try:
        str_ver = str(float(ver))
    except (ValueError, TypeError):
        _raise(ver)

    if not ver or str_ver not in ALL_VERSIONS:
        _raise(ver)

    return ver


# Factory methods to get specifications either from raw YAML formatted text
# or from dictionaries parsed from YAML formatted text.
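# NOTE(editor): Illustrative usage sketch, not part of the original module,
# assuming 'wf_text' holds a Mistral v2 workflow definition in YAML. The
# factory methods below are used like this:
#
#     wf_list_spec = get_workflow_list_spec_from_yaml(wf_text)
#
#     for wf_spec in wf_list_spec:
#         print(wf_spec.get_name(), wf_spec.get_type())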
def get_workbook_spec(spec_dict, validate):
    if _get_spec_version(spec_dict) == V2_0:
        return base.instantiate_spec(
            wb_v2.WorkbookSpec,
            spec_dict,
            validate
        )

    return None


def get_workbook_spec_from_yaml(text, validate=True):
    return get_workbook_spec(parse_yaml(text), validate)


def get_action_spec(spec_dict):
    if _get_spec_version(spec_dict) == V2_0:
        return base.instantiate_spec(actions_v2.ActionSpec, spec_dict)

    return None


def get_action_spec_from_yaml(text, action_name):
    spec_dict = parse_yaml(text)

    spec_dict['name'] = action_name

    return get_action_spec(spec_dict)


def get_action_list_spec(spec_dict, validate):
    return base.instantiate_spec(
        actions_v2.ActionListSpec,
        spec_dict,
        validate
    )


def get_action_list_spec_from_yaml(text, validate=True):
    return get_action_list_spec(parse_yaml(text), validate=validate)


def get_workflow_spec(spec_dict):
    """Get workflow specification object from dictionary.

    NOTE: For large workflows this method can take a long time (seconds).
    For this reason, the methods 'get_workflow_spec_by_definition_id' or
    'get_workflow_spec_by_execution_id' should be used whenever possible
    because they cache specification objects.

    :param spec_dict: Raw specification dictionary.
    """
    if _get_spec_version(spec_dict) == V2_0:
        return base.instantiate_spec(wf_v2.WorkflowSpec, spec_dict)

    return None


def get_workflow_list_spec(spec_dict, validate):
    return base.instantiate_spec(
        wf_v2.WorkflowListSpec,
        spec_dict,
        validate
    )


def get_workflow_spec_from_yaml(text):
    return get_workflow_spec(parse_yaml(text))


def get_workflow_list_spec_from_yaml(text, validate=True):
    return get_workflow_list_spec(parse_yaml(text), validate)


def get_task_spec(spec_dict):
    if _get_spec_version(spec_dict) == V2_0:
        return base.instantiate_spec(tasks_v2.TaskSpec, spec_dict)

    return None


def get_workflow_definition(wb_def, wf_name):
    wf_name = wf_name + ":"

    return _parse_def_from_wb(wb_def, "workflows:", wf_name)


def get_action_definition(wb_def, action_name):
    action_name += ":"

    return _parse_def_from_wb(wb_def, "actions:", action_name)


def _parse_def_from_wb(wb_def, section_name, item_name):
    io = six.StringIO(wb_def[wb_def.index(section_name):])

    io.readline()

    definition = []

    ident = 0

    # Get the indentation of the action/workflow name tag.
    for line in io:
        if item_name == line.strip():
            ident = line.index(item_name)

            definition.append(line.lstrip())

            break

    # Add lines to the definition until the same or lesser indentation
    # is found.
    for line in io:
        new_line = line.strip()

        if not new_line:
            definition.append(line)
        elif new_line.startswith("#"):
            new_line = line if ident > line.index("#") else line[ident:]

            definition.append(new_line)
        else:
            temp = line.index(line.lstrip())

            if ident < temp:
                definition.append(line[ident:])
            else:
                break

    io.close()

    return ''.join(definition).rstrip() + '\n'


# Methods for obtaining specifications in a more efficient way using
# caching techniques.

@cachetools.cached(_WF_EX_CACHE, lock=_WF_EX_CACHE_LOCK)
def get_workflow_spec_by_execution_id(wf_ex_id):
    """Gets workflow specification by workflow execution id.

    The idea is that when a workflow execution is running we must keep
    getting the same workflow specification even if the workflow definition
    has already changed. However, note that this is true only if the
    current engine instance didn't restart during the entire workflow
    execution run.

    :param wf_ex_id: Workflow execution id.
    :return: Workflow specification.
""" if not wf_ex_id: return None wf_ex = db_api.get_workflow_execution(wf_ex_id) return get_workflow_spec(wf_ex.spec) @cachetools.cached(_WF_DEF_CACHE, lock=_WF_DEF_CACHE_LOCK) def get_workflow_spec_by_definition_id(wf_def_id, wf_def_updated_at): """Gets specification by workflow definition id and its 'updated_at'. The idea of this method is to return a cached specification for the given workflow id and workflow definition 'updated_at'. As long as the given workflow definition remains the same in DB users of this method will be getting a cached value. Once the workflow definition has changed clients will be providing a different 'updated_at' value and hence this method will be called and spec is updated for this combination of parameters. Old cached values will be kicked out by LRU algorithm if the cache runs out of space. :param wf_def_id: Workflow definition id. :param wf_def_updated_at: Workflow definition 'updated_at' value. It serves only as part of cache key and is not explicitly used in the method. :return: Workflow specification. """ if not wf_def_id: return None wf_def = db_api.get_workflow_definition(wf_def_id) return get_workflow_spec(wf_def.spec) def cache_workflow_spec_by_execution_id(wf_ex_id, wf_spec): with _WF_EX_CACHE_LOCK: _WF_EX_CACHE[cachetools.keys.hashkey(wf_ex_id)] = wf_spec def get_wf_execution_spec_cache_size(): return len(_WF_EX_CACHE) def get_wf_definition_spec_cache_size(): return len(_WF_DEF_CACHE) def clear_caches(): """Clears all specification caches.""" with _WF_EX_CACHE_LOCK: _WF_EX_CACHE.clear() with _WF_DEF_CACHE_LOCK: _WF_DEF_CACHE.clear() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/lang/types.py0000644000175000017500000000544300000000000020233 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from mistral import expressions

NONEMPTY_STRING = {
    "type": "string",
    "minLength": 1
}

UNIQUE_STRING_LIST = {
    "type": "array",
    "items": NONEMPTY_STRING,
    "uniqueItems": True,
    "minItems": 1
}

POSITIVE_INTEGER = {
    "type": "integer",
    "minimum": 0
}

POSITIVE_NUMBER = {
    "type": "number",
    "minimum": 0.0
}

EXPRESSION = {
    "oneOf": [{
        "type": "string",
        "pattern": "^%s\\s*$" % expressions.patterns[name]
    } for name in expressions.patterns]
}

EXPRESSION_CONDITION = {
    "type": "object",
    "minProperties": 1,
    "patternProperties": {
        r"^\w+$": EXPRESSION
    }
}

ANY = {
    "anyOf": [
        {"type": "array"},
        {"type": "boolean"},
        {"type": "integer"},
        {"type": "number"},
        {"type": "object"},
        {"type": "string"}
    ]
}

ANY_NULLABLE = {
    "anyOf": [
        {"type": "null"},
        {"type": "array"},
        {"type": "boolean"},
        {"type": "integer"},
        {"type": "number"},
        {"type": "object"},
        {"type": "string"}
    ]
}

NONEMPTY_DICT = {
    "type": "object",
    "minProperties": 1,
    "patternProperties": {
        r"^\w+$": ANY_NULLABLE
    }
}

ONE_KEY_DICT = {
    "type": "object",
    "minProperties": 1,
    "maxProperties": 1,
    "patternProperties": {
        r"^\w+$": ANY_NULLABLE
    }
}

STRING_OR_EXPRESSION_CONDITION = {
    "oneOf": [
        NONEMPTY_STRING,
        EXPRESSION_CONDITION
    ]
}

EXPRESSION_OR_POSITIVE_INTEGER = {
    "oneOf": [
        EXPRESSION,
        POSITIVE_INTEGER
    ]
}

EXPRESSION_OR_BOOLEAN = {
    "oneOf": [
        EXPRESSION,
        {"type": "boolean"}
    ]
}

UNIQUE_STRING_OR_EXPRESSION_CONDITION_LIST = {
    "type": "array",
    "items": STRING_OR_EXPRESSION_CONDITION,
    "uniqueItems": True,
    "minItems": 1
}

VERSION = {
    "anyOf": [
        NONEMPTY_STRING,
        POSITIVE_INTEGER,
        POSITIVE_NUMBER
    ]
}

WORKFLOW_TYPE = {
    "enum": ["reverse", "direct"]
}

STRING_OR_ONE_KEY_DICT = {
    "oneOf": [
        NONEMPTY_STRING,
        ONE_KEY_DICT
    ]
}

UNIQUE_STRING_OR_ONE_KEY_DICT_LIST = {
    "type": "array",
    "items": STRING_OR_ONE_KEY_DICT,
    "uniqueItems": True,
    "minItems": 1
}


---- mistral-10.0.0.0b3/mistral/lang/v2/__init__.py (empty) ----

---- mistral-10.0.0.0b3/mistral/lang/v2/actions.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
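# NOTE(editor): Illustrative example, not part of the original module. An
# ad-hoc action definition parsed by the spec class below might look like
# this in YAML (names and values are made up):
#
#     my_action:
#       base: std.http
#       base-input:
#         url: http://example.com/<% $.path %>
#       input:
#         - path
#       output: <% $.content %>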
import six

from mistral.lang import types
from mistral.lang.v2 import base
from mistral_lib import utils


class ActionSpec(base.BaseSpec):
    # See http://json-schema.org
    _schema = {
        "type": "object",
        "properties": {
            "base": types.NONEMPTY_STRING,
            "base-input": types.NONEMPTY_DICT,
            "input": types.UNIQUE_STRING_OR_ONE_KEY_DICT_LIST,
            "output": types.ANY_NULLABLE,
        },
        "required": ["base"],
        "additionalProperties": False
    }

    def __init__(self, data, validate):
        super(ActionSpec, self).__init__(data, validate)

        self._name = data['name']
        self._description = data.get('description')
        self._tags = data.get('tags', [])
        self._base = data['base']
        self._base_input = data.get('base-input', {})
        self._input = utils.get_dict_from_entries(data.get('input', []))
        self._output = data.get('output')

        self._base, _input = self._parse_cmd_and_input(self._base)

        utils.merge_dicts(self._base_input, _input)

    def validate_schema(self):
        super(ActionSpec, self).validate_schema()

        # Validate YAQL expressions.
        inline_params = self._parse_cmd_and_input(self._data.get('base'))[1]

        self.validate_expr(inline_params)

        self.validate_expr(self._data.get('base-input', {}))

        if isinstance(self._data.get('output'), six.string_types):
            self.validate_expr(self._data.get('output'))

    def get_name(self):
        return self._name

    def get_description(self):
        return self._description

    def get_tags(self):
        return self._tags

    def get_base(self):
        return self._base

    def get_base_input(self):
        return self._base_input

    def get_input(self):
        return self._input

    def get_output(self):
        return self._output


class ActionSpecList(base.BaseSpecList):
    item_class = ActionSpec


class ActionListSpec(base.BaseListSpec):
    item_class = ActionSpec

    def get_actions(self):
        return self.get_items()


---- mistral-10.0.0.0b3/mistral/lang/v2/base.py ----

# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mistral.lang import base
from mistral.lang import types


class BaseSpec(base.BaseSpec):
    _version = "2.0"

    _meta_schema = {
        "type": "object",
        "properties": {
            "name": types.NONEMPTY_STRING,
            "version": types.VERSION,
            "description": types.NONEMPTY_STRING,
            "tags": types.UNIQUE_STRING_LIST
        },
        "required": ["name", "version"]
    }


class BaseSpecList(base.BaseSpecList):
    _version = "2.0"


class BaseListSpec(base.BaseListSpec):
    _version = "2.0"


---- mistral-10.0.0.0b3/mistral/lang/v2/on_clause.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six

from mistral.lang import types
from mistral.lang.v2 import base
from mistral.lang.v2 import publish

NEXT_TASK = {
    "oneOf": [
        {
            "type": "string",
            "pattern": r"^\S+$",
            "description": "Task name (e.g.: `task1`)"
        },
        {
            "type": "string",
            "pattern": r"^\w+ \w+=(.*)$",
            "description": "Task name with dict parameter "
                           "(e.g.: `fail msg=\"test\"`, "
                           "`fail msg=<% task() %>`)"
        },
        {
            "type": "string",
            "pattern": r"^\w+\(\w+=(.*)\)$",
            "description": "Task name with func parameter "
                           "(e.g.: `fail(msg=\"test\")`, "
                           "`fail(msg=<% task() %>)`)"
        }
    ]
}

TASK_WITH_EXPRESSION = {
    "type": "object",
    "minProperties": 1,
    "maxProperties": 1,
    "patternProperties": {
        x['pattern']: types.EXPRESSION for x in NEXT_TASK['oneOf']
    },
    "description": "All next task variants plus expression (e.g.: "
                   "`task1: <% $.vm_id != null %>`, "
                   "`fail(msg=\"test\"): <% $.vm_id != null %>`)."
}

LIST_OF_TASKS = {
    "type": "array",
    "items": {
        "oneOf": [
            NEXT_TASK,
            TASK_WITH_EXPRESSION
        ]
    },
    "uniqueItems": True,
    "minItems": 1
}

ADVANCED_PUBLISHING_DICT = {
    "type": "object",
    "minProperties": 1,
    "properties": {
        "publish": publish.PublishSpec.get_schema(),
        "next": {
            "oneOf": [
                NEXT_TASK,
                TASK_WITH_EXPRESSION,
                LIST_OF_TASKS
            ]
        }
    },
    "additionalProperties": False
}


class OnClauseSpec(base.BaseSpec):
    _schema = {
        "oneOf": [
            NEXT_TASK,
            TASK_WITH_EXPRESSION,
            LIST_OF_TASKS,
            ADVANCED_PUBLISHING_DICT
        ]
    }

    def __init__(self, data, validate):
        super(OnClauseSpec, self).__init__(data, validate)

        if not isinstance(data, dict):
            # Old simple schema.
            self._publish = None

            self._next = prepare_next_clause(data)
        else:
            # New advanced schema.
            self._publish = self._spec_property('publish',
                                                publish.PublishSpec)

            self._next = prepare_next_clause(data.get('next'))

    @classmethod
    def get_schema(cls, includes=('definitions',)):
        return super(OnClauseSpec, cls).get_schema(includes)

    def get_publish(self):
        return self._publish

    def get_next(self):
        return self._next


def _as_list_of_tuples(data):
    if not data:
        return []

    if isinstance(data, six.string_types):
        return [_as_tuple(data)]

    return [_as_tuple(item) for item in data]


def _as_tuple(val):
    return list(val.items())[0] if isinstance(val, dict) else (val, '')


def prepare_next_clause(next_clause):
    list_of_tuples = _as_list_of_tuples(next_clause)

    for i, task in enumerate(list_of_tuples):
        task_name, params = OnClauseSpec._parse_cmd_and_input(task[0])

        list_of_tuples[i] = (task_name, task[1], params)

    return list_of_tuples


---- mistral-10.0.0.0b3/mistral/lang/v2/policies.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mistral.lang import types
from mistral.lang.v2 import base
from mistral.lang.v2 import retry_policy


class PoliciesSpec(base.BaseSpec):
    # See http://json-schema.org
    _schema = {
        "type": "object",
        "properties": {
            "retry": retry_policy.RetrySpec.get_schema(),
            "wait-before": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "wait-after": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "timeout": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "pause-before": types.EXPRESSION_OR_BOOLEAN,
            "concurrency": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "fail-on": types.EXPRESSION_OR_BOOLEAN
        },
        "additionalProperties": False
    }

    @classmethod
    def get_schema(cls, includes=('definitions',)):
        return super(PoliciesSpec, cls).get_schema(includes)

    def __init__(self, data, validate):
        super(PoliciesSpec, self).__init__(data, validate)

        self._retry = self._spec_property('retry', retry_policy.RetrySpec)
        self._wait_before = data.get('wait-before', 0)
        self._wait_after = data.get('wait-after', 0)
        self._timeout = data.get('timeout', 0)
        self._pause_before = data.get('pause-before', False)
        self._concurrency = data.get('concurrency', 0)
        self._fail_on = data.get('fail-on', False)

    def validate_schema(self):
        super(PoliciesSpec, self).validate_schema()

        # Validate YAQL expressions.
        self.validate_expr(self._data.get('wait-before', 0))
        self.validate_expr(self._data.get('wait-after', 0))
        self.validate_expr(self._data.get('timeout', 0))
        self.validate_expr(self._data.get('pause-before', False))
        self.validate_expr(self._data.get('concurrency', 0))
        self.validate_expr(self._data.get('fail-on', False))

    def get_retry(self):
        return self._retry

    def get_wait_before(self):
        return self._wait_before

    def get_wait_after(self):
        return self._wait_after

    def get_timeout(self):
        return self._timeout

    def get_pause_before(self):
        return self._pause_before

    def get_concurrency(self):
        return self._concurrency

    def get_fail_on(self):
        return self._fail_on


---- mistral-10.0.0.0b3/mistral/lang/v2/publish.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
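# NOTE(editor): Illustrative example, not part of the original module. The
# advanced publishing syntax described by the spec class below looks like
# this in a task definition (variable names are made up):
#
#     on-success:
#       publish:
#         branch:
#           my_var: <% task().result %>
#         global:
#           global_var: <% task().result %>
#       next:
#         - task2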
from mistral import exceptions as exc
from mistral.lang import types
from mistral.lang.v2 import base
from mistral_lib import utils


class PublishSpec(base.BaseSpec):
    _schema = {
        "type": "object",
        "properties": {
            "branch": types.NONEMPTY_DICT,
            "global": types.NONEMPTY_DICT,
            "atomic": types.NONEMPTY_DICT
        },
        "additionalProperties": False
    }

    def __init__(self, data, validate):
        super(PublishSpec, self).__init__(data, validate)

        self._branch = self._data.get('branch')
        self._global = self._data.get('global')
        self._atomic = self._data.get('atomic')

    @classmethod
    def get_schema(cls, includes=('definitions',)):
        return super(PublishSpec, cls).get_schema(includes)

    def validate_semantics(self):
        if not self._branch and not self._global and not self._atomic:
            raise exc.InvalidModelException(
                "Either 'branch', 'global' or 'atomic' must be specified: "
                "%s" % self._data
            )

        self.validate_expr(self._branch)
        self.validate_expr(self._global)
        self.validate_expr(self._atomic)

    def get_branch(self):
        return self._branch

    def get_global(self):
        return self._global

    def get_atomic(self):
        return self._atomic

    def merge(self, spec_to_merge):
        if spec_to_merge:
            if spec_to_merge.get_branch():
                utils.merge_dicts(self._branch, spec_to_merge.get_branch())

            if spec_to_merge.get_global():
                utils.merge_dicts(self._global, spec_to_merge.get_global())

            if spec_to_merge.get_atomic():
                utils.merge_dicts(self._atomic, spec_to_merge.get_atomic())


---- mistral-10.0.0.0b3/mistral/lang/v2/retry_policy.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six

from mistral.lang import types
from mistral.lang.v2 import base


class RetrySpec(base.BaseSpec):
    # See http://json-schema.org
    _retry_dict_schema = {
        "type": "object",
        "properties": {
            "count": {
                "oneOf": [
                    types.EXPRESSION,
                    types.POSITIVE_INTEGER
                ]
            },
            "break-on": types.EXPRESSION,
            "continue-on": types.EXPRESSION,
            "delay": {
                "oneOf": [
                    types.EXPRESSION,
                    types.POSITIVE_INTEGER
                ]
            },
        },
        "required": ["delay", "count"],
        "additionalProperties": False
    }

    _schema = {
        "oneOf": [
            _retry_dict_schema,
            types.NONEMPTY_STRING
        ]
    }

    @classmethod
    def get_schema(cls, includes=('definitions',)):
        return super(RetrySpec, cls).get_schema(includes)

    def __init__(self, data, validate):
        data = self._transform_retry_one_line(data)

        super(RetrySpec, self).__init__(data, validate)

        self._break_on = data.get('break-on')
        self._count = data.get('count')
        self._continue_on = data.get('continue-on')
        self._delay = data['delay']

    def _transform_retry_one_line(self, retry):
        if isinstance(retry, six.string_types):
            _, params = self._parse_cmd_and_input(retry)

            return params

        return retry

    def validate_schema(self):
        super(RetrySpec, self).validate_schema()

        # Validate YAQL expressions.
        self.validate_expr(self._data.get('count'))
        self.validate_expr(self._data.get('delay'))
        self.validate_expr(self._data.get('break-on'))
        self.validate_expr(self._data.get('continue-on'))

    def get_count(self):
        return self._count

    def get_break_on(self):
        return self._break_on

    def get_continue_on(self):
        return self._continue_on

    def get_delay(self):
        return self._delay


---- mistral-10.0.0.0b3/mistral/lang/v2/task_defaults.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six

from mistral.lang import types
from mistral.lang.v2 import base
from mistral.lang.v2 import on_clause
from mistral.lang.v2 import policies
from mistral.lang.v2 import retry_policy


# TODO(rakhmerov): This specification should be broken into two separate
# specs for direct and reverse workflows. It's weird to combine them into
# one because they address different use cases.
class TaskDefaultsSpec(base.BaseSpec):
    # See http://json-schema.org
    _schema = {
        "type": "object",
        "properties": {
            "retry": retry_policy.RetrySpec.get_schema(),
            "wait-before": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "wait-after": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "timeout": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "pause-before": types.EXPRESSION_OR_BOOLEAN,
            "concurrency": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "fail-on": types.EXPRESSION_OR_BOOLEAN,
            "on-complete": on_clause.OnClauseSpec.get_schema(),
            "on-success": on_clause.OnClauseSpec.get_schema(),
            "on-error": on_clause.OnClauseSpec.get_schema(),
            "safe-rerun": types.EXPRESSION_OR_BOOLEAN,
            "requires": {
                "oneOf": [types.NONEMPTY_STRING, types.UNIQUE_STRING_LIST]
            }
        },
        "additionalProperties": False
    }

    @classmethod
    def get_schema(cls, includes=('definitions',)):
        return super(TaskDefaultsSpec, cls).get_schema(includes)

    def __init__(self, data, validate):
        super(TaskDefaultsSpec, self).__init__(data, validate)

        self._policies = self._group_spec(
            policies.PoliciesSpec,
            'retry',
            'wait-before',
            'wait-after',
            'timeout',
            'pause-before',
            'concurrency',
            'fail-on'
        )

        on_spec_cls = on_clause.OnClauseSpec

        self._on_complete = self._spec_property('on-complete', on_spec_cls)
        self._on_success = self._spec_property('on-success', on_spec_cls)
        self._on_error = self._spec_property('on-error', on_spec_cls)

        self._safe_rerun = data.get('safe-rerun')

        # TODO(rakhmerov): 'requires' should reside in a different spec for
        # reverse workflows.
        self._requires = data.get('requires', [])

    def validate_schema(self):
        super(TaskDefaultsSpec, self).validate_schema()

        self.validate_expr(self._data.get('safe-rerun', {}))

    def validate_semantics(self):
        # Validate YAQL expressions.
        self._validate_transitions(self._on_complete)
        self._validate_transitions(self._on_success)
        self._validate_transitions(self._on_error)

    def _validate_transitions(self, on_clause_spec):
        val = on_clause_spec.get_next() if on_clause_spec else []

        if not val:
            return

        [self.validate_expr(t)
         for t in ([val] if isinstance(val, six.string_types) else val)]

    def get_policies(self):
        return self._policies

    def get_on_complete(self):
        return self._on_complete

    def get_on_success(self):
        return self._on_success

    def get_on_error(self):
        return self._on_error

    def get_safe_rerun(self):
        return self._safe_rerun

    def get_requires(self):
        if isinstance(self._requires, six.string_types):
            return [self._requires]

        return self._requires


---- mistral-10.0.0.0b3/mistral/lang/v2/tasks.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2019 - NetCracker Technology Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import json
import re

import six

from mistral import exceptions as exc
from mistral import expressions
from mistral.lang import types
from mistral.lang.v2 import base
from mistral.lang.v2 import on_clause
from mistral.lang.v2 import policies
from mistral.lang.v2 import publish
from mistral.lang.v2 import retry_policy
from mistral.workflow import states
from mistral_lib import utils

_expr_ptrns = [expressions.patterns[name] for name in expressions.patterns]

WITH_ITEMS_PTRN = re.compile(
    r"\s*([\w\d_\-]+)\s*in\s*(\[.+\]|%s)" % '|'.join(_expr_ptrns)
)

MAX_LENGTH_TASK_NAME = 255
# The length of a join task name must be less than or equal to the maximum
# length of the task_executions unique_key and named_locks name columns.
# Their maximum equals 255.
# https://dev.mysql.com/doc/refman/5.6/en/innodb-restrictions.html
# For example: "join-task-" + "workflow execution id" + "-" +
# "task join name" = 255
# "task join name" = 255 - 36 - 1 - 10 = MAX_LENGTH_TASK_NAME - 47
MAX_LENGTH_JOIN_TASK_NAME = MAX_LENGTH_TASK_NAME - 47


class TaskSpec(base.BaseSpec):
    # See http://json-schema.org
    _polymorphic_key = ('type', 'direct')

    _schema = {
        "type": "object",
        "properties": {
            "type": types.WORKFLOW_TYPE,
            "action": types.NONEMPTY_STRING,
            "workflow": types.NONEMPTY_STRING,
            "input": {
                "oneOf": [
                    types.NONEMPTY_DICT,
                    types.NONEMPTY_STRING
                ]
            },
            "with-items": {
                "oneOf": [
                    types.NONEMPTY_STRING,
                    types.UNIQUE_STRING_LIST
                ]
            },
            "publish": types.NONEMPTY_DICT,
            "publish-on-error": types.NONEMPTY_DICT,
            "retry": retry_policy.RetrySpec.get_schema(),
            "wait-before": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "wait-after": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "timeout": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "pause-before": types.EXPRESSION_OR_BOOLEAN,
            "concurrency": types.EXPRESSION_OR_POSITIVE_INTEGER,
            "fail-on": types.EXPRESSION_OR_BOOLEAN,
            "target": types.NONEMPTY_STRING,
            "keep-result": types.EXPRESSION_OR_BOOLEAN,
            "safe-rerun": types.EXPRESSION_OR_BOOLEAN
        },
        "additionalProperties": False,
        "anyOf": [
            {
                "not": {
                    "type": "object",
                    "required": ["action", "workflow"]
                },
            },
            {
                "oneOf": [
                    {
                        "type": "object",
                        "required": ["action"]
                    },
                    {
                        "type": "object",
                        "required": ["workflow"]
                    }
                ]
            }
        ]
    }

    def __init__(self, data, validate):
        super(TaskSpec, self).__init__(data, validate)

        self._name = data['name']
        self._description = data.get('description')
        self._action = data.get('action')
        self._workflow = data.get('workflow')
        self._tags = data.get('tags', [])
        self._input = data.get('input', {})
        self._with_items = self._get_with_items_as_dict()
        self._publish = data.get('publish', {})
        self._publish_on_error = data.get('publish-on-error', {})
        self._policies = self._group_spec(
            policies.PoliciesSpec,
            'retry',
            'wait-before',
            'wait-after',
            'timeout',
            'pause-before',
            'concurrency',
            'fail-on'
        )
        self._target = data.get('target')
        self._keep_result = data.get('keep-result', True)
        self._safe_rerun = data.get('safe-rerun')

        self._process_action_and_workflow()

    def validate_schema(self):
        super(TaskSpec, self).validate_schema()

        self._validate_name()

        action = self._data.get('action')
        workflow = self._data.get('workflow')

        # Validate YAQL expressions.
        if action or workflow:
            inline_params = self._parse_cmd_and_input(action or workflow)[1]

            self.validate_expr(inline_params)

        self.validate_expr(self._data.get('input', {}))
        self.validate_expr(self._data.get('publish', {}))
        self.validate_expr(self._data.get('publish-on-error', {}))
        self.validate_expr(self._data.get('keep-result', {}))
        self.validate_expr(self._data.get('safe-rerun', {}))

    def _validate_name(self):
        task_name = self._data.get('name')

        if len(task_name) > MAX_LENGTH_TASK_NAME:
            raise exc.InvalidModelException(
                "The length of a '{0}' task name must not exceed {1}"
                " symbols".format(task_name, MAX_LENGTH_TASK_NAME)
            )

    def _get_with_items_as_dict(self):
        raw = self._data.get('with-items', [])

        with_items = {}

        if isinstance(raw, six.string_types):
            raw = [raw]

        for item in raw:
            if not isinstance(item, six.string_types):
                raise exc.InvalidModelException(
                    "'with-items' elements should be strings: %s" %
                    self._data
                )

            match = re.match(WITH_ITEMS_PTRN, item)

            if not match:
                raise exc.InvalidModelException(
                    "Wrong format of 'with-items' property. Please use "
                    "format 'var in {[some, list] | <%% $.array %%> }: "
                    "%s" % self._data
                )

            match_groups = match.groups()
            var_name = match_groups[0]
            array = match_groups[1]

            # Validate the YAQL expression that may follow after "in" for
            # the with-items syntax "var in {[some, list] | <% $.array %> }".
            self.validate_expr(array)

            if array.startswith('['):
                try:
                    array = json.loads(array)
                except Exception as e:
                    msg = ("Invalid array in 'with-items' clause: "
                           "%s, error: %s" % (array, str(e)))

                    raise exc.InvalidModelException(msg)

            with_items[var_name] = array

        return with_items

    def _process_action_and_workflow(self):
        params = {}

        if self._action:
            self._action, params = self._parse_cmd_and_input(self._action)
        elif self._workflow:
            self._workflow, params = self._parse_cmd_and_input(
                self._workflow)
        else:
            self._action = 'std.noop'

        utils.merge_dicts(self._input, params)

    def get_name(self):
        return self._name

    def get_description(self):
        return self._description

    def get_action_name(self):
        return self._action if self._action else None

    def get_workflow_name(self):
        return self._workflow

    def get_tags(self):
        return self._tags

    def get_input(self):
        return self._input

    def get_with_items(self):
        return self._with_items

    def get_policies(self):
        return self._policies

    def get_target(self):
        return self._target

    def get_publish(self, state):
        spec = None

        if state == states.SUCCESS and self._publish:
            spec = publish.PublishSpec(
                {'branch': self._publish},
                validate=self._validate
            )
        elif state == states.ERROR and self._publish_on_error:
            spec = publish.PublishSpec(
                {'branch': self._publish_on_error},
                validate=self._validate
            )

        return spec

    def get_keep_result(self):
        return self._keep_result

    def get_safe_rerun(self):
        return self._safe_rerun

    def get_type(self):
        return (utils.WORKFLOW_TASK_TYPE if self._workflow
                else utils.ACTION_TASK_TYPE)


class DirectWorkflowTaskSpec(TaskSpec):
    _polymorphic_value = 'direct'

    _direct_workflow_schema = {
        "type": "object",
        "properties": {
            "type": {"enum": [_polymorphic_value]},
            "join": {
                "oneOf": [
                    {"enum": ["all", "one"]},
                    types.POSITIVE_INTEGER
                ]
            },
            "on-complete": on_clause.OnClauseSpec.get_schema(),
            "on-success": on_clause.OnClauseSpec.get_schema(),
            "on-error": on_clause.OnClauseSpec.get_schema()
        }
    }

    _schema = utils.merge_dicts(
        copy.deepcopy(TaskSpec._schema),
        _direct_workflow_schema
    )

    def __init__(self, data, validate):
        super(DirectWorkflowTaskSpec, self).__init__(data, validate)

        self._join = data.get('join')

        on_spec_cls = on_clause.OnClauseSpec

        self._on_complete = self._spec_property('on-complete', on_spec_cls)
        self._on_success = self._spec_property('on-success', on_spec_cls)
        self._on_error = self._spec_property('on-error', on_spec_cls)

    def validate_semantics(self):
        # Validate YAQL expressions.
        self._validate_transitions(self._on_complete)
        self._validate_transitions(self._on_success)
        self._validate_transitions(self._on_error)

        if self._join:
            join_task_name = self.get_name()

            if len(join_task_name) > MAX_LENGTH_JOIN_TASK_NAME:
                raise exc.InvalidModelException(
                    "The length of a '{0}' join task name must not exceed "
                    "{1} symbols".format(
                        join_task_name,
                        MAX_LENGTH_JOIN_TASK_NAME
                    )
                )

    def _validate_transitions(self, on_clause_spec):
        val = on_clause_spec.get_next() if on_clause_spec else []

        if not val:
            return

        [self.validate_expr(t)
         for t in ([val] if isinstance(val, six.string_types) else val)]

    def get_publish(self, state):
        spec = super(DirectWorkflowTaskSpec, self).get_publish(state)

        if self._on_complete and self._on_complete.get_publish():
            if spec:
                spec.merge(self._on_complete.get_publish())
            else:
                spec = self._on_complete.get_publish()

        if state == states.SUCCESS:
            on_clause = self._on_success
        elif state == states.ERROR:
            on_clause = self._on_error

        if on_clause and on_clause.get_publish():
            if spec:
                on_clause.get_publish().merge(spec)

            return on_clause.get_publish()

        return spec

    def get_join(self):
        return self._join

    def get_on_complete(self):
        return self._on_complete

    def get_on_success(self):
        return self._on_success

    def get_on_error(self):
        return self._on_error


class ReverseWorkflowTaskSpec(TaskSpec):
    _polymorphic_value = 'reverse'

    _reverse_workflow_schema = {
        "type": "object",
        "properties": {
            "type": {"enum": [_polymorphic_value]},
            "requires": {
                "oneOf": [types.NONEMPTY_STRING, types.UNIQUE_STRING_LIST]
            }
        }
    }

    _schema = utils.merge_dicts(
        copy.deepcopy(TaskSpec._schema),
        _reverse_workflow_schema
    )

    def __init__(self, data, validate):
        super(ReverseWorkflowTaskSpec, self).__init__(data, validate)

        self._requires = data.get('requires', [])

    def get_requires(self):
        if isinstance(self._requires, six.string_types):
            return [self._requires]

        return self._requires


class TaskSpecList(base.BaseSpecList):
    item_class = TaskSpec


---- mistral-10.0.0.0b3/mistral/lang/v2/workbook.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
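# NOTE(editor): Illustrative example, not part of the original module. A
# minimal workbook accepted by the schema below (names are made up):
#
#     version: '2.0'
#     name: my_workbook
#
#     workflows:
#       wf1:
#         tasks:
#           task1:
#             action: std.noop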
from mistral.lang import types from mistral.lang.v2 import actions as act from mistral.lang.v2 import base from mistral.lang.v2 import workflows as wf # We want to match any single word that isn't exactly "version" NON_VERSION_WORD_REGEX = r"^(?!version$)[\w-]+$" class WorkbookSpec(base.BaseSpec): # See http://json-schema.org _schema = { "type": "object", "properties": { "version": {"enum": ["2.0", 2.0]}, "actions": { "type": "object", "minProperties": 1, "patternProperties": { "^version$": {"enum": ["2.0", 2.0]}, NON_VERSION_WORD_REGEX: types.ANY }, "additionalProperties": False }, "workflows": { "type": "object", "minProperties": 1, "patternProperties": { "^version$": {"enum": ["2.0", 2.0]}, NON_VERSION_WORD_REGEX: types.ANY }, "additionalProperties": False } }, "additionalProperties": False } def __init__(self, data, validate): super(WorkbookSpec, self).__init__(data, validate) self._inject_version(['actions', 'workflows']) self._name = data['name'] self._description = data.get('description') self._tags = data.get('tags', []) self._actions = self._spec_property('actions', act.ActionSpecList) self._workflows = self._spec_property('workflows', wf.WorkflowSpecList) def get_name(self): return self._name def get_description(self): return self._description def get_tags(self): return self._tags def get_actions(self): return self._actions def get_workflows(self): return self._workflows ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/lang/v2/workflows.py0000644000175000017500000002702200000000000021450 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
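# --- Illustrative sketch (not part of the Mistral source) ---
# WorkbookSpec above accepts 'actions'/'workflows' mappings whose keys
# are either the literal word 'version' or any other single word
# (NON_VERSION_WORD_REGEX). A small jsonschema check demonstrating that
# patternProperties idea (this schema is a simplified stand-in, not the
# full Mistral schema):
import jsonschema

schema = {
    "type": "object",
    "minProperties": 1,
    "patternProperties": {
        "^version$": {"enum": ["2.0", 2.0]},
        r"^(?!version$)[\w-]+$": {},
    },
    "additionalProperties": False,
}

jsonschema.validate({"version": "2.0", "my_workflow": {}}, schema)  # passes
# jsonschema.validate({"version": "1.0"}, schema)  # raises ValidationError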
from oslo_utils import uuidutils from osprofiler import profiler import six from mistral import exceptions as exc from mistral.lang import types from mistral.lang.v2 import base from mistral.lang.v2 import task_defaults from mistral.lang.v2 import tasks from mistral_lib import utils NOOP_COMMAND = 'noop' FAIL_COMMAND = 'fail' SUCCEED_COMMAND = 'succeed' PAUSE_COMMAND = 'pause' ENGINE_COMMANDS = [ NOOP_COMMAND, FAIL_COMMAND, SUCCEED_COMMAND, PAUSE_COMMAND ] class WorkflowSpec(base.BaseSpec): # See http://json-schema.org _polymorphic_key = ('type', 'direct') _meta_schema = { "type": "object", "properties": { "type": types.WORKFLOW_TYPE, "task-defaults": types.NONEMPTY_DICT, "input": types.UNIQUE_STRING_OR_ONE_KEY_DICT_LIST, "output": types.NONEMPTY_DICT, "output-on-error": types.NONEMPTY_DICT, "vars": types.NONEMPTY_DICT, "tags": types.UNIQUE_STRING_LIST }, "required": ["tasks"], "additionalProperties": False } def __init__(self, data, validate): super(WorkflowSpec, self).__init__(data, validate) self._name = data['name'] self._description = data.get('description') self._tags = data.get('tags', []) self._type = data['type'] if 'type' in data else 'direct' self._input = utils.get_dict_from_entries(data.get('input', [])) self._output = data.get('output', {}) self._output_on_error = data.get('output-on-error', {}) self._vars = data.get('vars', {}) self._task_defaults = self._spec_property( 'task-defaults', task_defaults.TaskDefaultsSpec ) # Inject 'type' here, so instantiate_spec function can recognize the # specific subclass of TaskSpec. for task in six.itervalues(self._data.get('tasks')): task['type'] = self._type self._tasks = self._spec_property('tasks', tasks.TaskSpecList) def validate_schema(self): super(WorkflowSpec, self).validate_schema() if not self._data.get('tasks'): raise exc.InvalidModelException( "Workflow doesn't have any tasks [data=%s]" % self._data ) # Validate expressions. self.validate_expr(self._data.get('output', {})) self.validate_expr(self._data.get('vars', {})) def validate_semantics(self): super(WorkflowSpec, self).validate_semantics() # Distinguish workflow name from workflow UUID. if uuidutils.is_uuid_like(self._name): raise exc.InvalidModelException( "Workflow name cannot be in the format of UUID." ) def _validate_task_link(self, task_name, allow_engine_cmds=True): valid_task = self._task_exists(task_name) if allow_engine_cmds: valid_task |= task_name in ENGINE_COMMANDS if not valid_task: raise exc.InvalidModelException( "Task '%s' not found." % task_name ) def _task_exists(self, task_name): return self.get_tasks()[task_name] is not None def get_name(self): return self._name def get_description(self): return self._description def get_tags(self): return self._tags def get_type(self): return self._type def get_input(self): return self._input def get_output(self): return self._output def get_output_on_error(self): return self._output_on_error def get_vars(self): return self._vars def get_task_defaults(self): return self._task_defaults def get_tasks(self): return self._tasks def get_task(self, name): return self._tasks[name] class DirectWorkflowSpec(WorkflowSpec): _polymorphic_value = 'direct' _schema = { "properties": { "tasks": { "type": "object", "minProperties": 1, "patternProperties": { r"^\w+$": types.NONEMPTY_DICT } }, } } def __init__(self, data, validate): super(DirectWorkflowSpec, self).__init__(data, validate) # Init simple dictionary based caches for inbound and # outbound task specifications. 
In fact, we don't need # any special cache implementations here because these # structures can't grow indefinitely. self.inbound_tasks_cache = {} self.outbound_tasks_cache = {} @profiler.trace('direct-wf-spec-validate-semantics', hide_args=True) def validate_semantics(self): super(DirectWorkflowSpec, self).validate_semantics() # Check if there are start tasks. if not self.find_start_tasks(): raise exc.DSLParsingException( 'Failed to find start tasks in direct workflow. ' 'There must be at least one task without inbound transition.' '[workflow_name=%s]' % self._name ) self._check_workflow_integrity() self._check_join_tasks() @profiler.trace('direct-wf-spec-check-workflow-integrity', hide_args=True) def _check_workflow_integrity(self): for t_s in self.get_tasks(): out_task_names = self.find_outbound_task_names(t_s.get_name()) for out_t_name in out_task_names: self._validate_task_link(out_t_name) def _check_join_tasks(self): join_tasks = [t for t in self.get_tasks() if t.get_join()] err_msgs = [] for join_t in join_tasks: t_name = join_t.get_name() join_val = join_t.get_join() in_tasks = self.find_inbound_task_specs(join_t) if join_val == 'all': if len(in_tasks) == 0: err_msgs.append( "No inbound tasks for task with 'join: all'" " [task_name=%s]" % t_name ) continue if join_val == 'one': join_val = 1 if len(in_tasks) < join_val: err_msgs.append( "Not enough inbound tasks for task with 'join'" " [task_name=%s, join=%s, inbound_tasks=%s]" % (t_name, join_val, len(in_tasks)) ) if len(err_msgs) > 0: raise exc.InvalidModelException('\n'.join(err_msgs)) def find_start_tasks(self): return [ t_s for t_s in self.get_tasks() if not self.has_inbound_transitions(t_s) ] def find_inbound_task_specs(self, task_spec): task_name = task_spec.get_name() specs = self.inbound_tasks_cache.get(task_name) if specs is not None: return specs specs = [ t_s for t_s in self.get_tasks() if self.transition_exists(t_s.get_name(), task_name) ] self.inbound_tasks_cache[task_name] = specs return specs def find_outbound_task_specs(self, task_spec): task_name = task_spec.get_name() specs = self.outbound_tasks_cache.get(task_name) if specs is not None: return specs specs = [ t_s for t_s in self.get_tasks() if self.transition_exists(task_name, t_s.get_name()) ] self.outbound_tasks_cache[task_name] = specs return specs def has_inbound_transitions(self, task_spec): return len(self.find_inbound_task_specs(task_spec)) > 0 def has_outbound_transitions(self, task_spec): return len(self.find_outbound_task_specs(task_spec)) > 0 def find_outbound_task_names(self, task_name): t_names = set() for tup in self.get_on_error_clause(task_name): t_names.add(tup[0]) for tup in self.get_on_success_clause(task_name): t_names.add(tup[0]) for tup in self.get_on_complete_clause(task_name): t_names.add(tup[0]) return t_names def transition_exists(self, from_task_name, to_task_name): t_names = self.find_outbound_task_names(from_task_name) return to_task_name in t_names def get_on_error_clause(self, t_name): result = [] on_clause = self.get_task(t_name).get_on_error() if on_clause: result = on_clause.get_next() if not result: t_defaults = self.get_task_defaults() if t_defaults and t_defaults.get_on_error(): result = self._remove_task_from_clause( t_defaults.get_on_error().get_next(), t_name ) return result def get_on_success_clause(self, t_name): result = [] on_clause = self.get_task(t_name).get_on_success() if on_clause: result = on_clause.get_next() if not result: t_defaults = self.get_task_defaults() if t_defaults and t_defaults.get_on_success(): result 
= self._remove_task_from_clause( t_defaults.get_on_success().get_next(), t_name ) return result def get_on_complete_clause(self, t_name): result = [] on_clause = self.get_task(t_name).get_on_complete() if on_clause: result = on_clause.get_next() if not result: t_defaults = self.get_task_defaults() if t_defaults and t_defaults.get_on_complete(): result = self._remove_task_from_clause( t_defaults.get_on_complete().get_next(), t_name ) return result @staticmethod def _remove_task_from_clause(on_clause, t_name): return list([tup for tup in on_clause if tup[0] != t_name]) class ReverseWorkflowSpec(WorkflowSpec): _polymorphic_value = 'reverse' _schema = { "properties": { "tasks": { "type": "object", "minProperties": 1, "patternProperties": { r"^\w+$": types.NONEMPTY_DICT } }, } } def validate_semantics(self): super(ReverseWorkflowSpec, self).validate_semantics() self._check_workflow_integrity() def _check_workflow_integrity(self): for t_s in self.get_tasks(): for req in self.get_task_requires(t_s): self._validate_task_link(req, allow_engine_cmds=False) def get_task_requires(self, task_spec): requires = set(task_spec.get_requires()) defaults = self.get_task_defaults() if defaults: requires |= set(defaults.get_requires()) requires.discard(task_spec.get_name()) return list(requires) class WorkflowSpecList(base.BaseSpecList): item_class = WorkflowSpec class WorkflowListSpec(base.BaseListSpec): item_class = WorkflowSpec def get_workflows(self): return self.get_items() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/messaging.py0000644000175000017500000000736700000000000020132 0ustar00coreycorey00000000000000# Copyright 2016 - IBM Corp. # Copyright 2016 Catalyst IT Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains common structures and functions that help to handle AMQP messages based on oslo.messaging framework. """ import abc from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_messaging.notify import dispatcher from oslo_messaging.notify import listener from oslo_messaging import target from oslo_messaging import transport from oslo_utils import timeutils import six LOG = logging.getLogger(__name__) CONF = cfg.CONF def handle_event(self, ctxt, publisher_id, event_type, payload, metadata): """Callback function of each priority of notification messages. The function is used to construct endpoint class dynamically when starting listener in event engine service. After the class is created, 'self' param will make sense. :param ctxt: the notification context dict :param publisher_id: always describes where notification is sent from, for example: 'compute.host1' :param event_type: describes the event, for example: 'compute.create_instance' :param payload: the notification payload :param metadata: the notification metadata, is always a mapping containing a unique message_id and a timestamp. """ LOG.debug('Received notification. 
publisher_id: %s, event_type: %s, ' 'payload: %s, metadata: %s.', publisher_id, event_type, payload, metadata) notification = { 'event_type': event_type, 'payload': payload, 'publisher': publisher_id, 'timestamp': metadata.get('timestamp', ctxt.get('timestamp', timeutils.utcnow())), 'context': ctxt } self.event_engine.process_notification_event(notification) return dispatcher.NotificationResult.HANDLED @six.add_metaclass(abc.ABCMeta) class NotificationEndpoint(object): """Message listener endpoint. Only handle notifications that match the NotificationFilter rule set into the filter_rule attribute of the endpoint. """ event_types = [] def __init__(self, event_engine): self.event_engine = event_engine self.filter_rule = oslo_messaging.NotificationFilter( event_type='|'.join(self.event_types)) def get_pool_name(exchange): """Get pool name. Get the pool name for the listener, it will be formatted as 'mistral-exchange-hostname' :param exchange: exchange name """ pool_host = CONF.event_engine.listener_pool_name pool_name = 'mistral-%s-%s' % (exchange, pool_host) LOG.debug("Listener pool name is %s", pool_name) return pool_name def start_listener(conf, exchange, topic, endpoints): """Starts up a notification listener.""" trans = transport.get_transport(conf) targets = [target.Target(exchange=exchange, topic=topic)] pool_name = get_pool_name(exchange) notification_listener = listener.get_notification_listener( trans, targets, endpoints, executor='threading', allow_requeue=False, pool=pool_name ) notification_listener.start() return notification_listener ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1215675 mistral-10.0.0.0b3/mistral/notifiers/0000755000175000017500000000000000000000000017570 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/notifiers/__init__.py0000644000175000017500000000000000000000000021667 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/notifiers/base.py0000644000175000017500000000407000000000000021055 0ustar00coreycorey00000000000000# Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
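# --- Illustrative sketch (not part of the Mistral source) ---
# As its docstring says, handle_event() above is a plain module-level
# function so that the event engine can build listener endpoint classes
# at runtime, binding the same callback under each notification
# priority ('info', 'error', ...). A hedged sketch of that pattern
# using type(); the helper name and priority list are assumptions for
# illustration:
from mistral.messaging import NotificationEndpoint, handle_event


def make_endpoint_class(event_types, priorities=('info', 'error')):
    attrs = {'event_types': list(event_types)}

    for priority in priorities:
        # Once bound to the class, handle_event's 'self' parameter
        # "makes sense", as the docstring above notes.
        attrs[priority] = handle_event

    return type('DynamicEndpoint', (NotificationEndpoint,), attrs)

# endpoint_cls = make_endpoint_class(['compute.instance.create.end'])
# listener = start_listener(CONF, 'nova', 'notifications',
#                           [endpoint_cls(event_engine)])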
import abc import six from oslo_log import log as logging from stevedore import driver LOG = logging.getLogger(__name__) _NOTIFIERS = {} _NOTIFICATION_PUBLISHERS = {} def cleanup(): global _NOTIFIERS global _NOTIFICATION_PUBLISHERS _NOTIFIERS = {} _NOTIFICATION_PUBLISHERS = {} def get_notifier(notifier_name): global _NOTIFIERS if not _NOTIFIERS.get(notifier_name): mgr = driver.DriverManager( 'mistral.notifiers', notifier_name, invoke_on_load=True ) _NOTIFIERS[notifier_name] = mgr.driver return _NOTIFIERS[notifier_name] def get_notification_publisher(publisher_name): global _NOTIFICATION_PUBLISHERS if not _NOTIFICATION_PUBLISHERS.get(publisher_name): mgr = driver.DriverManager( 'mistral.notification.publishers', publisher_name, invoke_on_load=True ) _NOTIFICATION_PUBLISHERS[publisher_name] = mgr.driver return _NOTIFICATION_PUBLISHERS[publisher_name] @six.add_metaclass(abc.ABCMeta) class Notifier(object): """Notifier interface.""" @abc.abstractmethod def notify(self, ex_id, data, event, timestamp, **kwargs): raise NotImplementedError() @six.add_metaclass(abc.ABCMeta) class NotificationPublisher(object): """Notifier plugin interface.""" @abc.abstractmethod def publish(self, ctx, ex_id, data, event, timestamp, **kwargs): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/notifiers/default_notifier.py0000644000175000017500000000313000000000000023462 0ustar00coreycorey00000000000000# Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from oslo_log import log as logging from mistral import context as auth_ctx from mistral.notifiers import base LOG = logging.getLogger(__name__) class DefaultNotifier(base.Notifier): """Local notifier that process notification request.""" def notify(self, ex_id, data, event, timestamp, publishers): ctx = auth_ctx.ctx() data['event'] = event for entry in publishers: params = copy.deepcopy(entry) publisher_name = params.pop('type', None) if not publisher_name: LOG.error('Notification publisher type is not specified.') continue try: publisher = base.get_notification_publisher(publisher_name) publisher.publish(ctx, ex_id, data, event, timestamp, **params) except Exception: LOG.exception( 'Unable to process event for publisher "%s".', publisher_name ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/notifiers/notification_events.py0000644000175000017500000000436100000000000024220 0ustar00coreycorey00000000000000# Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.workflow import states WORKFLOW_LAUNCHED = 'WORKFLOW_LAUNCHED' WORKFLOW_SUCCEEDED = 'WORKFLOW_SUCCEEDED' WORKFLOW_FAILED = 'WORKFLOW_FAILED' WORKFLOW_CANCELLED = 'WORKFLOW_CANCELLED' WORKFLOW_PAUSED = 'WORKFLOW_PAUSED' WORKFLOW_RESUMED = 'WORKFLOW_RESUMED' WORKFLOW_RERUN = 'WORKFLOW_RERUN' WORKFLOWS = [ WORKFLOW_LAUNCHED, WORKFLOW_SUCCEEDED, WORKFLOW_FAILED, WORKFLOW_CANCELLED, WORKFLOW_PAUSED, WORKFLOW_RESUMED, WORKFLOW_RERUN ] TASK_LAUNCHED = 'TASK_LAUNCHED' TASK_SUCCEEDED = 'TASK_SUCCEEDED' TASK_FAILED = 'TASK_FAILED' TASK_CANCELLED = 'TASK_CANCELLED' TASK_PAUSED = 'TASK_PAUSED' TASK_RESUMED = 'TASK_RESUMED' TASK_RERUN = 'TASK_RERUN' TASKS = [ TASK_LAUNCHED, TASK_SUCCEEDED, TASK_FAILED, TASK_CANCELLED, TASK_PAUSED, TASK_RESUMED, TASK_RERUN ] EVENTS = WORKFLOWS + TASKS TASK_STATE_TRANSITION_MAP = { states.RUNNING: { 'ANY': TASK_LAUNCHED, 'IDLE': TASK_RESUMED, 'PAUSED': TASK_RESUMED, 'WAITING': TASK_RESUMED, 'ERROR': TASK_RERUN }, states.SUCCESS: {'ANY': TASK_SUCCEEDED}, states.ERROR: {'ANY': TASK_FAILED}, states.CANCELLED: {'ANY': TASK_CANCELLED}, states.PAUSED: {'ANY': TASK_PAUSED} } def identify_task_event(old_task_state, new_task_state): event_options = ( TASK_STATE_TRANSITION_MAP[new_task_state] if new_task_state in TASK_STATE_TRANSITION_MAP else {} ) if not event_options: return None event = ( event_options[old_task_state] if old_task_state and old_task_state in event_options else event_options['ANY'] ) return event ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/notifiers/notification_server.py0000644000175000017500000000561600000000000024226 0ustar00coreycorey00000000000000# Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from mistral import config as cfg from mistral.notifiers import default_notifier as notif from mistral.rpc import base as rpc from mistral.service import base as service_base from mistral.utils import profiler as profiler_utils from mistral_lib import utils LOG = logging.getLogger(__name__) class NotificationServer(service_base.MistralService): def __init__(self, notifier, setup_profiler=True): super(NotificationServer, self).__init__( 'notifier_group', setup_profiler ) self.notifier = notifier self._rpc_server = None def start(self): super(NotificationServer, self).start() if self._setup_profiler: profiler_utils.setup('mistral-notifier', cfg.CONF.notifier.host) # Initialize and start RPC server. 
self._rpc_server = rpc.get_rpc_server_driver()(cfg.CONF.notifier) self._rpc_server.register_endpoint(self) self._rpc_server.run(executor='threading') self._notify_started('Notification server started.') def stop(self, graceful=False): super(NotificationServer, self).stop(graceful) if self._rpc_server: self._rpc_server.stop(graceful) def notify(self, rpc_ctx, ex_id, data, event, timestamp, publishers): """Receives calls over RPC to notify on notification server. :param rpc_ctx: RPC request context dictionary. :param ex_id: Workflow, task, or action execution id. :param data: Dictionary to include in the notification message. :param event: Event being notified on. :param timestamp: Datetime when this event occurred. :param publishers: The list of publishers to send the notification. """ LOG.info( "Received RPC request 'notify'[ex_id=%s, event=%s, " "timestamp=%s, data=%s, publishers=%s]", ex_id, event, timestamp, data, utils.cut(publishers) ) self.notifier.notify( ex_id, data, event, timestamp, publishers ) def get_oslo_service(setup_profiler=True): return NotificationServer( notif.DefaultNotifier(), setup_profiler=setup_profiler ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1215675 mistral-10.0.0.0b3/mistral/notifiers/publishers/0000755000175000017500000000000000000000000021750 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/notifiers/publishers/__init__.py0000644000175000017500000000000000000000000024047 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/notifiers/publishers/noop.py0000644000175000017500000000203500000000000023275 0ustar00coreycorey00000000000000# Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from mistral.notifiers import base LOG = logging.getLogger(__name__) class NoopPublisher(base.NotificationPublisher): def publish(self, ctx, ex_id, data, event, timestamp, **kwargs): LOG.info( 'The event %s for [name=%s, id=%s] is published by the ' 'noop notification publisher.', event, data.get('name'), ex_id ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/notifiers/publishers/webhook.py0000644000175000017500000000234200000000000023761 0ustar00coreycorey00000000000000# Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

import json

import requests
from six.moves import http_client

from oslo_log import log as logging

from mistral.notifiers import base


LOG = logging.getLogger(__name__)


class WebhookPublisher(base.NotificationPublisher):

    def publish(self, ctx, ex_id, data, event, timestamp, **kwargs):
        url = kwargs.get('url')
        headers = kwargs.get('headers', {})

        resp = requests.post(url, data=json.dumps(data), headers=headers)

        LOG.info("Webhook request url=%s code=%s", url, resp.status_code)

        if resp.status_code not in [http_client.OK, http_client.CREATED]:
            raise Exception(resp.text)

mistral-10.0.0.0b3/mistral/notifiers/remote_notifier.py

# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from oslo_log import log as logging

from mistral.rpc import base as rpc_base
from mistral.rpc import clients as rpc_clients


LOG = logging.getLogger(__name__)


class RemoteNotifier(rpc_clients.NotifierClient):
    """Notifier that passes a notification request to a remote notifier."""

    def __init__(self):
        self.topic = cfg.CONF.notifier.topic
        self._client = rpc_base.get_rpc_client_driver()(cfg.CONF.notifier)

mistral-10.0.0.0b3/mistral/policies/
mistral-10.0.0.0b3/mistral/policies/__init__.py

# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
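# --- Illustrative sketch (not part of the Mistral source) ---
# get_notification_publisher() in notifiers/base.py above loads
# publishers such as WebhookPublisher through stevedore from the
# 'mistral.notification.publishers' entry-point namespace. A hedged
# sketch of a custom publisher; the class, module path and entry-point
# name are assumptions for illustration:
from oslo_log import log as logging

from mistral.notifiers import base as notifier_base

LOG = logging.getLogger(__name__)


class LogPublisher(notifier_base.NotificationPublisher):

    def publish(self, ctx, ex_id, data, event, timestamp, **kwargs):
        # Just record the event locally instead of calling out to an
        # external system the way WebhookPublisher does.
        LOG.info('Event %s for execution %s at %s', event, ex_id, timestamp)

# Registered in the package's setup.cfg, for example:
# [entry_points]
# mistral.notification.publishers =
#     log = mypackage.publishers:LogPublisher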
import itertools from mistral.policies import action from mistral.policies import action_executions from mistral.policies import base from mistral.policies import cron_trigger from mistral.policies import environment from mistral.policies import event_trigger from mistral.policies import execution from mistral.policies import member from mistral.policies import service from mistral.policies import task from mistral.policies import workbook from mistral.policies import workflow def list_rules(): return itertools.chain( action.list_rules(), action_executions.list_rules(), base.list_rules(), cron_trigger.list_rules(), environment.list_rules(), event_trigger.list_rules(), execution.list_rules(), member.list_rules(), service.list_rules(), task.list_rules(), workbook.list_rules(), workflow.list_rules() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/action.py0000644000175000017500000000510600000000000021226 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from mistral.policies import base ACTIONS = 'actions:%s' rules = [ policy.DocumentedRuleDefault( name=ACTIONS % 'create', check_str=base.RULE_ADMIN_OR_OWNER, description='Create a new action.', operations=[ { 'path': '/v2/actions', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=ACTIONS % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, description='Delete the named action.', operations=[ { 'path': '/v2/actions', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=ACTIONS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Return the named action.', operations=[ { 'path': '/v2/actions/{action_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTIONS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all actions.', operations=[ { 'path': '/v2/actions', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTIONS % 'publicize', check_str=base.RULE_ADMIN_OR_OWNER, description='Make an action publicly available', operations=[ { 'path': '/v2/actions', 'method': 'POST' }, { 'path': '/v2/actions', 'method': 'PUT' } ] ), policy.DocumentedRuleDefault( name=ACTIONS % 'update', check_str=base.RULE_ADMIN_OR_OWNER, description='Update one or more actions.', operations=[ { 'path': '/v2/actions', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/action_executions.py0000644000175000017500000000456100000000000023500 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from mistral.policies import base ACTION_EXECUTIONS = 'action_executions:%s' rules = [ policy.DocumentedRuleDefault( name=ACTION_EXECUTIONS % 'create', check_str=base.RULE_ADMIN_OR_OWNER, description='Create new action execution.', operations=[ { 'path': '/v2/action_executions', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=ACTION_EXECUTIONS % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, description='Delete the specified action execution.', operations=[ { 'path': '/v2/action_executions', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=ACTION_EXECUTIONS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Return the specified action execution.', operations=[ { 'path': '/v2/action_executions/{action_execution_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTION_EXECUTIONS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all tasks within the execution.', operations=[ { 'path': '/v2/action_executions', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTION_EXECUTIONS % 'update', check_str=base.RULE_ADMIN_OR_OWNER, description='Update the specified action execution.', operations=[ { 'path': '/v2/action_executions', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/base.py0000644000175000017500000000165200000000000020665 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner' RULE_ADMIN_ONLY = 'rule:admin_only' rules = [ policy.RuleDefault( "admin_only", "is_admin:True"), policy.RuleDefault( "admin_or_owner", "is_admin:True or project_id:%(project_id)s") ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/cron_trigger.py0000644000175000017500000000444000000000000022435 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from mistral.policies import base CRON_TRIGGERS = 'cron_triggers:%s' rules = [ policy.DocumentedRuleDefault( name=CRON_TRIGGERS % 'create', check_str=base.RULE_ADMIN_OR_OWNER, description='Creates a new cron trigger.', operations=[ { 'path': '/v2/cron_triggers', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=CRON_TRIGGERS % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, description='Delete cron trigger.', operations=[ { 'path': '/v2/cron_triggers', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=CRON_TRIGGERS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Returns the named cron trigger.', operations=[ { 'path': '/v2/cron_triggers/{cron_trigger_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CRON_TRIGGERS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all cron triggers.', operations=[ { 'path': '/v2/cron_triggers', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CRON_TRIGGERS % 'list:all_projects', check_str=base.RULE_ADMIN_ONLY, description='Return all cron triggers of all projects.', operations=[ { 'path': '/v2/cron_triggers', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/environment.py0000644000175000017500000000442200000000000022315 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
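# --- Illustrative sketch (not part of the Mistral source) ---
# The DocumentedRuleDefault entries in these policy modules are
# consumed by an oslo.policy Enforcer. A minimal sketch of registering
# the Mistral defaults and checking the cron-trigger rules defined
# above; the creds/target dicts are illustrative, and a real deployment
# initializes the Enforcer from the parsed service configuration:
from oslo_config import cfg
from oslo_policy import policy as oslo_policy

from mistral import policies

enforcer = oslo_policy.Enforcer(cfg.CONF)
enforcer.register_defaults(policies.list_rules())

member = {'project_id': 'p1', 'roles': ['member']}
admin = {'project_id': 'p1', 'roles': ['admin'], 'is_admin': True}
target = {'project_id': 'p1'}

# 'rule:admin_or_owner' lets a project member list its own triggers...
assert enforcer.authorize('cron_triggers:list', target, member)
# ...while 'list:all_projects' is 'rule:admin_only'.
assert not enforcer.authorize('cron_triggers:list:all_projects', target, member)
assert enforcer.authorize('cron_triggers:list:all_projects', target, admin)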
from oslo_policy import policy from mistral.policies import base ENVIRONMENTS = 'environments:%s' rules = [ policy.DocumentedRuleDefault( name=ENVIRONMENTS % 'create', check_str=base.RULE_ADMIN_OR_OWNER, description='Create a new environment.', operations=[ { 'path': '/v2/environments', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=ENVIRONMENTS % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, description='Delete the named environment.', operations=[ { 'path': '/v2/environments/{environment_name}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=ENVIRONMENTS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Return the named environment.', operations=[ { 'path': '/v2/environments/{environment_name}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ENVIRONMENTS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all environments.', operations=[ { 'path': '/v2/environments', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ENVIRONMENTS % 'update', check_str=base.RULE_ADMIN_OR_OWNER, description='Update an environment.', operations=[ { 'path': '/v2/environments', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/event_trigger.py0000644000175000017500000000607200000000000022620 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from mistral.policies import base EVENT_TRIGGERS = 'event_triggers:%s' # NOTE(hieulq): all API operations of below rules are not documented in API # reference docs yet. 
rules = [ policy.DocumentedRuleDefault( name=EVENT_TRIGGERS % 'create', check_str=base.RULE_ADMIN_OR_OWNER, description='Create a new event trigger.', operations=[ { 'path': '/v2/event_triggers', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=EVENT_TRIGGERS % 'create:public', check_str=base.RULE_ADMIN_ONLY, description='Create a new event trigger for public usage.', operations=[ { 'path': '/v2/event_triggers', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=EVENT_TRIGGERS % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, description='Delete event trigger.', operations=[ { 'path': '/v2/event_triggers/{event_trigger_id}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=EVENT_TRIGGERS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Returns the specified event trigger.', operations=[ { 'path': '/v2/event_triggers/{event_trigger_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=EVENT_TRIGGERS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all event triggers.', operations=[ { 'path': '/v2/event_triggers', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=EVENT_TRIGGERS % 'list:all_projects', check_str=base.RULE_ADMIN_ONLY, description='Return all event triggers from all projects.', operations=[ { 'path': '/v2/event_triggers', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=EVENT_TRIGGERS % 'update', check_str=base.RULE_ADMIN_OR_OWNER, description='Updates an existing event trigger.', operations=[ { 'path': '/v2/event_triggers', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/execution.py0000644000175000017500000000506500000000000021760 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
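# --- Illustrative sketch (not part of the Mistral source) ---
# At the REST layer, rules like the event-trigger ones above are
# checked against the request context before a controller acts. A
# generic, hedged helper showing the shape of such a check (the
# function name and context fields are assumptions, not Mistral's
# actual API-layer code; 'enforcer' is an oslo.policy Enforcer like the
# one in the sketch further up this listing):
def enforce_policy(enforcer, rule, ctx, target=None):
    creds = {
        'project_id': ctx.get('project_id'),
        'roles': ctx.get('roles', []),
        'is_admin': ctx.get('is_admin', False),
    }

    target = target or {'project_id': ctx.get('project_id')}

    # do_raise=True makes oslo.policy raise PolicyNotAuthorized itself.
    enforcer.authorize(rule, target, creds, do_raise=True)

# enforce_policy(enforcer, 'event_triggers:create:public', request_ctx)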
from oslo_policy import policy from mistral.policies import base EXECUTIONS = 'executions:%s' rules = [ policy.DocumentedRuleDefault( name=EXECUTIONS % 'create', check_str=base.RULE_ADMIN_OR_OWNER, description='Create a new execution.', operations=[ { 'path': '/v2/executions', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=EXECUTIONS % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, description='Delete the specified execution.', operations=[ { 'path': '/v2/executions/{execution_id}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=EXECUTIONS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Return the specified execution.', operations=[ { 'path': '/v2/executions/{execution_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=EXECUTIONS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all executions.', operations=[ { 'path': '/v2/executions', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=EXECUTIONS % 'list:all_projects', check_str=base.RULE_ADMIN_ONLY, description='Return all executions from all projects.', operations=[ { 'path': '/v2/executions', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=EXECUTIONS % 'update', check_str=base.RULE_ADMIN_OR_OWNER, description='Update an execution.', operations=[ { 'path': '/v2/executions', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/member.py0000644000175000017500000000461200000000000021221 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from mistral.policies import base MEMBERS = 'members:%s' # NOTE(hieulq): all API operations of below rules are not documented in API # reference docs yet. 
rules = [ policy.DocumentedRuleDefault( name=MEMBERS % 'create', check_str=base.RULE_ADMIN_OR_OWNER, description='Shares the resource to a new member.', operations=[ { 'path': '/v2/members', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=MEMBERS % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, description='Deletes a member from the member list of a resource.', operations=[ { 'path': '/v2/members', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=MEMBERS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Shows resource member details.', operations=[ { 'path': '/v2/members/{member_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=MEMBERS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all members with whom the resource has been ' 'shared.', operations=[ { 'path': '/v2/members', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=MEMBERS % 'update', check_str=base.RULE_ADMIN_OR_OWNER, description='Sets the status for a resource member.', operations=[ { 'path': '/v2/members', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/service.py0000644000175000017500000000201600000000000021406 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from mistral.policies import base SERVICES = 'services:%s' rules = [ policy.DocumentedRuleDefault( name=SERVICES % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all Mistral services.', operations=[ { 'path': '/v2/services', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/task.py0000644000175000017500000000312400000000000020711 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from mistral.policies import base TASKS = 'tasks:%s' rules = [ policy.DocumentedRuleDefault( name=TASKS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Return the specified task.', operations=[ { 'path': '/v2/tasks/{task_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=TASKS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all tasks.', operations=[ { 'path': '/v2/tasks', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=TASKS % 'update', check_str=base.RULE_ADMIN_OR_OWNER, description='Update the specified task execution.', operations=[ { 'path': '/v2/tasks', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/workbook.py0000644000175000017500000000431100000000000021603 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from mistral.policies import base WORKBOOKS = 'workbooks:%s' rules = [ policy.DocumentedRuleDefault( name=WORKBOOKS % 'create', check_str=base.RULE_ADMIN_OR_OWNER, description='Create a new workbook.', operations=[ { 'path': '/v2/workbooks', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=WORKBOOKS % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, description='Delete the named workbook.', operations=[ { 'path': '/v2/workbooks', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=WORKBOOKS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Return the named workbook.', operations=[ { 'path': '/v2/workbooks/{workbook_name}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=WORKBOOKS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return all workbooks.', operations=[ { 'path': '/v2/workbooks', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=WORKBOOKS % 'update', check_str=base.RULE_ADMIN_OR_OWNER, description='Update an workbook.', operations=[ { 'path': '/v2/workbooks', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/policies/workflow.py0000644000175000017500000000566700000000000021637 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from mistral.policies import base WORKFLOWS = 'workflows:%s' rules = [ policy.DocumentedRuleDefault( name=WORKFLOWS % 'create', check_str=base.RULE_ADMIN_OR_OWNER, description='Create a new workflow.', operations=[ { 'path': '/v2/workflows', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=WORKFLOWS % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, description='Delete a workflow.', operations=[ { 'path': '/v2/workflows', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=WORKFLOWS % 'get', check_str=base.RULE_ADMIN_OR_OWNER, description='Return the named workflow.', operations=[ { 'path': '/v2/workflows/{workflow_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=WORKFLOWS % 'list', check_str=base.RULE_ADMIN_OR_OWNER, description='Return a list of workflows.', operations=[ { 'path': '/v2/workflows', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=WORKFLOWS % 'list:all_projects', check_str=base.RULE_ADMIN_ONLY, description='Return a list of workflows from all projects.', operations=[ { 'path': '/v2/workflows', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=WORKFLOWS % 'publicize', check_str=base.RULE_ADMIN_OR_OWNER, description='Make a workflow publicly available', operations=[ { 'path': '/v2/workflows', 'method': 'POST' }, { 'path': '/v2/workflows', 'method': 'PUT' } ] ), policy.DocumentedRuleDefault( name=WORKFLOWS % 'update', check_str=base.RULE_ADMIN_OR_OWNER, description='Update one or more workflows.', operations=[ { 'path': '/v2/workflows', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0735664 mistral-10.0.0.0b3/mistral/resources/0000755000175000017500000000000000000000000017600 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1255674 mistral-10.0.0.0b3/mistral/resources/actions/0000755000175000017500000000000000000000000021240 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/resources/actions/wait_ssh.yaml0000644000175000017500000000041600000000000023746 0ustar00coreycorey00000000000000--- version: '2.0' std.wait_ssh: description: Simple SSH command. base: std.ssh base-input: host: <% $.host %> username: <% $.username %> password: <% $.password %> cmd: 'ls -l' input: - host - username - password ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1255674 mistral-10.0.0.0b3/mistral/rpc/0000755000175000017500000000000000000000000016352 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/__init__.py0000644000175000017500000000000000000000000020451 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/base.py0000644000175000017500000001364100000000000017643 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # Copyright 2017 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from functools import wraps from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.exceptions import MessagingTimeout from oslo_messaging.rpc import client from stevedore import driver from mistral import exceptions as exc LOG = logging.getLogger(__name__) _IMPL_CLIENT = None _IMPL_SERVER = None _TRANSPORT = None def cleanup(): """Intended to be used by tests to recreate all RPC related objects.""" global _TRANSPORT _TRANSPORT = None # TODO(rakhmerov): This method seems misplaced. Now we have different kind # of transports (oslo, kombu) and this module should not have any oslo # specific things anymore. def get_transport(): global _TRANSPORT if not _TRANSPORT: _TRANSPORT = messaging.get_rpc_transport(cfg.CONF) return _TRANSPORT def get_rpc_server_driver(): rpc_impl = cfg.CONF.rpc_implementation global _IMPL_SERVER if not _IMPL_SERVER: _IMPL_SERVER = driver.DriverManager( 'mistral.rpc.backends', '%s_server' % rpc_impl ).driver return _IMPL_SERVER def get_rpc_client_driver(): rpc_impl = cfg.CONF.rpc_implementation global _IMPL_CLIENT if not _IMPL_CLIENT: _IMPL_CLIENT = driver.DriverManager( 'mistral.rpc.backends', '%s_client' % rpc_impl ).driver return _IMPL_CLIENT def _wrap_exception_and_reraise(exception): message = "%s: %s" % (exception.__class__.__name__, exception.args[0]) raise exc.MistralException(message) def wrap_messaging_exception(method): """The decorator unwraps a remote error into one of the mistral exceptions. oslo.messaging has different behavior on raising exceptions depending on whether we use 'fake' or 'rabbit' transports. In case of 'rabbit' transport it raises an instance of RemoteError which forwards directly to the API. The RemoteError instance contains one of the MistralException instances raised remotely on the RPC server side and for correct exception handling we need to unwrap and raise the original wrapped exception. """ @wraps(method) def decorator(*args, **kwargs): try: return method(*args, **kwargs) except exc.MistralException: raise except MessagingTimeout: timeout = cfg.CONF.rpc_response_timeout raise exc.MistralException('This rpc call "%s" took longer than ' 'configured %s seconds.' % (method.__name__, timeout)) except (client.RemoteError, exc.KombuException, Exception) as e: # Since we're going to transform the original exception # we need to log it as is. LOG.exception( "Caught a messaging remote error." " See details of the original exception." ) if hasattr(e, 'exc_type') and hasattr(exc, e.exc_type): exc_cls = getattr(exc, e.exc_type) raise exc_cls(e.value) _wrap_exception_and_reraise(e) return decorator class RPCClient(object): def __init__(self, conf): """Base class for RPCClient's drivers RPC Client is responsible for sending requests to RPC Server. All RPC client drivers have to inherit from this class. :param conf: Additional config provided by upper layer. """ self.conf = conf @abc.abstractmethod def sync_call(self, ctx, method, target=None, **kwargs): """Synchronous call of RPC method. Blocks the thread and wait for method result. 
""" raise NotImplementedError @abc.abstractmethod def async_call(self, ctx, method, target=None, fanout=False, **kwargs): """Asynchronous call of RPC method. Does not block the thread, just send invoking data to the RPC server and immediately returns nothing. """ raise NotImplementedError class RPCServer(object): def __init__(self, conf): """Base class for RPCServer's drivers RPC Server should listen for request coming from RPC Clients and respond to them respectively to the registered endpoints. All RPC server drivers have to inherit from this class. :param conf: Additional config provided by upper layer. """ self.conf = conf @abc.abstractmethod def register_endpoint(self, endpoint): """Registers a new RPC endpoint. :param endpoint: an object containing methods which will be used as RPC methods. """ raise NotImplementedError @abc.abstractmethod def run(self, executor='eventlet'): """Runs the RPC server. :param executor: Executor used to process incoming requests. Different implementations may support different options. """ raise NotImplementedError def stop(self, graceful=False): """Stop the RPC server. :param graceful: True if this method call should wait till all internal threads are finished. :return: """ # No-op by default. pass def wait(self): """Wait till all internal threads are finished.""" # No-op by default. pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/clients.py0000644000175000017500000003520600000000000020373 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2017 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from osprofiler import profiler import threading from mistral import context as auth_ctx from mistral.engine import base as eng from mistral.event_engine import base as evt_eng from mistral.executors import base as exe from mistral.notifiers import base as notif from mistral.rpc import base LOG = logging.getLogger(__name__) _ENGINE_CLIENT = None _ENGINE_CLIENT_LOCK = threading.Lock() _EXECUTOR_CLIENT = None _EXECUTOR_CLIENT_LOCK = threading.Lock() _EVENT_ENGINE_CLIENT = None _EVENT_ENGINE_CLIENT_LOCK = threading.Lock() _NOTIFIER_CLIENT = None _NOTIFIER_CLIENT_LOCK = threading.Lock() def cleanup(): """Clean all the RPC clients. Intended to be used by tests to recreate all RPC related objects. Another usage is forking a child API process. In this case we must recreate all RPC objects so that they function properly. 
""" global _ENGINE_CLIENT global _EXECUTOR_CLIENT global _EVENT_ENGINE_CLIENT global _NOTIFIER_CLIENT _ENGINE_CLIENT = None _EXECUTOR_CLIENT = None _EVENT_ENGINE_CLIENT = None _NOTIFIER_CLIENT = None base.cleanup() def get_engine_client(): global _ENGINE_CLIENT global _ENGINE_CLIENT_LOCK with _ENGINE_CLIENT_LOCK: if not _ENGINE_CLIENT: _ENGINE_CLIENT = EngineClient(cfg.CONF.engine) return _ENGINE_CLIENT def get_executor_client(): global _EXECUTOR_CLIENT global _EXECUTOR_CLIENT_LOCK with _EXECUTOR_CLIENT_LOCK: if not _EXECUTOR_CLIENT: _EXECUTOR_CLIENT = ExecutorClient(cfg.CONF.executor) return _EXECUTOR_CLIENT def get_event_engine_client(): global _EVENT_ENGINE_CLIENT global _EVENT_ENGINE_CLIENT_LOCK with _EVENT_ENGINE_CLIENT_LOCK: if not _EVENT_ENGINE_CLIENT: _EVENT_ENGINE_CLIENT = EventEngineClient(cfg.CONF.event_engine) return _EVENT_ENGINE_CLIENT def get_notifier_client(): global _NOTIFIER_CLIENT global _NOTIFIER_CLIENT_LOCK with _NOTIFIER_CLIENT_LOCK: if not _NOTIFIER_CLIENT: _NOTIFIER_CLIENT = NotifierClient(cfg.CONF.notifier) return _NOTIFIER_CLIENT class EngineClient(eng.Engine): """RPC Engine client.""" def __init__(self, rpc_conf_dict): """Constructs an RPC client for engine. :param rpc_conf_dict: Dict containing RPC configuration. """ self._client = base.get_rpc_client_driver()(rpc_conf_dict) @base.wrap_messaging_exception def start_workflow(self, wf_identifier, wf_namespace='', wf_ex_id=None, wf_input=None, description='', async_=False, **params): """Starts workflow sending a request to engine over RPC. :param wf_identifier: Workflow identifier. :param wf_namespace: Workflow namespace. :param wf_input: Workflow input data as a dictionary. :param wf_ex_id: Workflow execution id. If passed, it will be set in the new execution object. :param description: Execution description. :param async_: If True, start workflow in asynchronous mode (w/o waiting for completion). :param params: Additional workflow type specific parameters. :return: Workflow execution. """ call = self._client.async_call if async_ else self._client.sync_call return call( auth_ctx.ctx(), 'start_workflow', wf_identifier=wf_identifier, wf_namespace=wf_namespace, wf_ex_id=wf_ex_id, wf_input=wf_input or {}, description=description, params=params ) @base.wrap_messaging_exception def start_action(self, action_name, action_input, description=None, namespace='', **params): """Starts action sending a request to engine over RPC. :param action_name: Action name. :param action_input: Action input data as a dictionary. :param description: Execution description. :param namespace: The namespace of the action. :param params: Additional options for action running. :return: Action execution. """ return self._client.sync_call( auth_ctx.ctx(), 'start_action', action_name=action_name, action_input=action_input or {}, description=description, namespace=namespace, params=params ) @base.wrap_messaging_exception @profiler.trace('engine-client-on-action-complete', hide_args=True) def on_action_complete(self, action_ex_id, result, wf_action=False, async_=False): """Conveys action result to Mistral Engine. This method should be used by clients of Mistral Engine to update the state of an action execution once action has executed. One of the clients of this method is Mistral REST API server that receives action result from the outside action handlers. Note: calling this method serves an event notifying Mistral that it possibly needs to move the workflow on, i.e. run other workflow tasks for which all dependencies are satisfied. 
:param action_ex_id: Action execution id. :param result: Action execution result. :param wf_action: If True it means that the given id points to a workflow execution rather than action execution. It happens when a nested workflow execution sends its result to a parent workflow. :param async_: If True, run action in asynchronous mode (w/o waiting for completion). :return: Action(or workflow if wf_action=True) execution object. """ call = self._client.async_call if async_ else self._client.sync_call return call( auth_ctx.ctx(), 'on_action_complete', action_ex_id=action_ex_id, result=result, wf_action=wf_action ) @base.wrap_messaging_exception @profiler.trace('engine-client-on-action-update', hide_args=True) def on_action_update(self, action_ex_id, state, wf_action=False, async_=False): """Conveys update of action state to Mistral Engine. This method should be used by clients of Mistral Engine to update the state of an action execution once action has executed. Note: calling this method serves an event notifying Mistral that it may need to change the state of the parent task and workflow. Use on_action_complete if the action execution reached completion state. :param action_ex_id: Action execution id. :param state: Updated state. :param wf_action: If True it means that the given id points to a workflow execution rather than action execution. It happens when a nested workflow execution sends its result to a parent workflow. :param async_: If True, run action in asynchronous mode (w/o waiting for completion). :return: Action(or workflow if wf_action=True) execution object. """ call = self._client.async_call if async_ else self._client.sync_call return call( auth_ctx.ctx(), 'on_action_update', action_ex_id=action_ex_id, state=state, wf_action=wf_action ) @base.wrap_messaging_exception def pause_workflow(self, wf_ex_id): """Stops the workflow with the given execution id. :param wf_ex_id: Workflow execution id. :return: Workflow execution. """ return self._client.sync_call( auth_ctx.ctx(), 'pause_workflow', wf_ex_id=wf_ex_id ) @base.wrap_messaging_exception def rerun_workflow(self, task_ex_id, reset=True, env=None): """Rerun the workflow. This method reruns workflow with the given execution id at the specific task execution id. :param task_ex_id: Task execution id. :param reset: If true, then reset task execution state and purge action execution for the task. :param env: Environment variables to update. :return: Workflow execution. """ return self._client.sync_call( auth_ctx.ctx(), 'rerun_workflow', task_ex_id=task_ex_id, reset=reset, env=env ) @base.wrap_messaging_exception def resume_workflow(self, wf_ex_id, env=None): """Resumes the workflow with the given execution id. :param wf_ex_id: Workflow execution id. :param env: Environment variables to update. :return: Workflow execution. """ return self._client.sync_call( auth_ctx.ctx(), 'resume_workflow', wf_ex_id=wf_ex_id, env=env ) @base.wrap_messaging_exception def stop_workflow(self, wf_ex_id, state, message=None): """Stops workflow execution with given status. Once stopped, the workflow is complete with SUCCESS or ERROR, and can not be resumed. 
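A hedged sketch of the pause/resume pair documented above; the execution id and environment values are placeholders:

from mistral.rpc import clients

engine = clients.get_engine_client()

engine.pause_workflow('0c3f2bd1-placeholder')

# ...later, resume and optionally override environment variables.
engine.resume_workflow('0c3f2bd1-placeholder', env={'retries': '3'})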
:param wf_ex_id: Workflow execution id :param state: State assigned to the workflow: SUCCESS or ERROR :param message: Optional information string :return: Workflow execution, model.Execution """ return self._client.sync_call( auth_ctx.ctx(), 'stop_workflow', wf_ex_id=wf_ex_id, state=state, message=message ) @base.wrap_messaging_exception def rollback_workflow(self, wf_ex_id): """Rolls back the workflow with the given execution id. :param wf_ex_id: Workflow execution id. :return: Workflow execution. """ return self._client.sync_call( auth_ctx.ctx(), 'rollback_workflow', wf_ex_id=wf_ex_id ) @base.wrap_messaging_exception def process_action_heartbeats(self, action_ex_ids): """Receives action execution heartbeats. :param action_ex_ids: Action execution ids. """ return self._client.async_call( auth_ctx.ctx(), 'report_running_actions', action_ex_ids=action_ex_ids ) class ExecutorClient(exe.Executor): """RPC Executor client.""" def __init__(self, rpc_conf_dict): """Constructs an RPC client for the Executor.""" self.topic = cfg.CONF.executor.topic self._client = base.get_rpc_client_driver()(rpc_conf_dict) @profiler.trace('executor-client-run-action') def run_action(self, action_ex_id, action_cls_str, action_cls_attrs, params, safe_rerun, execution_context, redelivered=False, target=None, async_=True, timeout=None): """Sends a request to run action to executor. :param action_ex_id: Action execution id. :param action_cls_str: Action class name. :param action_cls_attrs: Action class attributes. :param params: Action input parameters. :param safe_rerun: If true, action would be re-run if executor dies during execution. :param execution_context: A dict of values providing information about the current execution. :param redelivered: Tells if given action was run before on another executor. :param target: Target (group of action executors). :param async_: If True, run action in asynchronous mode (w/o waiting for completion). :param timeout: a period of time in seconds after which execution of action will be interrupted :return: Action result. 
""" rpc_kwargs = { 'action_ex_id': action_ex_id, 'action_cls_str': action_cls_str, 'action_cls_attrs': action_cls_attrs, 'params': params, 'safe_rerun': safe_rerun, 'execution_context': execution_context, 'timeout': timeout } rpc_client_method = (self._client.async_call if async_ else self._client.sync_call) LOG.debug( "Sending an action to executor [action_ex_id=%s, action_cls=%s]", action_ex_id, action_cls_str ) return rpc_client_method(auth_ctx.ctx(), 'run_action', **rpc_kwargs) class EventEngineClient(evt_eng.EventEngine): """RPC EventEngine client.""" def __init__(self, rpc_conf_dict): """Constructs an RPC client for the EventEngine service.""" self._client = base.get_rpc_client_driver()(rpc_conf_dict) def create_event_trigger(self, trigger, events): return self._client.async_call( auth_ctx.ctx(), 'create_event_trigger', trigger=trigger, events=events, fanout=True, ) def delete_event_trigger(self, trigger, events): return self._client.async_call( auth_ctx.ctx(), 'delete_event_trigger', trigger=trigger, events=events, fanout=True, ) def update_event_trigger(self, trigger): return self._client.async_call( auth_ctx.ctx(), 'update_event_trigger', trigger=trigger, fanout=True, ) class NotifierClient(notif.Notifier): """RPC Notifier client.""" def __init__(self, rpc_conf_dict): """Constructs an RPC client for the Notifier service.""" self._client = base.get_rpc_client_driver()(rpc_conf_dict) def notify(self, ex_id, data, event, timestamp, publishers): try: return self._client.async_call( auth_ctx.ctx(), 'notify', ex_id=ex_id, data=data, event=event, timestamp=timestamp, publishers=publishers ) except Exception: LOG.exception('Unable to send notification.') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1255674 mistral-10.0.0.0b3/mistral/rpc/kombu/0000755000175000017500000000000000000000000017467 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/kombu/__init__.py0000644000175000017500000000000000000000000021566 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/kombu/base.py0000644000175000017500000001250100000000000020752 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import kombu from mistral_lib import serialization as mistral_serialization import oslo_messaging as messaging from mistral import config as cfg from mistral import exceptions as exc IS_RECEIVED = 'kombu_rpc_is_received' RESULT = 'kombu_rpc_result' CORR_ID = 'kombu_rpc_correlation_id' TYPE = 'kombu_rpc_type' CONF = cfg.CONF def set_transport_options(check_backend=True): # We can be sure that all needed transport options are registered # only if we at least once called method get_transport(). Because # this is the method that registers them. 
messaging.get_transport(CONF) backend = messaging.TransportURL.parse(CONF, CONF.transport_url).transport if check_backend and backend not in ['rabbit', 'kombu']: raise exc.MistralException("Unsupported backend: %s" % backend) class Base(object): """Base class for Client and Server.""" def __init__(self): self.serializer = None @staticmethod def _make_connection(amqp_host, amqp_port, amqp_user, amqp_password, amqp_vhost): """Create a connection. This method creates an object representing the connection to RabbitMQ. :param amqp_host: Address of the RabbitMQ server. :param amqp_user: Username for connecting to RabbitMQ. :param amqp_password: Password matching the given username. :param amqp_vhost: Virtual host to connect to. :param amqp_port: Port of the RabbitMQ server. :return: New connection to RabbitMQ. """ return kombu.BrokerConnection( hostname=amqp_host, userid=amqp_user, password=amqp_password, virtual_host=amqp_vhost, port=amqp_port, transport_options={'confirm_publish': True} ) @staticmethod def _make_exchange(name, durable=False, auto_delete=True, exchange_type='topic'): """Make a named exchange. This method creates an object representing an exchange on RabbitMQ. A new exchange is created if no exchange with the given name exists. :param name: Name of the exchange. :param durable: If set to True, messages on this exchange are stored on disk and can therefore be retrieved after a failure. :param auto_delete: If set to True, the exchange is automatically deleted when no consumer is connected. :param exchange_type: Type of the exchange. Can be one of 'direct', 'topic', 'fanout', 'headers'. See the Kombu docs for further details. :return: Kombu exchange object. """ return kombu.Exchange( name=name, type=exchange_type, durable=durable, auto_delete=auto_delete ) @staticmethod def _make_queue(name, exchange, routing_key='', durable=False, auto_delete=True, **kwargs): """Make a named queue for a given exchange. This method creates an object representing a queue in RabbitMQ. A new queue is created if no queue with the given name exists. :param name: Name of the queue. :param exchange: Kombu Exchange object (can be created using _make_exchange). :param routing_key: Routing key for the queue. It behaves differently depending on the exchange type. See the Kombu docs for further details. :param durable: If set to True, messages on this queue are stored on disk and can therefore be retrieved after a failure. :param auto_delete: If set to True, the queue is automatically deleted when no consumer is connected. :param kwargs: See the kombu documentation for all parameters that may be passed to Queue. :return: Kombu Queue object.
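The two helpers above compose in the usual Kombu way; a standalone sketch with placeholder names:

import kombu

# Declare a topic exchange, then bind a queue to it with a routing key.
exchange = kombu.Exchange(
    name='mistral_demo',
    type='topic',
    durable=False,
    auto_delete=True
)

queue = kombu.Queue(
    name='mistral_demo_queue',
    exchange=exchange,
    routing_key='mistral_demo_key',
    durable=False,
    auto_delete=True
)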
""" return kombu.Queue( name=name, routing_key=routing_key, exchange=exchange, durable=durable, auto_delete=auto_delete, **kwargs ) def _register_mistral_serialization(self): """Adds mistral serializer to available serializers in kombu.""" self.serializer = mistral_serialization.get_polymorphic_serializer() def _serialize_message(self, kwargs): result = {} for argname, arg in kwargs.items(): result[argname] = self.serializer.serialize(arg) return result def _deserialize_message(self, kwargs): result = {} for argname, arg in kwargs.items(): result[argname] = self.serializer.deserialize(arg) return result ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1255674 mistral-10.0.0.0b3/mistral/rpc/kombu/examples/0000755000175000017500000000000000000000000021305 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/kombu/examples/__init__.py0000644000175000017500000000000000000000000023404 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/kombu/examples/client.py0000644000175000017500000000236000000000000023136 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from mistral.rpc.kombu import kombu_client # Example of using Kombu based RPC client. def main(): conf = { 'user_id': 'guest', 'password': 'secret', 'exchange': 'my_exchange', 'topic': 'my_topic', 'server_id': 'host', 'host': 'localhost', 'port': 5672, 'virtual_host': '/' } kombu_rpc = kombu_client.KombuRPCClient(conf) print(" [x] Requesting ...") ctx = type('context', (object,), {'to_dict': lambda self: {}})() response = kombu_rpc.sync_call(ctx, 'fib', n=44) print(" [.] Got %r" % (response,)) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/kombu/examples/server.py0000644000175000017500000000302700000000000023167 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from mistral.rpc.kombu import kombu_server # Simple example of endpoint of RPC server, which just # calculates given fibonacci number. 
class MyServer(object): cache = {0: 0, 1: 1} def fib(self, rpc_ctx, n): if self.cache.get(n) is None: self.cache[n] = (self.fib(rpc_ctx, n - 1) + self.fib(rpc_ctx, n - 2)) return self.cache[n] def get_name(self, rpc_ctx): return self.__class__.__name__ # Example of using Kombu based RPC server. def main(): conf = { 'user_id': 'guest', 'password': 'secret', 'exchange': 'my_exchange', 'topic': 'my_topic', 'server_id': 'host', 'host': 'localhost', 'port': 5672, 'virtual_host': '/' } rpc_server = kombu_server.KombuRPCServer(conf) rpc_server.register_endpoint(MyServer()) rpc_server.run() if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/kombu/kombu_client.py0000644000175000017500000001535400000000000022524 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import socket import itertools import errno import six from six import moves import kombu from oslo_log import log as logging from mistral import config as cfg from mistral import exceptions as exc from mistral.rpc import base as rpc_base from mistral.rpc.kombu import base as kombu_base from mistral.rpc.kombu import kombu_hosts from mistral.rpc.kombu import kombu_listener from mistral_lib import utils #: When connection to the RabbitMQ server breaks, the #: client will receive EPIPE socket errors. These indicate #: an error that may be fixed by retrying. This constant #: is a guess for how many times the retry may be reasonable EPIPE_RETRIES = 4 LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('rpc_response_timeout', 'mistral.config') class KombuRPCClient(rpc_base.RPCClient, kombu_base.Base): def __init__(self, conf): super(KombuRPCClient, self).__init__(conf) kombu_base.set_transport_options() self._register_mistral_serialization() self.topic = conf.topic self.server_id = conf.host hosts = kombu_hosts.KombuHosts(CONF) self.exchange = CONF.control_exchange self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete self._timeout = CONF.rpc_response_timeout self.routing_key = self.topic connections = [] for host in hosts.hosts: conn = self._make_connection( host.hostname, host.port, host.username, host.password, hosts.virtual_host ) connections.append(conn) self._connections = itertools.cycle(connections) # Create exchange. exchange = self._make_exchange( self.exchange, durable=self.durable_queue, auto_delete=self.auto_delete ) # Create queue. self.queue_name = utils.generate_unicode_uuid() self.callback_queue = kombu.Queue( self.queue_name, exchange=exchange, routing_key=self.queue_name, durable=False, exclusive=True, auto_delete=True ) self._listener = kombu_listener.KombuRPCListener( connections=self._connections, callback_queue=self.callback_queue ) self._listener.start() def _wait_for_result(self, correlation_id): """Waits for the result from the server. 
Waits for the result from the server, checks every second if a timeout occurred. If a timeout occurred - the `RpcTimeout` exception will be raised. """ try: return self._listener.get_result(correlation_id, self._timeout) except moves.queue.Empty: raise exc.MistralException( "RPC Request timeout, correlation_id = %s" % correlation_id ) def _call(self, ctx, method, target, async_=False, **kwargs): """Performs a remote call for the given method. :param ctx: authentication context associated with mistral :param method: name of the method that should be executed :param kwargs: keyword parameters for the remote-method :param target: Server name :param async: bool value means whether the request is asynchronous or not. :return: result of the method or None if async. """ correlation_id = utils.generate_unicode_uuid() body = { 'rpc_ctx': ctx.to_dict(), 'rpc_method': method, 'arguments': self._serialize_message(kwargs), 'async': async_ } LOG.debug("Publish request: %s", body) try: if not async_: self._listener.add_listener(correlation_id) # Publish request. for retry_round in six.moves.range(EPIPE_RETRIES): if self._publish_request(body, correlation_id): break # Start waiting for response. if async_: return LOG.debug( "Waiting a reply for sync call [reply_to = %s]", self.queue_name ) result = self._wait_for_result(correlation_id) res_type = result[kombu_base.TYPE] res_object = result[kombu_base.RESULT] if res_type == 'error': raise res_object else: res_object = self._deserialize_message(res_object)['body'] finally: if not async_: self._listener.remove_listener(correlation_id) return res_object def _publish_request(self, body, correlation_id): """Publishes the request message .. note:: The :const:`errno.EPIPE` socket errors are suppressed and result in False being returned. This is because this type of error can usually be fixed by retrying. :param body: message body :param correlation_id: correlation id :return: True if publish succeeded, False otherwise :rtype: bool """ try: conn = self._listener.wait_ready() if conn: with kombu.producers[conn].acquire(block=True) as producer: producer.publish( body=body, exchange=self.exchange, routing_key=self.topic, reply_to=self.queue_name, correlation_id=correlation_id, delivery_mode=2 ) return True except socket.error as e: if e.errno != errno.EPIPE: raise else: LOG.debug('Retrying publish due to broker connection failure') return False def sync_call(self, ctx, method, target=None, **kwargs): return self._call(ctx, method, async_=False, target=target, **kwargs) def async_call(self, ctx, method, target=None, fanout=False, **kwargs): return self._call(ctx, method, async_=True, target=target, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/kombu/kombu_hosts.py0000644000175000017500000000217200000000000022400 0ustar00coreycorey00000000000000# Copyright (c) 2017 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
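Distilled from KombuRPCClient above, the reply-matching idea is: publish with a unique correlation_id and a reply_to queue, then block on a per-id queue until the listener delivers the matching reply. A standalone sketch (Python 3 stdlib only; all names are invented for illustration):

import queue
import uuid

pending = {}  # correlation_id -> queue.Queue that will receive the reply


def send_request(publish_func, timeout=60):
    corr_id = str(uuid.uuid4())
    pending[corr_id] = queue.Queue()

    try:
        # E.g. producer.publish(..., correlation_id=corr_id).
        publish_func(corr_id)

        # Raises queue.Empty on timeout, mirroring the timeout
        # handling in _wait_for_result() above.
        return pending[corr_id].get(timeout=timeout)
    finally:
        del pending[corr_id]


def on_reply(corr_id, body):
    # Called by the consumer thread for every incoming reply.
    waiter = pending.get(corr_id)

    if waiter is not None:
        waiter.put(body)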
import itertools import random import six import oslo_messaging as messaging class KombuHosts(object): def __init__(self, conf): transport_url = messaging.TransportURL.parse(conf, conf.transport_url) self.virtual_host = transport_url.virtual_host self.hosts = transport_url.hosts if len(self.hosts) > 1: random.shuffle(self.hosts) self._hosts_cycle = itertools.cycle(self.hosts) def get_host(self): return six.next(self._hosts_cycle) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/kombu/kombu_listener.py0000644000175000017500000000775200000000000023076 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from kombu.mixins import ConsumerMixin import six import threading from oslo_log import log as logging from oslo_utils import eventletutils from mistral.rpc.kombu import base as kombu_base LOG = logging.getLogger(__name__) class KombuRPCListener(ConsumerMixin): def __init__(self, connections, callback_queue): self._results = {} self._connections = itertools.cycle(connections) self._callback_queue = callback_queue self._thread = None self.connection = six.next(self._connections) self.ready = eventletutils.Event() def add_listener(self, correlation_id): self._results[correlation_id] = six.moves.queue.Queue() def remove_listener(self, correlation_id): if correlation_id in self._results: del self._results[correlation_id] def get_consumers(self, Consumer, channel): consumers = [Consumer( self._callback_queue, callbacks=[self.on_message], accept=['pickle', 'json'] )] self.ready.set() return consumers def start(self): if self._thread is None: self._thread = threading.Thread(target=self.run) self._thread.daemon = True self._thread.start() def on_message(self, response, message): """Callback on response. This method is automatically called when a response is incoming and decides if it is the message we are waiting for - the message with the result. 
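The shuffle-then-cycle behaviour of KombuHosts above can be seen in isolation (host names are placeholders): hosts are shuffled once when there is more than one, so different processes start from different brokers, and are then handed out round-robin.

import itertools
import random

hosts = ['amqp-1', 'amqp-2', 'amqp-3']

# Shuffle once so different processes start from different brokers...
if len(hosts) > 1:
    random.shuffle(hosts)

# ...then hand hosts out round-robin forever.
host_cycle = itertools.cycle(hosts)

for _ in range(5):
    print(next(host_cycle))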
:param response: the body of the amqp message already deserialized by kombu :param message: the plain amqp kombu.message with additional information """ LOG.debug("Got response: {0}".format(response)) try: message.ack() except Exception as e: LOG.exception("Failed to acknowledge AMQP message: %s", e) else: LOG.debug("AMQP message acknowledged.") correlation_id = message.properties['correlation_id'] queue = self._results.get(correlation_id) if queue: result = { kombu_base.TYPE: 'error' if message.properties.get('type') == 'error' else None, kombu_base.RESULT: response } queue.put(result) else: LOG.debug( "Got a response, but seems like no process is waiting for " "it [correlation_id={0}]".format(correlation_id) ) def get_result(self, correlation_id, timeout): return self._results[correlation_id].get(block=True, timeout=timeout) def on_connection_error(self, exc, interval): self.ready.clear() self.connection = six.next(self._connections) LOG.debug("Broker connection failed: %s", exc) LOG.debug( "Sleeping for %s seconds, then retrying connection", interval ) def wait_ready(self, timeout=10.0): """Waits for the listener to successfully declare the consumer :param timeout: timeout for waiting in seconds :return: same as :func:`~threading.Event.wait` :rtype: bool """ if self.ready.wait(timeout=timeout): return self.connection else: return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/kombu/kombu_server.py0000644000175000017500000002211700000000000022547 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import amqp import socket import threading import time import kombu from oslo_config import cfg from oslo_log import log as logging from oslo_utils import eventletutils from stevedore import driver from mistral import context as auth_ctx from mistral import exceptions as exc from mistral.rpc import base as rpc_base from mistral.rpc.kombu import base as kombu_base from mistral.rpc.kombu import kombu_hosts LOG = logging.getLogger(__name__) CONF = cfg.CONF _pool_opts = [ cfg.IntOpt( 'executor_thread_pool_size', default=64, deprecated_name="rpc_thread_pool_size", help='Size of executor thread pool when' ' executor is threading or eventlet.' ), ] class KombuRPCServer(rpc_base.RPCServer, kombu_base.Base): def __init__(self, conf): super(KombuRPCServer, self).__init__(conf) CONF.register_opts(_pool_opts) kombu_base.set_transport_options() self._register_mistral_serialization() self.topic = conf.topic self.server_id = conf.host self._hosts = kombu_hosts.KombuHosts(CONF) self._executor_threads = CONF.executor_thread_pool_size self.exchange = CONF.control_exchange # TODO(rakhmerov): We shouldn't rely on any properties related # to oslo.messaging. Only "transport_url" should matter. 
self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete self.routing_key = self.topic self.channel = None self.conn = None self._running = eventletutils.Event() self._stopped = eventletutils.Event() self.endpoints = [] self._worker = None self._thread = None # TODO(ddeja): Those 2 options should be gathered from config. self._sleep_time = 1 self._max_sleep_time = 10 @property def is_running(self): """Return whether server is running.""" return self._running.is_set() def run(self, executor='eventlet'): if self._thread is None: self._thread = threading.Thread(target=self._run, args=(executor,)) self._thread.daemon = True self._thread.start() def _run(self, executor): """Start the server.""" self._prepare_worker(executor) while True: try: _retry_connection = False host = self._hosts.get_host() self.conn = self._make_connection( host.hostname, host.port, host.username, host.password, self._hosts.virtual_host, ) conn = kombu.connections[self.conn].acquire(block=True) exchange = self._make_exchange( self.exchange, durable=self.durable_queue, auto_delete=self.auto_delete ) queue = self._make_queue( self.topic, exchange, routing_key=self.routing_key, durable=self.durable_queue, auto_delete=self.auto_delete ) with conn.Consumer( queues=queue, callbacks=[self._process_message], ) as consumer: consumer.qos(prefetch_count=1) self._running.set() self._stopped.clear() LOG.info( "Connected to AMQP at %s:%s", host.hostname, host.port ) self._sleep_time = 1 while self.is_running: try: conn.drain_events(timeout=1) except socket.timeout: pass except KeyboardInterrupt: self.stop() LOG.info( "Server with id='{}' stopped." .format(self.server_id) ) return except (socket.error, amqp.exceptions.ConnectionForced) as e: LOG.debug("Broker connection failed: %s", e) _retry_connection = True finally: self._stopped.set() if _retry_connection: LOG.debug( "Sleeping for %s seconds, then retrying " "connection", self._sleep_time ) time.sleep(self._sleep_time) self._sleep_time = min( self._sleep_time * 2, self._max_sleep_time ) def stop(self, graceful=False): self._running.clear() if graceful: self.wait() def wait(self): self._stopped.wait() try: self._worker.shutdown(wait=True) except AttributeError as e: LOG.warning("Cannot stop worker in graceful way: %s", e) def _get_rpc_method(self, method_name): for endpoint in self.endpoints: if hasattr(endpoint, method_name): return getattr(endpoint, method_name) return None @staticmethod def _set_auth_ctx(ctx): if not isinstance(ctx, dict): return context = auth_ctx.MistralContext.from_dict(ctx) auth_ctx.set_ctx(context) return context def publish_message(self, body, reply_to, corr_id, res_type='response'): if res_type != 'error': body = self._serialize_message({'body': body}) with kombu.producers[self.conn].acquire(block=True) as producer: producer.publish( body=body, exchange=self.exchange, routing_key=reply_to, correlation_id=corr_id, serializer='pickle' if res_type == 'error' else 'json', type=res_type ) def _on_message_safe(self, request, message): try: return self._on_message(request, message) except Exception as e: LOG.warning( "Got exception while consuming message. Exception would be " "send back to the caller." ) LOG.debug("Exceptions: %s", str(e)) # Wrap exception into another exception for compatibility # with oslo. 
self.publish_message( exc.KombuException(e), message.properties['reply_to'], message.properties['correlation_id'], res_type='error' ) finally: message.ack() def _on_message(self, request, message): LOG.debug('Received message %s', request) is_async = request.get('async', False) rpc_ctx = request.get('rpc_ctx') redelivered = message.delivery_info.get('redelivered') rpc_method_name = request.get('rpc_method') arguments = self._deserialize_message(request.get('arguments')) correlation_id = message.properties['correlation_id'] reply_to = message.properties['reply_to'] if redelivered is not None: rpc_ctx['redelivered'] = redelivered rpc_context = self._set_auth_ctx(rpc_ctx) rpc_method = self._get_rpc_method(rpc_method_name) if not rpc_method: raise exc.MistralException("No such method: %s" % rpc_method_name) response = rpc_method(rpc_ctx=rpc_context, **arguments) if not is_async: LOG.debug( "RPC server sent a reply [reply_to = %s, correlation_id = %s", reply_to, correlation_id ) self.publish_message( response, reply_to, correlation_id ) def register_endpoint(self, endpoint): self.endpoints.append(endpoint) def _process_message(self, request, message): self._worker.submit(self._on_message_safe, request, message) def _prepare_worker(self, executor='blocking'): mgr = driver.DriverManager('kombu_driver.executors', executor) executor_opts = {} if executor != 'blocking': executor_opts['max_workers'] = self._executor_threads self._worker = mgr.driver(**executor_opts) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1255674 mistral-10.0.0.0b3/mistral/rpc/oslo/0000755000175000017500000000000000000000000017326 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/oslo/__init__.py0000644000175000017500000000000000000000000021425 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/oslo/oslo_client.py0000644000175000017500000000302500000000000022212 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
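The reconnect loop in KombuRPCServer above uses a capped exponential backoff: the delay doubles after each failed attempt (up to _max_sleep_time) and resets to one second after a successful connection. A standalone sketch of just that pattern, with OSError standing in for the socket/AMQP errors handled above:

import time


def connect_with_backoff(connect, max_sleep=10):
    sleep_time = 1

    while True:
        try:
            return connect()
        except OSError:
            time.sleep(sleep_time)
            sleep_time = min(sleep_time * 2, max_sleep)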
import oslo_messaging as messaging from mistral import context as auth_ctx from mistral.rpc import base as rpc class OsloRPCClient(rpc.RPCClient): def __init__(self, conf): super(OsloRPCClient, self).__init__(conf) self.topic = conf.topic serializer = auth_ctx.RpcContextSerializer() self._client = messaging.RPCClient( rpc.get_transport(), messaging.Target(topic=self.topic), serializer=serializer ) def sync_call(self, ctx, method, target=None, **kwargs): return self._client.prepare(topic=self.topic, server=target).call( ctx, method, **kwargs ) def async_call(self, ctx, method, target=None, fanout=False, **kwargs): return self._client.prepare(topic=self.topic, server=target, fanout=fanout).cast(ctx, method, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/rpc/oslo/oslo_server.py0000644000175000017500000000372400000000000022250 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from mistral import context as ctx from mistral.rpc import base as rpc class OsloRPCServer(rpc.RPCServer): def __init__(self, conf): super(OsloRPCServer, self).__init__(conf) self.topic = conf.topic self.server_id = conf.host self.queue = self.topic self.routing_key = self.topic self.channel = None self.connection = None self.endpoints = [] self.oslo_server = None def register_endpoint(self, endpoint): self.endpoints.append(endpoint) def run(self, executor='eventlet'): target = messaging.Target( topic=self.topic, server=self.server_id ) # TODO(rakhmerov): rpc.get_transport() should be in oslo.messaging # related module. access_policy = dispatcher.DefaultRPCAccessPolicy self.oslo_server = messaging.get_rpc_server( rpc.get_transport(), target, self.endpoints, executor=executor, serializer=ctx.RpcContextSerializer(), access_policy=access_policy ) self.oslo_server.start() def stop(self, graceful=False): self.oslo_server.stop() if graceful: self.oslo_server.wait() def wait(self): self.oslo_server.wait() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1255674 mistral-10.0.0.0b3/mistral/scheduler/0000755000175000017500000000000000000000000017544 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/scheduler/__init__.py0000644000175000017500000000000000000000000021643 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/scheduler/base.py0000644000175000017500000001121600000000000021031 0ustar00coreycorey00000000000000# Copyright 2018 - Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from oslo_config import cfg from stevedore import driver CONF = cfg.CONF _SCHEDULER_IMPL = None _SCHEDULER = None @six.add_metaclass(abc.ABCMeta) class Scheduler(object): """Scheduler interface. Responsible for scheduling jobs to be executed at some point in future. """ @abc.abstractmethod def schedule(self, job, allow_redistribute=False): """Schedules a delayed call to be invoked at some point in future. :param job: Scheduler Job. An instance of :class:`SchedulerJob`. :param allow_redistribute: If True then the method is allowed to reroute the call to other Scheduler instances available in the cluster. """ raise NotImplementedError @abc.abstractmethod def has_scheduled_jobs(self, **filters): """Returns True if there are scheduled jobs matching the given filter. :param filters: Filters that define what kind of jobs need to be counted. Permitted values: * key= - a key set for a job when it was scheduled. * processing= - if True, count only jobs that are currently being processed. """ raise NotImplementedError @abc.abstractmethod def start(self): """Starts this scheduler.""" raise NotImplementedError @abc.abstractmethod def stop(self, graceful=False): """Stops this scheduler.""" raise NotImplementedError class SchedulerJob(object): """Scheduler job. Encapsulates information about a command that needs to be executed at some point in future. """ def __init__(self, run_after=0, target_factory_func_name=None, func_name=None, func_args=None, func_arg_serializers=None, key=None): """Initializes a Scheduler Job. :param run_after: Amount of seconds after which to invoke a scheduled call. :param target_factory_func_name: Full path of a function that returns a target object against which a method specified with the "func_name" should be invoked. Optional. If None, then "func_name" must be a full path of a static function to invoke. :param func_name: Function or method name to invoke when a job gets triggered. :param func_args: Dictionary containing function/method argument names and values as key-value pairs. A function/method specified with the "func_name" argument will be invoked with these arguments. :param func_arg_serializers: Dictionary containing function/method argument names and serializers for argument values as key-value pairs. Each serializer is a full path to a subclass of :class:'mistral_lib.serialization.Serializer' that is capable of serializing and deserializing of a corresponding argument value. Optional. Serializers must be specified only for those arguments whose values can't be saved into a persistent storage as is and they need to be converted first into a value of a primitive type. :param key: A value that can be used to find the job. 
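A hedged usage sketch of the job class documented above; the function path, arguments and key are placeholders, and the scheduling call assumes a fully configured Mistral process:

from mistral.scheduler import base as sched_base

# Call 'mypackage.mymodule.send_report' (hypothetical) in 60 seconds.
job = sched_base.SchedulerJob(
    run_after=60,
    func_name='mypackage.mymodule.send_report',
    func_args={'report_id': 42},
    key='send_report_42'
)

sched_base.get_system_scheduler().schedule(job)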
""" if not func_name: raise RuntimeError("'target_method_name' must be provided.") self.run_after = run_after self.target_factory_func_name = target_factory_func_name self.func_name = func_name self.func_args = func_args or {} self.func_arg_serializers = func_arg_serializers self.key = key def get_system_scheduler(): global _SCHEDULER if not _SCHEDULER: impl = _get_scheduler_implementation() _SCHEDULER = impl(CONF.scheduler) return _SCHEDULER def destroy_system_scheduler(): global _SCHEDULER _SCHEDULER = None def _get_scheduler_implementation(): global _SCHEDULER_IMPL if not _SCHEDULER_IMPL: _SCHEDULER_IMPL = driver.DriverManager( 'mistral.schedulers', CONF.scheduler_type ).driver return _SCHEDULER_IMPL ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/scheduler/default_scheduler.py0000644000175000017500000002615200000000000023606 0ustar00coreycorey00000000000000# Copyright 2018 - Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime import eventlet import random import sys import threading from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from osprofiler import profiler from mistral import context from mistral.db import utils as db_utils from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.scheduler import base from mistral_lib import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF class DefaultScheduler(base.Scheduler): def __init__(self, conf): """Initializes a scheduler instance. # TODO(rakhmerov): Fix docstring :param fixed_delay: A fixed part of the delay (in seconds) that defines how often this scheduler checks the persistent job store for the new jobs to run. :param random_delay: A random part of the delay (in seconds) that defines how often this scheduler checks the persistent job store for the new jobs to run. :param batch_size: Defines how many jobs this scheduler can pick up from the job store at once. """ self._fixed_delay = conf.fixed_delay self._random_delay = conf.random_delay self._batch_size = conf.batch_size # Dictionary containing {GreenThread: ScheduledJob} pairs that # represent in-memory jobs. self.in_memory_jobs = {} self._job_store_checker_thread = threading.Thread( target=self._job_store_checker ) self._job_store_checker_thread.daemon = True self._stopped = True def start(self): self._stopped = False self._job_store_checker_thread.start() def stop(self, graceful=False): self._stopped = True if graceful: self._job_store_checker_thread.join() def _job_store_checker(self): while not self._stopped: LOG.debug( "Starting Scheduler Job Store checker [scheduler=%s]...", self ) eventlet.sleep( self._fixed_delay + random.Random().randint(0, self._random_delay * 1000) * 0.001 ) try: self._process_store_jobs() except Exception: LOG.exception( "Scheduler failed to process delayed calls" " due to unexpected exception." 
) # For some mysterious reason (probably eventlet related) # the exception is not cleared from the context automatically. # This results in subsequent log.warning calls to show invalid # info. if sys.version_info < (3,): sys.exc_clear() def _process_store_jobs(self): # Select and capture eligible jobs. with db_api.transaction(): candidate_jobs = db_api.get_scheduled_jobs_to_start( utils.utc_now_sec(), self._batch_size ) captured_jobs = [ job for job in candidate_jobs if self._capture_scheduled_job(job) ] # Invoke and delete scheduled jobs. for job in captured_jobs: auth_ctx, func, func_args = self._prepare_job(job) self._invoke_job(auth_ctx, func, func_args) self._delete_scheduled_job(job) def schedule(self, job, allow_redistribute=False): scheduled_job = self._persist_job(job) self._schedule_in_memory(job.run_after, scheduled_job) def has_scheduled_jobs(self, **filters): # Checking in-memory jobs first. for j in self.in_memory_jobs.values(): if filters and 'key' in filters and filters['key'] != j.key: continue if filters and 'processing' in filters: if filters['processing'] is (j.captured_at is None): continue return True if filters and 'processing' in filters: processing = filters.pop('processing') filters['captured_at'] = {'neq' if processing else 'eq': None} return db_api.get_scheduled_jobs_count(**filters) > 0 @staticmethod def _persist_job(job): ctx_serializer = context.RpcContextSerializer() ctx = ( ctx_serializer.serialize_context(context.ctx()) if context.has_ctx() else {} ) execute_at = (utils.utc_now_sec() + datetime.timedelta(seconds=job.run_after)) args = job.func_args arg_serializers = job.func_arg_serializers if arg_serializers: for arg_name, serializer_path in arg_serializers.items(): if arg_name not in args: raise exc.MistralException( "Serializable function argument %s" " not found in func_args=%s" % (arg_name, args)) try: serializer = importutils.import_class(serializer_path)() except ImportError as e: raise ImportError( "Cannot import class %s: %s" % (serializer_path, e) ) args[arg_name] = serializer.serialize(args[arg_name]) values = { 'run_after': job.run_after, 'target_factory_func_name': job.target_factory_func_name, 'func_name': job.func_name, 'func_args': args, 'func_arg_serializers': arg_serializers, 'auth_ctx': ctx, 'execute_at': execute_at, 'captured_at': None, 'key': job.key } return db_api.create_scheduled_job(values) def _schedule_in_memory(self, run_after, scheduled_job): green_thread = eventlet.spawn_after( run_after, self._process_memory_job, scheduled_job ) self.in_memory_jobs[green_thread] = scheduled_job def _process_memory_job(self, scheduled_job): # 1. Capture the job in Job Store. if not self._capture_scheduled_job(scheduled_job): LOG.warning( "Unable to capture a scheduled job [scheduled_job=%s]", scheduled_job ) return # 2. Invoke the target function. auth_ctx, func, func_args = self._prepare_job(scheduled_job) self._invoke_job(auth_ctx, func, func_args) self._delete_scheduled_job(scheduled_job) # 3. Delete the job from Job Store, if success. # TODO(rakhmerov): # 3.1 What do we do if invocation wasn't successful? # Delete from a local collection of in-memory jobs. del self.in_memory_jobs[eventlet.getcurrent()] @staticmethod def _capture_scheduled_job(scheduled_job): """Capture a scheduled persistent job in a job store. :param scheduled_job: Job. :return: True if the job has been captured, False if not. """ now_sec = utils.utc_now_sec() # Mark this job as captured in order to prevent calling from # a parallel transaction. 
We don't use query filter # {'captured_at': None} to account for a case when the job needs # to be recaptured after a maximum capture time has elapsed. If this # method was called for a job that has non-empty "captured_at" then # it means that it is already eligible for recapturing and the # Job Store selected it. _, updated_cnt = db_api.update_scheduled_job( id=scheduled_job.id, values={'captured_at': now_sec}, query_filter={'captured_at': scheduled_job.captured_at} ) # We need to update "captured_at" of the initial object stored in # memory because it's used in a few places. if updated_cnt == 1: scheduled_job.captured_at = now_sec # If updated_cnt != 1 then another scheduler # has already updated it. return updated_cnt == 1 @db_utils.retry_on_db_error def _delete_scheduled_job(self, scheduled_job): db_api.delete_scheduled_job(scheduled_job.id) @staticmethod def _prepare_job(scheduled_job): """Prepares a scheduled job for invocation. To make an invocation of a delayed call it needs to be prepared for further usage, we need to reconstruct a final target func and deserialize arguments, if needed. :param scheduled_job: Persistent scheduled job. :return: A tuple (auth_ctx, func, args) where all data is properly deserialized. """ LOG.debug( 'Preparing a scheduled job. [ID=%s, target_factory_func_name=%s,' ' func_name=%s, func_args=%s]', scheduled_job.id, scheduled_job.target_factory_func_name, scheduled_job.func_name, scheduled_job.func_args ) auth_ctx = copy.deepcopy(scheduled_job.auth_ctx) if scheduled_job.target_factory_func_name: factory = importutils.import_class( scheduled_job.target_factory_func_name ) func = getattr(factory(), scheduled_job.func_name) else: func = importutils.import_class(scheduled_job.func_name) args = copy.deepcopy(scheduled_job.func_args) serializers_dict = scheduled_job.func_arg_serializers if serializers_dict: # Deserialize arguments. for arg_name, ser_path in serializers_dict.items(): serializer = importutils.import_class(ser_path)() deserialized = serializer.deserialize(args[arg_name]) args[arg_name] = deserialized return auth_ctx, func, args @staticmethod def _invoke_job(auth_ctx, func, args): # Scheduler runs jobs in an separate thread that's neither related # to an RPC nor a REST request processing thread. So we need to # initialize a profiler specifically for this thread. if cfg.CONF.profiler.enabled: profiler.init(cfg.CONF.profiler.hmac_keys) ctx_serializer = context.RpcContextSerializer() try: # Set the correct context for the function. ctx_serializer.deserialize_context(auth_ctx) # Invoke the function. func(**args) except Exception as e: LOG.exception( "Scheduled job failed, method: %s, exception: %s", func, e ) finally: # Remove context. context.set_ctx(None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/scheduler/scheduler_server.py0000644000175000017500000000374300000000000023471 0ustar00coreycorey00000000000000# Copyright 2018 - Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from mistral.rpc import base as rpc from mistral.service import base as service_base LOG = logging.getLogger(__name__) CONF = cfg.CONF class SchedulerServer(service_base.MistralService): """Scheduler server. Manages scheduler life-cycle and gets registered as an RPC endpoint to process scheduler specific calls. """ def __init__(self, scheduler, setup_profiler=True): super(SchedulerServer, self).__init__( 'scheduler_group', setup_profiler ) self.scheduler = scheduler self._rpc_server = None def start(self): super(SchedulerServer, self).start() self._rpc_server = rpc.get_rpc_server_driver()(cfg.CONF.engine) self._rpc_server.register_endpoint(self) self._rpc_server.run() self._notify_started('Scheduler server started.') def stop(self, graceful=False): super(SchedulerServer, self).stop() if self._rpc_server: self._rpc_server.stop(graceful) def schedule(self, rpc_ctx, job): """Receives requests over RPC to schedule delayed calls. :param rpc_ctx: RPC request context. :param job: Scheduler job. """ LOG.info("Received RPC request 'schedule'[job=%s]", job) return self.scheduler.schedule(job, allow_redistribute=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1255674 mistral-10.0.0.0b3/mistral/service/0000755000175000017500000000000000000000000017226 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/service/__init__.py0000644000175000017500000000000000000000000021325 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/service/base.py0000644000175000017500000000374500000000000020523 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import event from oslo_service import service from mistral.service import coordination class MistralService(service.Service): """Base class for Mistral services. The term 'service' here means any Mistral component that can run as an independent process and thus can be registered as a cluster member. 
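A minimal illustration of the intended lifecycle (using the SchedulerServer subclass shown earlier; any concrete subclass behaves the same way): server = SchedulerServer(scheduler) server.start() server.wait_started() ... server.stop(graceful=True)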
""" def __init__(self, cluster_group, setup_profiler=True): super(MistralService, self).__init__() self.cluster_member = coordination.Service(cluster_group) self._setup_profiler = setup_profiler self._started = event.Event() def wait_started(self): """Wait until the service is fully started.""" self._started.wait() def _notify_started(self, message): print(message) self._started.send() def start(self): super(MistralService, self).start() self.cluster_member.register_membership() def stop(self, graceful=False): super(MistralService, self).stop(graceful) self._started = event.Event() # TODO(rakhmerov): Probably we could also take care of an RPC server # if it exists for this particular service type. Take a look at # executor and engine servers. # TODO(rakhmerov): This method is not implemented correctly now # (not thread-safe). Uncomment this call once it's fixed. # self.cluster_member.stop() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/service/coordination.py0000644000175000017500000001226500000000000022276 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log import tenacity import tooz.coordination from mistral_lib import utils LOG = log.getLogger(__name__) _SERVICE_COORDINATOR = None class ServiceCoordinator(object): """Service coordinator. This class uses the `tooz` library to manage group membership. To ensure that the other agents know this agent is still alive, the `heartbeat` method should be called periodically. """ def __init__(self, my_id=None): self._coordinator = None self._my_id = six.b(my_id or utils.get_process_identifier()) self._started = False def start(self): backend_url = cfg.CONF.coordination.backend_url if backend_url: try: self._coordinator = tooz.coordination.get_coordinator( backend_url, self._my_id ) self._coordinator.start(start_heart=True) self._started = True LOG.info('Coordination backend started successfully.') except tooz.coordination.ToozError as e: self._started = False LOG.exception('Error connecting to coordination backend. 
' '%s', six.text_type(e)) def stop(self): if not self.is_active(): return try: self._coordinator.stop() except tooz.coordination.ToozError: LOG.warning('Error connecting to coordination backend.') finally: self._coordinator = None self._started = False def is_active(self): return self._coordinator and self._started @tenacity.retry(stop=tenacity.stop_after_attempt(5)) def join_group(self, group_id): if not self.is_active() or not group_id: return try: join_req = self._coordinator.join_group(six.b(group_id)) join_req.get() LOG.info( 'Joined service group:%s, member:%s', group_id, self._my_id ) return except tooz.coordination.MemberAlreadyExist: return except tooz.coordination.GroupNotCreated as e: create_grp_req = self._coordinator.create_group(six.b(group_id)) try: create_grp_req.get() except tooz.coordination.GroupAlreadyExist: pass # Re-raise exception to join group again. raise e def leave_group(self, group_id): if self.is_active(): self._coordinator.leave_group(six.b(group_id)) LOG.info( 'Left service group:%s, member:%s', group_id, self._my_id ) def get_members(self, group_id): """Gets members of a coordination group. A ToozError may be raised when this function is invoked; we leave the handling decision to the caller. """ if not self.is_active(): return [] get_members_req = self._coordinator.get_members(six.b(group_id)) try: members = get_members_req.get() LOG.debug('Members of group %s: %s', group_id, members) return members except tooz.coordination.GroupNotCreated: LOG.warning('Group %s does not exist.', group_id) return [] def cleanup_service_coordinator(): """Intended to be used by tests to recreate the service coordinator.""" global _SERVICE_COORDINATOR _SERVICE_COORDINATOR = None def get_service_coordinator(my_id=None): global _SERVICE_COORDINATOR if not _SERVICE_COORDINATOR: _SERVICE_COORDINATOR = ServiceCoordinator(my_id=my_id) _SERVICE_COORDINATOR.start() return _SERVICE_COORDINATOR class Service(object): def __init__(self, group_type): self.group_type = group_type @lockutils.synchronized('service_coordinator') def register_membership(self): """Registers group membership. This method is invoked on each service startup at almost the same time, so it must be synchronized in case all the services are started within the same process. """ service_coordinator = get_service_coordinator() if service_coordinator.is_active(): service_coordinator.join_group(self.group_type) def stop(self): service_coordinator = get_service_coordinator() if service_coordinator.is_active(): service_coordinator.stop() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1295676 mistral-10.0.0.0b3/mistral/services/0000755000175000017500000000000000000000000017411 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/__init__.py0000644000175000017500000000166300000000000021530 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg def is_validation_enabled(validate): validation_mode = cfg.CONF.api.validation_mode if validation_mode == 'mandatory': result = True elif validation_mode == 'disabled': result = False else: # validation_mode = 'enabled' result = validate return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/action_heartbeat_checker.py0000644000175000017500000000715500000000000024753 0ustar00coreycorey00000000000000# Copyright 2018 Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import eventlet import sys from mistral import context as auth_ctx from mistral.db import utils as db_utils from mistral.db.v2 import api as db_api from mistral.engine import action_handler from mistral.engine import post_tx_queue from mistral_lib import actions as mistral_lib from mistral_lib import utils from oslo_config import cfg from oslo_log import log as logging LOG = logging.getLogger(__name__) CONF = cfg.CONF _stopped = True @db_utils.retry_on_db_error @post_tx_queue.run def handle_expired_actions(): LOG.debug("Running heartbeat checker...") interval = CONF.action_heartbeat.check_interval max_missed = CONF.action_heartbeat.max_missed_heartbeats exp_date = utils.utc_now_sec() - datetime.timedelta( seconds=max_missed * interval ) with db_api.transaction(): action_exs = db_api.get_running_expired_sync_action_executions( exp_date, CONF.action_heartbeat.batch_size ) LOG.debug("Found {} running and expired actions.".format( len(action_exs)) ) if action_exs: LOG.info( "Action executions to transition to ERROR because a " "heartbeat wasn't received: {}".format(action_exs) ) for action_ex in action_exs: result = mistral_lib.Result( error="Heartbeat wasn't received." ) action_handler.on_action_complete(action_ex, result) def _loop(): global _stopped # This is an administrative thread so we need to set an admin # security context. auth_ctx.set_ctx( auth_ctx.MistralContext( user=None, tenant=None, auth_token=None, is_admin=True ) ) while not _stopped: try: handle_expired_actions() except Exception: LOG.exception( 'Action heartbeat checker iteration failed' ' due to an unexpected exception.' ) # For some mysterious reason (probably eventlet related) # the exception is not cleared from the context automatically. # This results in subsequent log.warning calls to show invalid # info. if sys.version_info < (3,): sys.exc_clear() eventlet.sleep(CONF.action_heartbeat.check_interval) def start(): interval = CONF.action_heartbeat.check_interval max_missed = CONF.action_heartbeat.max_missed_heartbeats enabled = interval and max_missed if not enabled: LOG.info("Action heartbeats are disabled.") return wait_time = interval * max_missed LOG.debug( "First run of the action heartbeat checker; waiting before " "checking to make sure executors have time to send " "heartbeats. ({} seconds)".format(wait_time) ) global _stopped _stopped = False eventlet.spawn_after(wait_time, _loop) def stop(graceful=False): global _stopped _stopped = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/action_heartbeat_sender.py0000644000175000017500000000545100000000000024624 0ustar00coreycorey00000000000000# Copyright 2018 Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet import sys from oslo_config import cfg from oslo_log import log as logging from mistral import context as auth_ctx from mistral.rpc import clients as rpc LOG = logging.getLogger(__name__) CONF = cfg.CONF _enabled = False _stopped = True _running_actions = set() def add_action(action_ex_id): global _enabled # With run-action there is no action_ex_id assigned. if action_ex_id and _enabled: rpc.get_engine_client().process_action_heartbeats([action_ex_id]) _running_actions.add(action_ex_id) def remove_action(action_ex_id): global _enabled if action_ex_id and _enabled: _running_actions.discard(action_ex_id) def send_action_heartbeats(): LOG.debug('Running heartbeat sender...') global _running_actions if not _running_actions: return rpc.get_engine_client().process_action_heartbeats(_running_actions) def _loop(): global _stopped # This is an administrative thread so we need to set an admin # security context. auth_ctx.set_ctx( auth_ctx.MistralContext( user=None, tenant=None, auth_token=None, is_admin=True ) ) while not _stopped: try: send_action_heartbeats() except Exception: LOG.exception( 'Action heartbeat sender iteration failed' ' due to an unexpected exception.' ) # For some mysterious reason (probably eventlet related) # the exception is not cleared from the context automatically. # This results in subsequent log.warning calls to show invalid # info. if sys.version_info < (3,): sys.exc_clear() eventlet.sleep(CONF.action_heartbeat.check_interval) def start(): global _stopped, _enabled interval = CONF.action_heartbeat.check_interval max_missed = CONF.action_heartbeat.max_missed_heartbeats _enabled = interval and max_missed if not _enabled: LOG.info("Action heartbeat reporting is disabled.") return _stopped = False eventlet.spawn(_loop) def stop(graceful=False): global _stopped _stopped = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/action_manager.py0000644000175000017500000001067700000000000022745 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2014 - StackStorm, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from stevedore import extension from mistral.actions import action_factory from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.services import actions from mistral_lib import utils from mistral_lib.utils import inspect_utils as i_utils # TODO(rakhmerov): Make methods more consistent and granular. LOG = logging.getLogger(__name__) ACTIONS_PATH = 'resources/actions' def register_preinstalled_actions(): action_paths = utils.get_file_list(ACTIONS_PATH) for action_path in action_paths: action_definition = open(action_path).read() actions.create_or_update_actions( action_definition, scope='public' ) def get_registered_actions(**kwargs): return db_api.get_action_definitions(**kwargs) def register_action_class(name, action_class_str, attributes, description=None, input_str=None, namespace=''): values = { 'name': name, 'action_class': action_class_str, 'attributes': attributes, 'description': description, 'input': input_str, 'is_system': True, 'scope': 'public', 'namespace': namespace } try: LOG.debug("Registering action in DB: %s", name) db_api.create_action_definition(values) except exc.DBDuplicateEntryError: LOG.debug("Action %s already exists in DB.", name) def _clear_system_action_db(): db_api.delete_action_definitions(is_system=True) def sync_db(): with db_api.transaction(): _clear_system_action_db() register_action_classes() register_preinstalled_actions() def _register_dynamic_action_classes(namespace=''): extensions = extension.ExtensionManager( namespace='mistral.generators', invoke_on_load=True ) for ext in extensions: for generator in ext.obj: _register_actions(generator, namespace) def _register_actions(generator, namespace): module = generator.base_action_class.__module__ class_name = generator.base_action_class.__name__ action_class_str = "%s.%s" % (module, class_name) for action in generator.create_actions(): attrs = i_utils.get_public_fields(action['class']) register_action_class( action['name'], action_class_str, attrs, action['description'], action['arg_list'], namespace=namespace ) def register_action_classes(namespace=''): mgr = extension.ExtensionManager( namespace='mistral.actions', invoke_on_load=False ) for name in mgr.names(): action_class_str = mgr[name].entry_point_target.replace(':', '.') action_class = mgr[name].plugin description = i_utils.get_docstring(action_class) input_str = i_utils.get_arg_list_as_str(action_class.__init__) attrs = i_utils.get_public_fields(mgr[name].plugin) register_action_class( name, action_class_str, attrs, description=description, input_str=input_str, namespace=namespace ) _register_dynamic_action_classes(namespace=namespace) def get_action_db(action_name, namespace=''): return db_api.load_action_definition(action_name, namespace=namespace) def get_action_class(action_full_name, namespace=''): """Finds action class by full action name (i.e. 'namespace.action_name'). :param action_full_name: Full action name (that includes namespace). :return: Action class or None if not found. 
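A minimal usage sketch (assuming an action named 'std.echo' has already been registered in the DB): action_cls = get_action_class('std.echo') if action_cls: action = action_cls(output='Hello!')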
""" action_db = get_action_db(action_full_name, namespace) if action_db: return action_factory.construct_action_class( action_db.action_class, action_db.attributes ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/actions.py0000644000175000017500000001030300000000000021420 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.lang import parser as spec_parser def create_actions(definition, scope='private', namespace=''): action_list_spec = spec_parser.get_action_list_spec_from_yaml(definition) db_actions = [] for action_spec in action_list_spec.get_actions(): db_actions.append(create_action( action_spec, definition, scope, namespace)) return db_actions def update_actions(definition, scope='private', identifier=None, namespace=''): action_list_spec = spec_parser.get_action_list_spec_from_yaml(definition) actions = action_list_spec.get_actions() if identifier and len(actions) > 1: raise exc.InputException( "More than one actions are not supported for " "update with identifier. [identifier: %s]" % identifier ) db_actions = [] for action_spec in action_list_spec.get_actions(): db_actions.append(update_action( action_spec, definition, scope, identifier=identifier, namespace=namespace )) return db_actions def create_or_update_actions(definition, scope='private', namespace=''): action_list_spec = spec_parser.get_action_list_spec_from_yaml(definition) db_actions = [] for action_spec in action_list_spec.get_actions(): db_actions.append( create_or_update_action(action_spec, definition, scope, namespace) ) return db_actions def create_action(action_spec, definition, scope, namespace): return db_api.create_action_definition( _get_action_values(action_spec, definition, scope, namespace) ) def update_action(action_spec, definition, scope, identifier=None, namespace=''): action = db_api.load_action_definition(action_spec.get_name()) if action and action.is_system: raise exc.InvalidActionException( "Attempt to modify a system action: %s" % action.name ) values = _get_action_values(action_spec, definition, scope, namespace) return db_api.update_action_definition( identifier if identifier else values['name'], values ) def create_or_update_action(action_spec, definition, scope, namespace): action = db_api.load_action_definition(action_spec.get_name()) if action and action.is_system: raise exc.InvalidActionException( "Attempt to modify a system action: %s" % action.name ) values = _get_action_values(action_spec, definition, scope, namespace) return db_api.create_or_update_action_definition(values['name'], values) def get_input_list(action_input): input_list = [] for param in action_input: if isinstance(param, dict): for k, v in param.items(): input_list.append("%s=%s" % (k, json.dumps(v))) else: input_list.append(param) return input_list 
def _get_action_values(action_spec, definition, scope, namespace=''): action_input = action_spec.to_dict().get('input', []) input_list = get_input_list(action_input) values = { 'name': action_spec.get_name(), 'description': action_spec.get_description(), 'tags': action_spec.get_tags(), 'definition': definition, 'spec': action_spec.to_dict(), 'is_system': False, 'input': ", ".join(input_list) if input_list else None, 'scope': scope, 'namespace': namespace } return values ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/expiration_policy.py0000644000175000017500000001201700000000000023525 0ustar00coreycorey00000000000000# Copyright 2015 - Alcatel-lucent, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import traceback from oslo_config import cfg from oslo_log import log as logging from oslo_service import periodic_task from oslo_service import threadgroup from mistral import context as auth_ctx from mistral.db.v2 import api as db_api from mistral.workflow import states LOG = logging.getLogger(__name__) CONF = cfg.CONF class ExecutionExpirationPolicy(periodic_task.PeriodicTasks): """Expiration Policy task. This task runs every 'evaluation_interval' and removes old (expired) executions. Both the evaluation interval and the expiration time are configurable in 'mistral.cfg' (both in minutes). By default the interval is set to 'None', so this task is disabled. """ def __init__(self, conf): super(ExecutionExpirationPolicy, self).__init__(conf) interval = CONF.execution_expiration_policy.evaluation_interval ot = CONF.execution_expiration_policy.older_than mfe = CONF.execution_expiration_policy.max_finished_executions if interval and ((ot and ot >= 1) or (mfe and mfe >= 1)): _periodic_task = periodic_task.periodic_task( spacing=interval * 60, run_immediately=True ) self.add_periodic_task( _periodic_task(run_execution_expiration_policy) ) else: LOG.debug("Expiration policy disabled. evaluation_interval " "is not configured, or both older_than and " "max_finished_executions are < 1.") def _delete_executions(batch_size, expiration_time, max_finished_executions): _delete_until_depleted( lambda: db_api.get_expired_executions( expiration_time, batch_size ) ) _delete_until_depleted( lambda: db_api.get_superfluous_executions( max_finished_executions, batch_size ) ) def _delete_until_depleted(fetch_func): while True: with db_api.transaction(): execs = fetch_func() if not execs: break _delete(execs) def _delete(executions): for execution in executions: try: # Set up project_id for _secure_query to delete the execution. # TODO(tuan_luong): Manipulation with auth_ctx should be # out of db transaction scope.
ctx = auth_ctx.MistralContext( user=None, tenant=execution.project_id, auth_token=None, is_admin=True ) auth_ctx.set_ctx(ctx) LOG.debug( 'Deleting execution [id=%s, updated_at=%s] ' 'according to the expiration policy', execution.id, execution.updated_at ) db_api.delete_workflow_execution(execution.id) except Exception: msg = ("Failed to delete [execution_id=%s]\n %s" % (execution.id, traceback.format_exc())) LOG.warning(msg) finally: auth_ctx.set_ctx(None) def run_execution_expiration_policy(self, ctx): LOG.debug("Starting expiration policy.") older_than = CONF.execution_expiration_policy.older_than exp_time = (datetime.datetime.utcnow() - datetime.timedelta(minutes=older_than)) batch_size = CONF.execution_expiration_policy.batch_size max_executions = CONF.execution_expiration_policy.max_finished_executions # The default value of batch_size is 0. If it is not set, the batch # will include the total number of expired executions. _delete_executions(batch_size, exp_time, max_executions) def _check_ignored_states_config(): ignored_states = CONF.execution_expiration_policy.ignored_states for state in ignored_states: if state not in states.TERMINAL_STATES: raise ValueError( '{} is not a terminal state. The valid states are [{}]' .format(state, states.TERMINAL_STATES)) def setup(): tg = threadgroup.ThreadGroup() pt = ExecutionExpirationPolicy(CONF) _check_ignored_states_config() ctx = auth_ctx.MistralContext( user=None, tenant=None, auth_token=None, is_admin=True ) tg.add_dynamic_timer( pt.run_periodic_tasks, initial_delay=None, periodic_interval_max=1, context=ctx ) return tg ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/legacy_scheduler.py0000644000175000017500000002562600000000000023300 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime import eventlet import random import sys import threading from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from osprofiler import profiler from mistral import context from mistral.db import utils as db_utils from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.scheduler import base as sched_base from mistral_lib import utils LOG = logging.getLogger(__name__) def _schedule_call(factory_method_path, target_method_name, run_after, serializers=None, key=None, **method_args): """Schedules a call and later invokes target_method. Adds this call specification to the DB; after run_after seconds the scheduler service invokes the target_method. :param factory_method_path: Full python-specific path to the factory method that creates a target object that the call will be made against. :param target_method_name: Name of the method which will be invoked. :param run_after: Value in seconds.
:param serializers: A map of argument names to their serializer class paths. Use it when an argument is an object of a specific type that needs to be serialized. Example: { "result": "mistral.utils.serializer.ResultSerializer"} The serializer for the object type must implement the serializer interface in mistral/utils/serializer.py :param key: Key which can potentially be used for squashing similar delayed calls. :param method_args: Target method keyword arguments. """ ctx_serializer = context.RpcContextSerializer() ctx = ( ctx_serializer.serialize_context(context.ctx()) if context.has_ctx() else {} ) execution_time = (utils.utc_now_sec() + datetime.timedelta(seconds=run_after)) if serializers: for arg_name, serializer_path in serializers.items(): if arg_name not in method_args: raise exc.MistralException( "Serializable method argument %s" " not found in method_args=%s" % (arg_name, method_args)) try: serializer = importutils.import_class(serializer_path)() except ImportError as e: raise ImportError( "Cannot import class %s: %s" % (serializer_path, e) ) method_args[arg_name] = serializer.serialize(method_args[arg_name]) values = { 'factory_method_path': factory_method_path, 'target_method_name': target_method_name, 'execution_time': execution_time, 'auth_context': ctx, 'serializers': serializers, 'key': key, 'method_arguments': method_args, 'processing': False } db_api.create_delayed_call(values) class LegacyScheduler(sched_base.Scheduler): def __init__(self, conf): self._stopped = False self._thread = threading.Thread(target=self._loop) self._thread.daemon = True self._fixed_delay = conf.fixed_delay self._random_delay = conf.random_delay self._batch_size = conf.batch_size def schedule(self, job, allow_redistribute=False): _schedule_call( job.target_factory_func_name, job.func_name, job.run_after, serializers=job.func_arg_serializers, key=job.key, **job.func_args ) def has_scheduled_jobs(self, **filters): return db_api.get_delayed_calls_count(**filters) > 0 def start(self): self._thread.start() def stop(self, graceful=False): self._stopped = True if graceful: self._thread.join() def _loop(self): # The scheduler runs jobs in a separate thread that's neither related # to an RPC nor a REST request processing thread. So we need to # initialize a profiler specifically for this thread. if cfg.CONF.profiler.enabled: profiler.init(cfg.CONF.profiler.hmac_keys) while not self._stopped: LOG.debug("Starting Scheduler loop [scheduler=%s]...", self) try: self._process_delayed_calls() except Exception: LOG.exception( "Scheduler failed to process delayed calls" " due to an unexpected exception." ) # For some mysterious reason (probably eventlet related) # the exception is not cleared from the context automatically. # This results in subsequent log.warning calls to show invalid # info. if sys.version_info < (3,): sys.exc_clear() eventlet.sleep( self._fixed_delay + random.Random().randint(0, self._random_delay * 1000) * 0.001 ) def _process_delayed_calls(self, ctx=None): """Runs delayed calls that are due. This algorithm should work with transactions having at least the 'READ-COMMITTED' isolation level. :param ctx: Auth context. """ # Select and capture calls matching time criteria. db_calls = self._capture_calls(self._batch_size) if not db_calls: return # Determine target methods, deserialize arguments etc. prepared_calls = self._prepare_calls(db_calls) # Invoke prepared calls. self._invoke_calls(prepared_calls) # Delete invoked calls from DB.
self.delete_calls(db_calls) @staticmethod @db_utils.retry_on_db_error def _capture_calls(batch_size): """Captures delayed calls eligible for processing (based on time). The intention of this method is to select delayed calls based on time criteria and mark them in the DB as being processed so that no other threads can process them in parallel. :return: A list of delayed calls captured for further processing. """ result = [] time_filter = utils.utc_now_sec() + datetime.timedelta(seconds=1) with db_api.transaction(): candidates = db_api.get_delayed_calls_to_start( time_filter, batch_size ) for call in candidates: # Mark this delayed call as being processed in order to # prevent other parallel transactions from capturing it. db_call, updated_cnt = db_api.update_delayed_call( id=call.id, values={'processing': True}, query_filter={'processing': False} ) # If updated_cnt != 1 then another scheduler # has already updated it. if updated_cnt == 1: result.append(db_call) LOG.debug("Scheduler captured %s delayed calls.", len(result)) return result @staticmethod def _prepare_calls(raw_calls): """Prepares delayed calls for invocation. After delayed calls have been selected from the DB they still need to be prepared for further use: we need to build the final target methods and deserialize arguments, if needed. :param raw_calls: Delayed calls fetched from DB (DB models). :return: A list of tuples (target_auth_context, target_method, method_args) where all data is properly deserialized. """ result = [] for call in raw_calls: LOG.debug( 'Preparing next delayed call. ' '[ID=%s, factory_method_path=%s, target_method_name=%s, ' 'method_arguments=%s]', call.id, call.factory_method_path, call.target_method_name, call.method_arguments ) target_auth_context = copy.deepcopy(call.auth_context) if call.factory_method_path: factory = importutils.import_class(call.factory_method_path) target_method = getattr(factory(), call.target_method_name) else: target_method = importutils.import_class( call.target_method_name ) method_args = copy.deepcopy(call.method_arguments) if call.serializers: # Deserialize arguments. for arg_name, ser_path in call.serializers.items(): serializer = importutils.import_class(ser_path)() deserialized = serializer.deserialize( method_args[arg_name] ) method_args[arg_name] = deserialized result.append((target_auth_context, target_method, method_args)) return result @staticmethod def _invoke_calls(delayed_calls): """Invokes prepared delayed calls. :param delayed_calls: Prepared delayed calls represented as tuples (target_auth_context, target_method, method_args). """ ctx_serializer = context.RpcContextSerializer() for (target_auth_context, target_method, method_args) in delayed_calls: try: # Set the correct context for the method. ctx_serializer.deserialize_context(target_auth_context) # Invoke the method. target_method(**method_args) except Exception as e: LOG.exception( "Delayed call failed, method: %s, exception: %s", target_method, e ) finally: # Remove context. context.set_ctx(None) @staticmethod @db_utils.retry_on_db_error def delete_calls(db_calls): """Deletes delayed calls. :param db_calls: Delayed calls to delete from DB. """ try: db_api.delete_delayed_calls(id={'in': [c.id for c in db_calls]}) except Exception as e: LOG.error( "Failed to delete all delayed calls [exception=%s]", e ) # We have to re-raise any exception because the transaction # would already be invalid anyway. If it's a deadlock, it will # be handled by the retry_on_db_error decorator.
raise e LOG.debug("Scheduler deleted %s delayed calls.", len(db_calls)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/periodic.py0000644000175000017500000001316500000000000021567 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import json from oslo_config import cfg from oslo_log import log as logging from oslo_service import periodic_task from oslo_service import threadgroup from mistral import context as auth_ctx from mistral.db.v2 import api as db_api_v2 from mistral import exceptions as exc from mistral.rpc import clients as rpc from mistral.services import security from mistral.services import triggers LOG = logging.getLogger(__name__) CONF = cfg.CONF # {periodic_task: thread_group} _periodic_tasks = {} def process_cron_triggers_v2(self, ctx): LOG.debug("Processing cron triggers...") for trigger in triggers.get_next_cron_triggers(): LOG.debug("Processing cron trigger: %s", trigger) try: # Set up an admin context before scheduling triggers. ctx = security.create_context( trigger.trust_id, trigger.project_id ) auth_ctx.set_ctx(ctx) LOG.debug("Cron trigger security context: %s", ctx) # Try to advance the cron trigger next_execution_time and # remaining_executions if relevant. modified = advance_cron_trigger(trigger) # If the cron trigger was not already modified by another engine. if modified: LOG.debug( "Starting workflow '%s' by cron trigger '%s'", trigger.workflow.name, trigger.name ) description = { "description": ( "Workflow execution created by cron" " trigger '(%s)'." % trigger.id ), "triggered_by": { "type": "cron_trigger", "id": trigger.id, "name": trigger.name, } } rpc.get_engine_client().start_workflow( trigger.workflow.name, trigger.workflow.namespace, None, trigger.workflow_input, description=json.dumps(description), **trigger.workflow_params ) except Exception: # Log and continue to the next cron trigger. LOG.exception( "Failed to process cron trigger %s", str(trigger) ) finally: auth_ctx.set_ctx(None) class MistralPeriodicTasks(periodic_task.PeriodicTasks): def __init__(self, conf): super(MistralPeriodicTasks, self).__init__(conf) periodic_task_ = periodic_task.periodic_task( spacing=CONF.cron_trigger.execution_interval, run_immediately=True, ) self.add_periodic_task(periodic_task_(process_cron_triggers_v2)) def advance_cron_trigger(t): modified_count = 0 try: # If the cron trigger is defined with a limited execution count. if t.remaining_executions is not None and t.remaining_executions > 0: t.remaining_executions -= 1 # If this is the last execution. if t.remaining_executions == 0: modified_count = triggers.delete_cron_trigger( t.name, trust_id=t.trust_id, delete_trust=False ) else: # Otherwise remaining_executions is None or > 0. # In case we are lagging, or the API was stopped for some time, # we use the max of the current time and the next scheduled time.
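# For example (hypothetical times, pattern '*/10 * * * *'): if # next_execution_time is 10:00 but this code only runs at 10:25, # computing from max(utcnow(), next_execution_time) yields 10:30 # and avoids a burst of catch-up runs at 10:10 and 10:20.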
next_time = triggers.get_next_execution_time( t.pattern, max(datetime.datetime.utcnow(), t.next_execution_time) ) # Update the cron trigger with next execution details # only if it wasn't already updated by a different process. updated, modified_count = db_api_v2.update_cron_trigger( t.name, { 'next_execution_time': next_time, 'remaining_executions': t.remaining_executions }, query_filter={ 'next_execution_time': t.next_execution_time } ) except exc.DBEntityNotFoundError as e: # Cron trigger was probably already deleted by a different process. LOG.debug( "Cron trigger named '%s' does not exist anymore: %s", t.name, str(e) ) # Return True if this engine was able to modify the cron trigger in DB. return modified_count > 0 def setup(): tg = threadgroup.ThreadGroup() pt = MistralPeriodicTasks(CONF) ctx = auth_ctx.MistralContext( user=None, tenant=None, auth_token=None, is_admin=True ) tg.add_dynamic_timer( pt.run_periodic_tasks, initial_delay=None, periodic_interval_max=1, context=ctx ) _periodic_tasks[pt] = tg return tg def stop_all_periodic_tasks(): for tg in _periodic_tasks.values(): tg.stop() _periodic_tasks.clear() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/security.py0000644000175000017500000000602200000000000021632 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from mistral import context as auth_ctx from mistral.utils.openstack import keystone LOG = logging.getLogger(__name__) CONF = cfg.CONF # Make sure to import 'auth_enable' option before using it. # TODO(rakhmerov): Try to find a better solution. CONF.import_opt('auth_enable', 'mistral.config', group='pecan') DEFAULT_PROJECT_ID = "" def get_project_id(): if CONF.pecan.auth_enable and auth_ctx.has_ctx(): return auth_ctx.ctx().project_id else: return DEFAULT_PROJECT_ID def create_trust(): client = keystone.client() ctx = auth_ctx.ctx() trustee_id = keystone.client_for_admin().session.get_user_id() return client.trusts.create( trustor_user=client.user_id, trustee_user=trustee_id, impersonation=True, role_names=ctx.roles, project=ctx.project_id ) def create_context(trust_id, project_id): """Creates Mistral security context. :param trust_id: Trust Id. :param project_id: Project Id. :return: Mistral security context. """ if CONF.pecan.auth_enable: client = keystone.client_for_trusts(trust_id) if client.session: # Method get_token is deprecated, using get_auth_headers. token = client.session.get_auth_headers().get('X-Auth-Token') user_id = client.session.get_user_id() else: token = client.auth_token user_id = client.user_id return auth_ctx.MistralContext( user=user_id, tenant=project_id, auth_token=token, is_trust_scoped=True, trust_id=trust_id, ) return auth_ctx.MistralContext( user=None, tenant=None, auth_token=None, is_admin=True ) def delete_trust(trust_id=None): if not trust_id: # Try to retrieve trust from context. 
if auth_ctx.has_ctx(): trust_id = auth_ctx.ctx().trust_id if not trust_id: return keystone_client = keystone.client_for_trusts(trust_id) try: keystone_client.trusts.delete(trust_id) except Exception as e: LOG.warning("Failed to delete trust [id=%s]: %s", trust_id, e) def add_trust_id(secure_object_values): if cfg.CONF.pecan.auth_enable: trust = create_trust() secure_object_values.update({ 'trust_id': trust.id }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/triggers.py0000644000175000017500000001713500000000000021620 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import croniter import datetime import json import six from oslo_log import log as logging from mistral.db.v2 import api as db_api from mistral.engine import utils as eng_utils from mistral import exceptions as exc from mistral.lang import parser from mistral.rpc import clients as rpc from mistral.services import security LOG = logging.getLogger(__name__) def get_next_execution_time(pattern, start_time): return croniter.croniter(pattern, start_time).get_next( datetime.datetime ) # Triggers v2. def get_next_cron_triggers(): return db_api.get_next_cron_triggers( datetime.datetime.utcnow() + datetime.timedelta(0, 2) ) def validate_cron_trigger_input(pattern, first_time, count): if not (first_time or pattern): raise exc.InvalidModelException( 'Pattern or first_execution_time must be specified.' ) if first_time: valid_min_time = datetime.datetime.utcnow() + datetime.timedelta(0, 60) if valid_min_time > first_time: raise exc.InvalidModelException( 'first_execution_time must be at least 1 minute in the future.' ) if not pattern and count and count > 1: raise exc.InvalidModelException( 'Pattern must be provided if count is greater than 1.' ) if pattern: try: croniter.croniter(pattern) except (ValueError, KeyError): raise exc.InvalidModelException( 'The specified pattern is not valid: {}'.format(pattern) ) def create_cron_trigger(name, workflow_name, workflow_input, workflow_params=None, pattern=None, first_time=None, count=None, start_time=None, workflow_id=None): if not start_time: start_time = datetime.datetime.utcnow() if isinstance(first_time, six.string_types): try: first_time = datetime.datetime.strptime( first_time, '%Y-%m-%d %H:%M' ) except ValueError as e: raise exc.InvalidModelException(str(e)) validate_cron_trigger_input(pattern, first_time, count) if first_time: next_time = first_time if not (pattern or count): count = 1 else: next_time = get_next_execution_time(pattern, start_time) with db_api.transaction(): wf_def = db_api.get_workflow_definition( workflow_id if workflow_id else workflow_name ) wf_spec = parser.get_workflow_spec_by_definition_id( wf_def.id, wf_def.updated_at ) # TODO(rakhmerov): Use Workflow object here instead of utils.
eng_utils.validate_input( wf_spec.get_input(), workflow_input, wf_spec.get_name(), wf_spec.__class__.__name__ ) trigger_parameters = { 'name': name, 'pattern': pattern, 'first_execution_time': first_time, 'next_execution_time': next_time, 'remaining_executions': count, 'workflow_name': wf_def.name, 'workflow_id': wf_def.id, 'workflow_input': workflow_input or {}, 'workflow_params': workflow_params or {}, 'scope': 'private' } security.add_trust_id(trigger_parameters) try: trig = db_api.create_cron_trigger(trigger_parameters) except Exception: # Delete trust before raising exception. security.delete_trust(trigger_parameters.get('trust_id')) raise return trig def delete_cron_trigger(identifier, trust_id=None, delete_trust=True): if not trust_id: trigger = db_api.get_cron_trigger(identifier) trust_id = trigger.trust_id modified_count = db_api.delete_cron_trigger(identifier) if modified_count and delete_trust: # Delete trust only together with deleting trigger. security.delete_trust(trust_id) return modified_count def create_event_trigger(name, exchange, topic, event, workflow_id, scope='private', workflow_input=None, workflow_params=None): with db_api.transaction(): wf_def = db_api.get_workflow_definition_by_id(workflow_id) wf_spec = parser.get_workflow_spec_by_definition_id( wf_def.id, wf_def.updated_at ) # TODO(rakhmerov): Use Workflow object here instead of utils. eng_utils.validate_input( wf_spec.get_input(), workflow_input, wf_spec.get_name(), wf_spec.__class__.__name__ ) values = { 'name': name, 'workflow_id': workflow_id, 'workflow_input': workflow_input or {}, 'workflow_params': workflow_params or {}, 'exchange': exchange, 'topic': topic, 'event': event, 'scope': scope, } security.add_trust_id(values) trig = db_api.create_event_trigger(values) trigs = db_api.get_event_triggers(insecure=True, exchange=exchange, topic=topic) events = [t.event for t in trigs] # NOTE(kong): Send RPC message within the db transaction, rollback if # any error occurs. trig_dict = trig.to_dict() trig_dict['workflow_namespace'] = wf_def.namespace rpc.get_event_engine_client().create_event_trigger( trig_dict, events ) return trig def delete_event_trigger(event_trigger): db_api.delete_event_trigger(event_trigger['id']) trigs = db_api.get_event_triggers( insecure=True, exchange=event_trigger['exchange'], topic=event_trigger['topic'] ) events = set([t.event for t in trigs]) # NOTE(kong): Send RPC message within the db transaction, rollback if # any error occurs. rpc.get_event_engine_client().delete_event_trigger( event_trigger, list(events) ) security.delete_trust(event_trigger['trust_id']) def update_event_trigger(id, values): trig = db_api.update_event_trigger(id, values) # NOTE(kong): Send RPC message within the db transaction, rollback if # any error occurs. 
rpc.get_event_engine_client().update_event_trigger(trig.to_dict()) return trig def on_workflow_complete(wf_ex): if wf_ex.task_execution_id: return if not wf_ex.description: return try: description = json.loads(wf_ex.description) except ValueError as e: LOG.debug(str(e)) return if not isinstance(description, dict): return triggered = description.get('triggered_by') if not triggered: return if triggered['type'] == 'cron_trigger': if not db_api.load_cron_trigger(triggered['name']): security.delete_trust() elif triggered['type'] == 'event_trigger': if not db_api.load_event_trigger(triggered['id'], True): security.delete_trust() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/workbooks.py0000644000175000017500000001112600000000000022004 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.db.v2 import api as db_api_v2 from mistral.lang import parser as spec_parser from mistral import services from mistral.services import actions def create_workbook_v2(definition, namespace='', scope='private', validate=True): wb_spec = spec_parser.get_workbook_spec_from_yaml( definition, validate=services.is_validation_enabled(validate) ) wb_values = _get_workbook_values( wb_spec, definition, scope, namespace ) with db_api_v2.transaction(): wb_db = db_api_v2.create_workbook(wb_values) _on_workbook_update(wb_db, wb_spec, namespace) return wb_db def update_workbook_v2(definition, namespace='', scope='private', validate=True): wb_spec = spec_parser.get_workbook_spec_from_yaml( definition, validate=services.is_validation_enabled(validate) ) values = _get_workbook_values(wb_spec, definition, scope, namespace) with db_api_v2.transaction(): wb_db = db_api_v2.update_workbook(values['name'], values) _, db_wfs = _on_workbook_update(wb_db, wb_spec, namespace) return wb_db def _on_workbook_update(wb_db, wb_spec, namespace=''): db_actions = _create_or_update_actions( wb_db, wb_spec.get_actions(), namespace=namespace ) db_wfs = _create_or_update_workflows( wb_db, wb_spec.get_workflows(), namespace ) return db_actions, db_wfs def _create_or_update_actions(wb_db, actions_spec, namespace): db_actions = [] if actions_spec: for action_spec in actions_spec: action_name = '%s.%s' % (wb_db.name, action_spec.get_name()) input_list = actions.get_input_list( action_spec.to_dict().get('input', []) ) values = { 'name': action_name, 'spec': action_spec.to_dict(), 'tags': action_spec.get_tags(), 'definition': _get_action_definition(wb_db, action_spec), 'description': action_spec.get_description(), 'is_system': False, 'input': ', '.join(input_list) if input_list else None, 'scope': wb_db.scope, 'project_id': wb_db.project_id, 'namespace': namespace } db_actions.append( db_api_v2.create_or_update_action_definition( action_name, values ) ) return db_actions def _create_or_update_workflows(wb_db, workflows_spec, namespace): db_wfs = [] if workflows_spec: for 
wf_spec in workflows_spec: wf_name = '%s.%s' % (wb_db.name, wf_spec.get_name()) values = { 'name': wf_name, 'definition': _get_wf_definition(wb_db, wf_spec), 'spec': wf_spec.to_dict(), 'scope': wb_db.scope, 'project_id': wb_db.project_id, 'namespace': namespace, 'tags': wf_spec.get_tags(), 'is_system': False } db_wfs.append( db_api_v2.create_or_update_workflow_definition(wf_name, values) ) return db_wfs def _get_workbook_values(wb_spec, definition, scope, namespace=None): values = { 'name': wb_spec.get_name(), 'tags': wb_spec.get_tags(), 'definition': definition, 'spec': wb_spec.to_dict(), 'scope': scope, 'namespace': namespace, 'is_system': False } return values def _get_wf_definition(wb_db, wf_spec): wf_definition = spec_parser.get_workflow_definition( wb_db.definition, wf_spec.get_name() ) return wf_definition def _get_action_definition(wb_db, action_spec): action_definition = spec_parser.get_action_definition( wb_db.definition, action_spec.get_name() ) return action_definition ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/services/workflows.py0000644000175000017500000001351200000000000022022 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
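# A typical entry point into this module is create_workflows() below. # A minimal usage sketch (assuming 'definition' holds valid Mistral v2 # workflow YAML): # # db_wfs = create_workflows(definition, scope='private', namespace='') # # Each returned item is the DB object of one workflow found in the # definition.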
from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.lang import parser as spec_parser from mistral import services from mistral.utils import safe_yaml from mistral.workflow import states from mistral_lib import utils from oslo_log import log as logging from stevedore import extension STD_WF_PATH = 'resources/workflows' LOG = logging.getLogger(__name__) def register_preinstalled_workflows(run_in_tx=True): extensions = extension.ExtensionManager( namespace='mistral.preinstalled_workflows', invoke_on_load=True ) for ext in extensions: for wf_path in ext.obj: register_workflow(run_in_tx, wf_path) def register_workflow(run_in_tx, wf_path): LOG.debug("Registering preinstalled workflow %s", wf_path) workflow_definition = open(wf_path).read() create_workflows( workflow_definition, scope='public', is_system=True, run_in_tx=run_in_tx, namespace='' ) def _clear_system_workflow_db(): db_api.delete_workflow_definitions(is_system=True) def sync_db(): LOG.debug("Syncing db...") with db_api.transaction(): _clear_system_workflow_db() register_preinstalled_workflows(run_in_tx=False) def create_workflows(definition, scope='private', is_system=False, run_in_tx=True, namespace='', validate=True): LOG.debug("Creating workflows...") wf_list_spec = spec_parser.get_workflow_list_spec_from_yaml( definition, validate=services.is_validation_enabled(validate) ) db_wfs = [] if run_in_tx: with db_api.transaction(): _append_all_workflows( definition, is_system, scope, namespace, wf_list_spec, db_wfs ) else: _append_all_workflows( definition, is_system, scope, namespace, wf_list_spec, db_wfs ) return db_wfs def _append_all_workflows(definition, is_system, scope, namespace, wf_list_spec, db_wfs): wfs = wf_list_spec.get_workflows() wfs_yaml = safe_yaml.load(definition) if len(wfs) != 1 else None for wf_spec in wfs: if len(wfs) != 1: definition = _cut_wf_definition_from_all( wfs_yaml, wf_spec.get_name() ) db_wfs.append( _create_workflow( wf_spec, definition, scope, namespace, is_system ) ) def update_workflows(definition, scope='private', identifier=None, namespace='', validate=True): LOG.debug("Updating workflows...") wf_list_spec = spec_parser.get_workflow_list_spec_from_yaml( definition, validate=services.is_validation_enabled(validate) ) wfs = wf_list_spec.get_workflows() if identifier and len(wfs) > 1: raise exc.InputException( "Updating more than one workflow with an identifier is " "not supported. [identifier: %s]" % identifier ) db_wfs = [] wfs_yaml = safe_yaml.load(definition) if len(wfs) != 1 else None with db_api.transaction(): for wf_spec in wfs: if len(wfs) != 1: definition = _cut_wf_definition_from_all( wfs_yaml, wf_spec.get_name() ) db_wfs.append( _update_workflow( wf_spec, definition, scope, namespace=namespace, identifier=identifier ) ) return db_wfs def update_workflow_execution_env(wf_ex, env): if not env: return wf_ex if wf_ex.state not in [states.IDLE, states.PAUSED, states.ERROR]: raise exc.NotAllowedException( 'Updating the env of a workflow execution is only permitted if ' 'it is in IDLE, PAUSED, or ERROR state.'
mistral-10.0.0.0b3/mistral/tests/__init__.py

mistral-10.0.0.0b3/mistral/tests/releasenotes/notes/return-errors-for-std-mistral-http-b852b6d8f0034477.yaml

---
features:
  - |
    The action std.mistral_http will now return an error if the HTTP request
    fails. Previously the task would still go into the RUNNING state and wait
    to be completed by the external resource.
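To make the behavior change above concrete, a hedged sketch of a workflow that reacts to such a failure, written in the embedded-YAML style this test tree uses; the task names and URL are hypothetical.

# Hypothetical workflow reacting to a failed std.mistral_http call.
WF_WITH_HTTP_ERROR_HANDLER = """---
version: '2.0'

call_service:
  tasks:
    call:
      action: std.mistral_http url="http://example.com/api"
      on-error:
        - notify_failure

    notify_failure:
      action: std.echo output="HTTP request failed"
"""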
mistral-10.0.0.0b3/mistral/tests/resources/action_jinja.yaml

---
version: "2.0"

greeting:
  description: "This action says 'Hello'"
  tags: [hello]
  base: std.echo
  base-input:
    output: 'Hello, {{ _.name }}'
  input:
    - name
  output:
    string: '{{ _ }}'

farewell:
  base: std.echo
  base-input:
    output: 'Bye!'
  output:
    info: '{{ _ }}'

mistral-10.0.0.0b3/mistral/tests/resources/action_v2.yaml

---
version: "2.0"

greeting:
  description: "This action says 'Hello'"
  tags: [hello]
  base: std.echo
  base-input:
    output: 'Hello, <% $.name %>'
  input:
    - name
  output:
    string: <% $ %>

farewell:
  base: std.echo
  base-input:
    output: 'Bye!'
  output:
    info: <% $ %>

mistral-10.0.0.0b3/mistral/tests/resources/for_wf_namespace/lowest_level_wf.yaml

---
version: '2.0'

lowest_level_wf:
  tasks:
    noop_task:
      action: std.noop

mistral-10.0.0.0b3/mistral/tests/resources/for_wf_namespace/middle_wf.yaml

---
version: '2.0'

middle_wf:
  tasks:
    run_workflow_with_name_lowest_level_wf:
      workflow: lowest_level_wf

mistral-10.0.0.0b3/mistral/tests/resources/for_wf_namespace/top_level_wf.yaml

---
version: '2.0'

top_level_wf:
  tasks:
    run_workflow_with_name_middle_wf:
      workflow: middle_wf

mistral-10.0.0.0b3/mistral/tests/resources/single_wf.yaml

---
version: '2.0'

single_wf:
  type: direct

  tasks:
    hello:
      action: std.echo output="Hello"
      publish:
        result: <% task(hello).result %>
mistral-10.0.0.0b3/mistral/tests/resources/wb_v1.yaml

Namespaces:
  Greetings:
    actions:
      hello:
        class: std.echo
        base-parameters:
          output: Hello!

Workflow:
  tasks:
    hello:
      action: Greetings.hello

mistral-10.0.0.0b3/mistral/tests/resources/wb_v2.yaml

---
version: '2.0'

name: test

workflows:
  test:
    type: direct

    tasks:
      hello:
        action: std.echo output="Hello"
        publish:
          result: <% task(hello).result %>

mistral-10.0.0.0b3/mistral/tests/resources/wb_with_nested_wf.yaml

---
version: "2.0"

name: wb_with_nested_wf

workflows:
  wrapping_wf:
    type: direct

    tasks:
      call_inner_wf:
        workflow: inner_wf

  inner_wf:
    type: direct

    tasks:
      hello:
        action: std.echo output="Hello from inner workflow"

mistral-10.0.0.0b3/mistral/tests/resources/wf_action_ex_concurrency.yaml

---
version: '2.0'

test_action_ex_concurrency:
  tasks:
    test_with_items:
      with-items: index in <% range(2) %>
      action: std.echo output='<% $.index %>'

mistral-10.0.0.0b3/mistral/tests/resources/wf_jinja.yaml

---
version: '2.0'

wf:
  type: direct

  tasks:
    hello:
      action: std.echo output="Hello"
      wait-before: 1
      publish:
        result: '{{ task("hello").result }}'

wf1:
  type: reverse

  input:
    - farewell

  tasks:
    addressee:
      action: std.echo output="John"
      publish:
        name: '{{ task("addressee").result }}'

    goodbye:
      action: std.echo output="{{ _.farewell }}, {{ _.name }}"
      requires: [addressee]

wf2:
  type: direct

  tasks:
    hello:
      action: std.echo output="Hello"

mistral-10.0.0.0b3/mistral/tests/resources/wf_task_ex_concurrency.yaml

---
version: '2.0'

test_task_ex_concurrency:
  tasks:
    task1:
      action: std.async_noop
      timeout: 2

    task2:
      action: std.async_noop
      timeout: 2

mistral-10.0.0.0b3/mistral/tests/resources/wf_v2.yaml

---
version: '2.0'

wf:
  type: direct

  tasks:
    hello:
      action: std.echo output="Hello"
      wait-before: 1
      publish:
        result: <% task(hello).result %>

wf1:
  type: reverse

  input:
    - farewell

  tasks:
    addressee:
      action: std.echo output="John"
      publish:
        name: <% task(addressee).result %>

    goodbye:
      action: std.echo output="<% $.farewell %>, <% $.name %>"
      requires: [addressee]

wf2:
  type: direct

  tasks:
    hello:
      action: std.echo output="Hello"
mistral-10.0.0.0b3/mistral/tests/resources/workbook/v2/my_workbook.yaml

version: '2.0'

name: my_workbook
description: This is a test workbook
tags: [test, v2]

actions:
  action1:
    description: This is a test ad-hoc action
    tags: [test, v2]
    base: std.echo
    base-input:
      output: Hello <% $.name %>!
    output: <% $ %>

  action2:
    description: This is a test ad-hoc action with base params
    tags: [test, v2]
    base: std.echo output="Echo output"
    output: <% $ %>

workflows:
  wf1:
    description: This is a test workflow
    tags: [test, v2]
    type: reverse

    input:
      - name

    tasks:
      task1:
        description: This is a test task
        action: action1 name=<% $.name %>
        wait-before: 2
        wait-after: 5
        retry:
          count: 10
          delay: 30
          break-on: <% $.my_val = 10 %>
        concurrency: 3

      task2:
        requires: [task1]
        action: std.echo output="Thanks <% $.name %>!"

  wf2:
    tags: [test, v2]
    type: direct

    task-defaults:
      retry:
        count: 10
        delay: 30
        break-on: <% $.my_val = 10 %>
      on-error:
        - fail: <% $.my_val = 0 %>
      on-success:
        - pause
      on-complete:
        - succeed

    tasks:
      task3:
        workflow: wf1 name="John Doe" age=32 param1=null param2=false
        on-error:
          - task4: <% $.my_val = 1 %>
        on-success:
          - task5: <% $.my_val = 2 %>
        on-complete:
          - task6: <% $.my_val = 3 %>

      task4:
        action: std.echo output="Task 4 echo"

      task5:
        action: std.echo output="Task 5 echo"

      task6:
        action: std.echo output="Task 6 echo"

      task7:
        with-items: vm_info in <% $.vms %>
        workflow: wf2 is_true=true object_list=[1, null, "str"] is_string="50"
        on-complete:
          - task9
          - task10

      task8:
        with-items:
          - itemX in <% $.arrayI %>
          - itemY in <% $.arrayJ %>
        workflow: wf2 expr_list=["<% $.v %>", "<% $.k %>"] expr=<% $.value %>
        target: nova
        on-complete:
          - task9
          - task10
          - task11

      task9:
        join: all
        action: std.echo output="Task 9 echo"

      task10:
        join: 2
        action: std.echo output="Task 10 echo"

      task11:
        join: one
        action: std.echo output="Task 11 echo"

      task12:
        action: std.http url="http://site.com?q=<% $.query %>" params=""

      task13:
        description: No-op task
mistral-10.0.0.0b3/mistral/tests/resources/workbook/v2/workbook_schema_test.yaml

version: '2.0'

name: workbook_schema_test
description: >
  This is a test workbook to verify the workbook schema validation.
  Specifically we want to test the validation of workflow names.
  See bug #1645354 for more details.

actions:
  actionversion:
    base: std.noop

  versionaction:
    base: std.noop

  actionversionaction:
    base: std.noop

  action-action:
    base: std.noop

workflows:
  workflowversion:
    description: Workflow name ending with version
    tasks:
      task1:
        action: actionversion

  versionworkflow:
    description: Workflow name starting with version
    tasks:
      task1:
        action: versionaction

  workflowversionworkflow:
    description: Workflow name with version in the middle
    tasks:
      task1:
        action: actionversionaction

  version_workflow:
    description: Workflow name starting with version and an underscore
    tasks:
      task1:
        workflow: workflowversion

  workflow-with-hyphen:
    description: Workflow name containing -
    tasks:
      task1:
        action: action-action

mistral-10.0.0.0b3/mistral/tests/unit/__init__.py

# Copyright 2016 NEC Corporation. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import sys

import eventlet

eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=False if '--use-debugger' in sys.argv else True,
    time=True
)

mistral-10.0.0.0b3/mistral/tests/unit/actions/__init__.py
mistral-10.0.0.0b3/mistral/tests/unit/actions/test_action_manager.py

# Copyright 2014 - Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

from mistral.actions import std_actions as std
from mistral.services import action_manager as a_m
from mistral.tests.unit import base


class ActionManagerTest(base.DbTestCase):
    def test_register_standard_actions(self):
        action_list = a_m.get_registered_actions()

        self._assert_single_item(action_list, name="std.echo")
        self._assert_single_item(action_list, name="std.email")
        self._assert_single_item(action_list, name="std.http")
        self._assert_single_item(action_list, name="std.mistral_http")
        self._assert_single_item(action_list, name="std.ssh")
        self._assert_single_item(action_list, name="std.javascript")

    def test_get_action_class(self):
        self.assertTrue(
            issubclass(a_m.get_action_class("std.echo"), std.EchoAction)
        )
        self.assertTrue(
            issubclass(a_m.get_action_class("std.http"), std.HTTPAction)
        )
        self.assertTrue(
            issubclass(
                a_m.get_action_class("std.mistral_http"),
                std.MistralHTTPAction
            )
        )
        self.assertTrue(
            issubclass(a_m.get_action_class("std.email"), std.SendEmailAction)
        )
        self.assertTrue(
            issubclass(
                a_m.get_action_class("std.javascript"), std.JavaScriptAction
            )
        )

mistral-10.0.0.0b3/mistral/tests/unit/actions/test_javascript_action.py

# Copyright 2015 - Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import mock

from mistral.actions import std_actions as std
from mistral.tests.unit import base
from mistral.utils import javascript


class JavascriptActionTest(base.BaseTest):
    @mock.patch.object(
        javascript, 'evaluate', mock.Mock(return_value="3")
    )
    def test_js_action(self):
        mock_ctx = mock.Mock()
        script = "return 1 + 2"

        action = std.JavaScriptAction(script)

        self.assertEqual("3", action.run(mock_ctx))
mistral-10.0.0.0b3/mistral/tests/unit/actions/test_std_echo_action.py

# Copyright 2014 - Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import mock

from mistral.actions import std_actions as std
from mistral.tests.unit import base


class EchoActionTest(base.BaseTest):
    def test_fake_action(self):
        expected = "my output"
        mock_ctx = mock.Mock()

        action = std.EchoAction(expected)

        self.assertEqual(expected, action.run(mock_ctx))

mistral-10.0.0.0b3/mistral/tests/unit/actions/test_std_email_action.py

# -*- coding: utf-8 -*-
#
# Copyright 2014 - Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import base64
from email.header import decode_header
from email import parser

import mock
import six
import testtools

from mistral.actions import std_actions as std
from mistral import exceptions as exc
from mistral.tests.unit import base

"""
To try against a real SMTP server:

1) set LOCAL_SMTPD = True
   and run a debugging smtpd on the local machine:
   `sudo python -m smtpd -c DebuggingServer -n localhost:25`
   The debugging server doesn't support passwords.

2) set REMOTE_SMTP = True
   to use an external SMTP server (like gmail); change the configuration
   and provide an actual username and password:
       self.settings = {
           'host': 'smtp.gmail.com:587',
           'from': 'youraccount@gmail.com',
           'password': 'secret'
       }
"""

LOCAL_SMTPD = False
REMOTE_SMTP = False


class SendEmailActionTest(base.BaseTest):
    def setUp(self):
        super(SendEmailActionTest, self).setUp()

        self.to_addrs = ["dz@example.com", "deg@example.com",
                         "xyz@example.com"]
        self.reply_to = ['reply-to@example.com']
        self.cc_addrs = ['copy@example.com']
        self.bcc_addrs = ['hidden_copy@example.com']
        self.subject = "Multi word subject с русскими буквами"
        self.body = "short multiline\nbody\nc русскими буквами"
        self.html_body = 'HTML body'
        self.smtp_server = 'mail.example.com:25'
        self.from_addr = "bot@example.com"
        self.to_addrs_str = ", ".join(self.to_addrs)
        self.reply_to_str = ", ".join(self.reply_to)
        self.ctx = mock.Mock()

    @testtools.skipIf(not LOCAL_SMTPD, "Setup local smtpd to run it")
    def test_send_email_real(self):
        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            smtp_server=self.smtp_server,
            smtp_password=None,
            subject=self.subject,
            body=self.body
        )

        action.run(self.ctx)

    @testtools.skipIf(not REMOTE_SMTP, "Configure Remote SMTP to run it")
    def test_with_password_real(self):
        self.to_addrs = ["dz@stackstorm.com"]
        self.smtp_server = 'mail.example.com:25'
        self.from_addr = "bot@example.com"
        self.smtp_password = 'secret'

        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            smtp_server=self.smtp_server,
            smtp_password=self.smtp_password,
            subject=self.subject,
            body=self.body
        )

        action.run(self.ctx)

    @mock.patch('smtplib.SMTP')
    def test_with_multi_to_addrs(self, smtp):
        smtp_password = "secret"

        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            smtp_server=self.smtp_server,
            smtp_password=smtp_password,
            subject=self.subject,
            body=self.body
        )

        action.run(self.ctx)

    @mock.patch('smtplib.SMTP')
    def test_with_one_to_addr(self, smtp):
        to_addr = ["dz@example.com"]
        smtp_password = "secret"

        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=to_addr,
            smtp_server=self.smtp_server,
            smtp_password=smtp_password,
            subject=self.subject,
            body=self.body
        )

        action.run(self.ctx)

    @mock.patch('smtplib.SMTP')
    def test_send_email(self, smtp):
        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            smtp_server=self.smtp_server,
            smtp_password=None,
            subject=self.subject,
            body=self.body
        )

        action.run(self.ctx)

        smtp.assert_called_once_with(self.smtp_server)

        sendmail = smtp.return_value.sendmail

        self.assertTrue(sendmail.called, "should call sendmail")
        self.assertEqual(
            self.from_addr, sendmail.call_args[1]['from_addr'])
        self.assertEqual(
            self.to_addrs, sendmail.call_args[1]['to_addrs'])

        message = parser.Parser().parsestr(sendmail.call_args[1]['msg'])

        self.assertEqual(self.from_addr, message['from'])
        self.assertEqual(self.to_addrs_str, message['to'])

        if six.PY3:
            self.assertEqual(
                self.subject,
                decode_header(message['subject'])[0][0].decode('utf-8')
            )
        else:
            self.assertEqual(
                self.subject.decode('utf-8'),
                decode_header(message['subject'])[0][0].decode('utf-8')
            )

        if six.PY3:
            self.assertEqual(
                self.body,
                base64.b64decode(message.get_payload()).decode('utf-8')
            )
        else:
            self.assertEqual(
                self.body.decode('utf-8'),
                base64.b64decode(message.get_payload()).decode('utf-8')
            )

    @mock.patch('smtplib.SMTP')
    def test_send_email_with_cc(self, smtp):
        to_addrs = self.cc_addrs + self.to_addrs
        cc_addrs_str = ", ".join(self.cc_addrs)

        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            cc_addrs=self.cc_addrs,
            smtp_server=self.smtp_server,
            smtp_password=None,
            subject=self.subject,
            body=self.body
        )

        action.run(self.ctx)

        smtp.assert_called_once_with(self.smtp_server)

        sendmail = smtp.return_value.sendmail

        self.assertTrue(sendmail.called, "should call sendmail")
        self.assertEqual(
            self.from_addr, sendmail.call_args[1]['from_addr'])
        self.assertEqual(
            to_addrs, sendmail.call_args[1]['to_addrs'])

        message = parser.Parser().parsestr(sendmail.call_args[1]['msg'])

        self.assertEqual(self.from_addr, message['from'])
        self.assertEqual(self.to_addrs_str, message['to'])
        self.assertEqual(cc_addrs_str, message['cc'])

    @mock.patch('smtplib.SMTP')
    def test_send_email_with_bcc(self, smtp):
        to_addrs = self.bcc_addrs + self.to_addrs

        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            bcc_addrs=self.bcc_addrs,
            smtp_server=self.smtp_server,
            smtp_password=None,
            subject=self.subject,
            body=self.body
        )

        action.run(self.ctx)

        smtp.assert_called_once_with(self.smtp_server)

        sendmail = smtp.return_value.sendmail

        self.assertTrue(sendmail.called, "should call sendmail")
        self.assertEqual(
            self.from_addr, sendmail.call_args[1]['from_addr'])
        self.assertEqual(
            to_addrs, sendmail.call_args[1]['to_addrs'])

        message = parser.Parser().parsestr(sendmail.call_args[1]['msg'])

        self.assertEqual(self.from_addr, message['from'])
        self.assertEqual(self.to_addrs_str, message['to'])

    @mock.patch('smtplib.SMTP')
    def test_send_email_with_reply_to(self, smtp):
        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            reply_to=self.reply_to,
            bcc_addrs=self.bcc_addrs,
            smtp_server=self.smtp_server,
            smtp_password=None,
            subject=self.subject,
            body=self.body
        )

        action.run(self.ctx)

        smtp.assert_called_once_with(self.smtp_server)

        sendmail = smtp.return_value.sendmail

        self.assertTrue(sendmail.called, "should call sendmail")
        self.assertEqual(
            self.from_addr, sendmail.call_args[1]['from_addr'])

        message = parser.Parser().parsestr(sendmail.call_args[1]['msg'])

        self.assertEqual(self.from_addr, message['from'])
        self.assertEqual(self.to_addrs_str, message['to'])
        self.assertEqual(self.reply_to_str, message['reply-to'])

    @mock.patch('smtplib.SMTP')
    def test_send_email_html(self, smtp):
        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            smtp_server=self.smtp_server,
            smtp_password=None,
            subject=self.subject,
            body=self.body,
            html_body=self.html_body
        )

        action.run(self.ctx)

        smtp.assert_called_once_with(self.smtp_server)

        sendmail = smtp.return_value.sendmail

        self.assertTrue(sendmail.called, "should call sendmail")
        self.assertEqual(
            self.from_addr, sendmail.call_args[1]['from_addr'])
        self.assertEqual(
            self.to_addrs, sendmail.call_args[1]['to_addrs'])

        message = parser.Parser().parsestr(sendmail.call_args[1]['msg'])

        self.assertEqual(self.from_addr, message['from'])
        self.assertEqual(self.to_addrs_str, message['to'])

        if six.PY3:
            self.assertEqual(
                self.subject,
                decode_header(message['subject'])[0][0].decode('utf-8')
            )
        else:
            self.assertEqual(
                self.subject.decode('utf-8'),
                decode_header(message['subject'])[0][0].decode('utf-8')
            )

        body_payload = message.get_payload(0).get_payload()

        if six.PY3:
            self.assertEqual(
                self.body,
                base64.b64decode(body_payload).decode('utf-8')
            )
        else:
            self.assertEqual(
                self.body.decode('utf-8'),
                base64.b64decode(body_payload).decode('utf-8')
            )

        html_body_payload = message.get_payload(1).get_payload()

        if six.PY3:
            self.assertEqual(
                self.html_body,
                base64.b64decode(html_body_payload).decode('utf-8')
            )
        else:
            self.assertEqual(
                self.html_body.decode('utf-8'),
                base64.b64decode(html_body_payload).decode('utf-8')
            )

    @mock.patch('smtplib.SMTP')
    def test_with_password(self, smtp):
        self.smtp_password = "secret"

        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            smtp_server=self.smtp_server,
            smtp_password=self.smtp_password,
            subject=self.subject,
            body=self.body
        )

        action.run(self.ctx)

        smtpmock = smtp.return_value

        calls = [mock.call.ehlo(), mock.call.starttls(), mock.call.ehlo(),
                 mock.call.login(self.from_addr, self.smtp_password)]

        smtpmock.assert_has_calls(calls)

        self.assertTrue(smtpmock.sendmail.called, "should call sendmail")

    @mock.patch('mistral.actions.std_actions.LOG')
    def test_exception(self, log):
        self.smtp_server = "wrong host"

        action = std.SendEmailAction(
            from_addr=self.from_addr,
            to_addrs=self.to_addrs,
            smtp_server=self.smtp_server,
            smtp_password=None,
            subject=self.subject,
            body=self.body
        )

        try:
            action.run(self.ctx)
        except exc.ActionException:
            pass
        else:
            self.fail("Must throw exception.")
mistral-10.0.0.0b3/mistral/tests/unit/actions/test_std_fail_action.py

# Copyright 2014 - Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import mock

from mistral.actions import std_actions as std
from mistral import exceptions as exc
from mistral.tests.unit import base


class FailActionTest(base.BaseTest):
    def test_fail_action(self):
        action = std.FailAction()

        self.assertRaises(exc.ActionException, action.run, mock.Mock)

    def test_fail_with_data(self):
        data = {
            "x": 1,
            "y": 2,
        }

        action = std.FailAction(error_data=data)

        action_result = action.run(context={})

        self.assertTrue(action_result.is_error())
        self.assertDictEqual(data, action_result.to_dict()['result'])

mistral-10.0.0.0b3/mistral/tests/unit/actions/test_std_http_action.py

# Copyright 2014 - Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import json

import mock
import requests

from mistral.actions import std_actions as std
from mistral.tests.unit import base
from mistral_lib import actions as mistral_lib_actions

URL = 'http://some_url'

DATA = {
    'server': {
        'id': '12345',
        'metadata': {
            'name': 'super_server'
        }
    }
}


def get_fake_response(content, code, **kwargs):
    return base.FakeHTTPResponse(
        content,
        code,
        **kwargs
    )


def get_success_fake_response():
    return get_fake_response(
        json.dumps(DATA), 200,
        headers={'Content-Type': 'application/json'}
    )


def get_error_fake_response():
    return get_fake_response(
        json.dumps(DATA), 401
    )


class HTTPActionTest(base.BaseTest):
    @mock.patch.object(requests, 'request')
    def test_http_action(self, mocked_method):
        mocked_method.return_value = get_success_fake_response()
        mock_ctx = mock.Mock()

        action = std.HTTPAction(
            url=URL,
            method='POST',
            body=DATA,
            timeout=20,
            allow_redirects=True
        )

        DATA_STR = json.dumps(DATA)

        self.assertEqual(DATA_STR, action.body)
        self.assertEqual(URL, action.url)

        result = action.run(mock_ctx)

        self.assertIsInstance(result, dict)
        self.assertEqual(DATA, result['content'])
        self.assertIn('headers', result)
        self.assertEqual(200, result['status'])

        mocked_method.assert_called_with(
            'POST',
            URL,
            data=DATA_STR,
            json=None,
            headers=None,
            cookies=None,
            params=None,
            timeout=20,
            auth=None,
            allow_redirects=True,
            proxies=None,
            verify=None
        )

    @mock.patch.object(requests, 'request')
    def test_http_action_error_result(self, mocked_method):
        mocked_method.return_value = get_error_fake_response()
        mock_ctx = mock.Mock()

        action = std.HTTPAction(
            url=URL,
            method='POST',
            body=DATA,
            timeout=20,
            allow_redirects=True
        )

        result = action.run(mock_ctx)

        self.assertIsInstance(result, mistral_lib_actions.Result)
        self.assertEqual(401, result.error['status'])

    @mock.patch.object(requests, 'request')
    def test_http_action_with_auth(self, mocked_method):
        mocked_method.return_value = get_success_fake_response()
        mock_ctx = mock.Mock()

        action = std.HTTPAction(
            url=URL,
            method='POST',
            auth='user:password'
        )

        action.run(mock_ctx)

        args, kwargs = mocked_method.call_args

        self.assertEqual(('user', 'password'), kwargs['auth'])

    @mock.patch.object(requests, 'request')
    def test_http_action_with_headers(self, mocked_method):
        mocked_method.return_value = get_success_fake_response()
        mock_ctx = mock.Mock()

        headers = {'int_header': 33, 'bool_header': True,
                   'float_header': 3.0, 'regular_header': 'teststring'}

        safe_headers = {'int_header': '33', 'bool_header': 'True',
                        'float_header': '3.0',
                        'regular_header': 'teststring'}

        action = std.HTTPAction(
            url=URL,
            method='POST',
            headers=headers.copy(),
        )

        result = action.run(mock_ctx)

        self.assertIn('headers', result)

        args, kwargs = mocked_method.call_args

        self.assertEqual(safe_headers, kwargs['headers'])

    @mock.patch.object(requests, 'request')
    def test_http_action_empty_resp(self, mocked_method):
        def invoke(content):
            action = std.HTTPAction(
                url=URL,
                method='GET',
            )

            mocked_method.return_value = get_fake_response(
                content=content, code=200
            )

            result = action.run(mock.Mock())

            self.assertEqual(content, result['content'])

        invoke(None)
        invoke('')

    @mock.patch.object(requests, 'request')
    def test_http_action_none_encoding_not_empty_resp(self, mocked_method):
        action = std.HTTPAction(
            url=URL,
            method='GET',
        )

        mocked_method.return_value = get_fake_response(
            content='', code=200, encoding=None
        )

        mock_ctx = mock.Mock()

        result = action.run(mock_ctx)

        self.assertIsNone(result['encoding'])
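As a summary of the contract exercised by the tests above, a hedged sketch; the helper is hypothetical and not part of this suite. On success std.HTTPAction returns a plain dict, while on an error status the tests assert a mistral_lib Result whose error field carries the status.

# Hypothetical helper summarizing the result shapes asserted above.
def describe_http_result(result):
    if isinstance(result, dict):
        # Success path: a dict with 'content', 'headers', 'status', etc.
        return 'succeeded with HTTP %s' % result['status']

    # Error path: a mistral_lib actions.Result carrying error details.
    return 'failed with HTTP %s' % result.error['status']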
mistral-10.0.0.0b3/mistral/tests/unit/actions/test_std_mistral_http_action.py

# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import json

import mock
import requests

from mistral.actions import std_actions as std
from mistral.tests.unit import base
from mistral_lib import actions as mistral_lib_actions

URL = 'http://some_url'

DATA = {
    'server': {
        'id': '12345',
        'metadata': {
            'name': 'super_server'
        }
    }
}


def get_fake_response(content, code, **kwargs):
    return base.FakeHTTPResponse(
        content,
        code,
        **kwargs
    )


def get_success_fake_response():
    return get_fake_response(
        json.dumps(DATA), 200,
        headers={'Content-Type': 'application/json'}
    )


def get_error_fake_response():
    return get_fake_response(
        json.dumps(DATA), 401
    )


class MistralHTTPActionTest(base.BaseTest):
    @mock.patch.object(requests, 'request')
    def test_http_action(self, mocked_method):
        mocked_method.return_value = get_success_fake_response()
        mock_ctx = mock.Mock()

        action = std.MistralHTTPAction(
            url=URL,
            method='POST',
            body=DATA,
            timeout=20,
            allow_redirects=True
        )

        DATA_STR = json.dumps(DATA)

        self.assertEqual(DATA_STR, action.body)
        self.assertEqual(URL, action.url)

        result = action.run(mock_ctx)

        self.assertIsInstance(result, dict)
        self.assertEqual(DATA, result['content'])
        self.assertIn('headers', result)
        self.assertEqual(200, result['status'])

        mock_ex = mock_ctx.execution

        headers = {
            'Mistral-Workflow-Name': mock_ex.workflow_name,
            'Mistral-Task-Id': mock_ex.task_execution_id,
            'Mistral-Callback-URL': mock_ex.callback_url,
            'Mistral-Action-Execution-Id': mock_ex.action_execution_id,
            'Mistral-Workflow-Execution-Id': mock_ex.workflow_execution_id
        }

        mocked_method.assert_called_with(
            'POST',
            URL,
            data=DATA_STR,
            json=None,
            headers=headers,
            cookies=None,
            params=None,
            timeout=20,
            auth=None,
            allow_redirects=True,
            proxies=None,
            verify=None
        )

    @mock.patch.object(requests, 'request')
    def test_http_action_error_result(self, mocked_method):
        mocked_method.return_value = get_error_fake_response()
        mock_ctx = mock.Mock()

        action = std.MistralHTTPAction(
            url=URL,
            method='POST',
            body=DATA,
            timeout=20,
            allow_redirects=True
        )

        result = action.run(mock_ctx)

        self.assertIsInstance(result, mistral_lib_actions.Result)
        self.assertEqual(401, result.error['status'])
mistral-10.0.0.0b3/mistral/tests/unit/actions/test_std_ssh_action.py

# Copyright 2018 Nokia Networks.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import json

import mock

from mistral.actions import std_actions as std
from mistral import exceptions as exc
from mistral.tests.unit import base
import mistral.utils.ssh_utils


class SSHActionTest(base.BaseTest):
    def test_default_inputs(self):
        cmd = "echo -n ok"
        host = "localhost"
        username = "mistral"

        action = std.SSHAction(cmd, host, username)
        mock_ctx = None

        stdout = action.test(mock_ctx)

        params = json.loads(stdout)

        self.assertEqual("", params['password'],
                         "Password does not match.")
        self.assertIsNone(
            params['private_key_filename'],
            "private_key_filename is not None.")

    @mock.patch.object(mistral.utils.ssh_utils, 'execute_command')
    def test_ssh_action(self, mocked_method):
        mocked_method.return_value = (0, 'ok')

        cmd = "echo -n ok"
        host = "localhost"
        username = "mistral"

        action = std.SSHAction(cmd, host, username)
        mock_ctx = None

        stdout = action.run(mock_ctx)

        self.assertEqual('ok', stdout,
                         'stdout from SSH command differs from expected')

        mocked_method.assert_called_with(
            cmd=cmd,
            host=host,
            username=username,
            password='',
            private_key_filename=None,
            private_key=None
        )

    @mock.patch.object(mistral.utils.ssh_utils, 'execute_command')
    def test_ssh_action_with_stderr(self, mocked_method):
        mocked_method.return_value = (1, 'Error expected')

        cmd = "echo -n ok"
        host = "localhost"
        username = "mistral"

        action = std.SSHAction(cmd, host, username)
        mock_ctx = None

        self.assertRaisesWithMessageContaining(
            exc.ActionException,
            "Failed to execute ssh cmd 'echo -n ok' on ['localhost']",
            action.run,
            mock_ctx
        )

mistral-10.0.0.0b3/mistral/tests/unit/actions/test_std_test_dict_action.py

# Copyright 2018 Nokia Networks.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import mock

from mistral.actions import std_actions as std
from mistral.tests.unit import base


class TestDictActionTest(base.BaseTest):
    def test_default_inputs(self):
        dict_size = 99

        action = std.TestDictAction(dict_size, key_prefix='key', val='val')

        d = action.run(mock.Mock())

        self.assertIsNotNone(d)
        self.assertEqual(dict_size, len(d))
        self.assertIn('key0', d)
        self.assertIn('key{}'.format(dict_size - 1), d)
mistral-10.0.0.0b3/mistral/tests/unit/actions/test_types.py

# Copyright 2017 - Nokia Networks
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

from mistral.api.controllers.v2 import types
from mistral import exceptions as exc
from mistral.tests.unit import base
from mistral.utils import filter_utils


class TestTypesController(base.BaseTest):
    base_id = '88888888-4444-4444-4444-777777755555'
    uuid_type = types.uuid

    def test_uuid_type(self):
        self.uuid_type.validate(self.base_id)

    def test_uuid_type_with_invalid_format(self):
        self.assertRaises(exc.InputException,
                          self.uuid_type.validate,
                          'invalid_format')
        self.assertRaises(exc.InputException,
                          self.uuid_type.validate,
                          '44-231-454-542123')

    def test_uuid_with_filters(self):
        for filter_type in filter_utils.ALL:
            value = '{}{}'.format(filter_type + ':', self.base_id)

            if filter_type.startswith((filter_utils.IN,
                                       filter_utils.NOT_IN)):
                self.assertRaises(exc.InputException,
                                  self.uuid_type.validate,
                                  value)
            else:
                self.uuid_type.validate(value)

    def test_uuid_type_with_invalid_prefix(self):
        value = 'invalid:{}'.format(self.base_id)

        self.assertRaises(exc.InputException,
                          self.uuid_type.validate,
                          value)

mistral-10.0.0.0b3/mistral/tests/unit/api/__init__.py

mistral-10.0.0.0b3/mistral/tests/unit/api/base.py

# Copyright 2013 - Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import mock
import pecan
import pecan.testing
from webtest import app as webtest_app

from mistral.api import app as pecan_app
from mistral.services import periodic
from mistral.tests.unit import base
from mistral.tests.unit.mstrlfixtures import policy_fixtures


class APITest(base.DbTestCase):
    def setUp(self):
        super(APITest, self).setUp()

        self.override_config('auth_enable', False, group='pecan')
        self.override_config('enabled', False, group='cron_trigger')

        self.app = pecan.testing.load_test_app(
            dict(pecan_app.get_pecan_config())
        )

        # Add cron trigger thread clean up explicitly, in case new tests
        # provide an alternative configuration for the pecan application.
        self.addCleanup(periodic.stop_all_periodic_tasks)

        # Make sure the API gets the correct context.
        self.patch_ctx = mock.patch(
            'mistral.context.MistralContext.from_environ'
        )
        self.mock_ctx = self.patch_ctx.start()
        self.mock_ctx.return_value = self.ctx

        self.addCleanup(self.patch_ctx.stop)

        self.policy = self.useFixture(policy_fixtures.PolicyFixture())

    def assertNotFound(self, url):
        try:
            self.app.get(url, headers={'Accept': 'application/json'})
        except webtest_app.AppError as error:
            self.assertIn('Bad response: 404 Not Found', str(error))

            return

        self.fail('Expected 404 Not found but got OK')

    def assertUnauthorized(self, url):
        try:
            self.app.get(url, headers={'Accept': 'application/json'})
        except webtest_app.AppError as error:
            self.assertIn('Bad response: 401 Unauthorized', str(error))

            return

        self.fail('Expected 401 Unauthorized but got OK')

mistral-10.0.0.0b3/mistral/tests/unit/api/test_access_control.py

# Copyright 2016 NEC Corporation. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

from mistral.api import access_control as acl
from mistral import exceptions as exc
from mistral.tests.unit import base
from mistral.tests.unit.mstrlfixtures import policy_fixtures


class PolicyTest(base.BaseTest):
    """Tests whether the configuration of the policy engine is correct."""

    def setUp(self):
        super(PolicyTest, self).setUp()

        self.policy = self.useFixture(policy_fixtures.PolicyFixture())

        rules = {
            "example:admin": "rule:admin_only",
            "example:admin_or_owner": "rule:admin_or_owner"
        }

        self.policy.register_rules(rules)

    def test_admin_api_allowed(self):
        auth_ctx = base.get_context(default=True, admin=True)

        self.assertTrue(
            acl.enforce('example:admin', auth_ctx, auth_ctx.to_dict())
        )

    def test_admin_api_disallowed(self):
        auth_ctx = base.get_context(default=True)

        self.assertRaises(
            exc.NotAllowedException,
            acl.enforce,
            'example:admin',
            auth_ctx,
            auth_ctx.to_dict()
        )

    def test_admin_or_owner_api_allowed(self):
        auth_ctx = base.get_context(default=True)

        self.assertTrue(
            acl.enforce('example:admin_or_owner', auth_ctx,
                        auth_ctx.to_dict())
        )

    def test_admin_or_owner_api_disallowed(self):
        auth_ctx = base.get_context(default=True)

        target = {'project_id': 'another'}

        self.assertRaises(
            exc.NotAllowedException,
            acl.enforce,
            'example:admin_or_owner',
            auth_ctx,
            target
        )
mistral-10.0.0.0b3/mistral/tests/unit/api/test_auth.py

# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

import datetime

from oslo_utils import timeutils
from oslo_utils import uuidutils
import pecan
import pecan.testing

from mistral.api import app as pecan_app
from mistral.tests.unit.api import base

WORKBOOKS = [
    {
        u'name': u'my_workbook',
        u'description': u'My cool Mistral workbook',
        u'scope': None,
        u'tags': [u'deployment', u'demo']
    }
]

PKI_TOKEN_VERIFIED = {
    'token': {
        'methods': ['password'],
        'roles': [{'id': uuidutils.generate_uuid(dashed=False),
                   'name': 'admin'}],
        'expires_at': datetime.datetime.isoformat(
            datetime.datetime.utcnow() + datetime.timedelta(seconds=60)
        ),
        'project': {
            'domain': {'id': 'default', 'name': 'Default'},
            'id': uuidutils.generate_uuid(dashed=False),
            'name': 'Mistral'
        },
        'catalog': [],
        'extras': {},
        'user': {
            'domain': {'id': 'default', 'name': 'Default'},
            'id': uuidutils.generate_uuid(dashed=False),
            'name': 'admin'
        },
        'issued_at': datetime.datetime.isoformat(timeutils.utcnow())
    }
}


class TestKeystoneMiddleware(base.APITest):
    """Test keystone middleware AuthProtocol.

    It checks that keystone middleware AuthProtocol is executed
    when enabled.
    """

    def setUp(self):
        super(TestKeystoneMiddleware, self).setUp()

        self.override_config('auth_enable', True, group='pecan')
        self.override_config('enabled', False, group='cron_trigger')

        self.app = pecan.testing.load_test_app(
            dict(pecan_app.get_pecan_config())
        )

mistral-10.0.0.0b3/mistral/tests/unit/api/test_cors_middleware.py

# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

"""Tests cors middleware."""

from mistral.tests.unit.api import base
from oslo_config import cfg
from oslo_middleware import cors as cors_middleware


class TestCORSMiddleware(base.APITest):
    """Provide a basic smoke test to ensure CORS middleware is active.

    The tests below provide minimal confirmation that the CORS middleware
    is active, and may be configured. For comprehensive tests, please
    consult the test suite in oslo_middleware.
    """

    def setUp(self):
        # Make sure the CORS options are registered.
        cfg.CONF.register_opts(cors_middleware.CORS_OPTS, 'cors')

        # Load up our valid domain values before the application is created.
        self.override_config(
            "allowed_origin",
            "http://valid.example.com",
            group='cors'
        )

        # Create the application.
        super(TestCORSMiddleware, self).setUp()

    def test_valid_cors_options_request(self):
        response = self.app.options(
            '/',
            headers={
                'Origin': 'http://valid.example.com',
                'Access-Control-Request-Method': 'GET'
            }
        )

        self.assertEqual(200, response.status_code)
        self.assertIn('access-control-allow-origin', response.headers)
        self.assertEqual(
            'http://valid.example.com',
            response.headers['access-control-allow-origin']
        )

    def test_invalid_cors_options_request(self):
        response = self.app.options(
            '/',
            headers={
                'Origin': 'http://invalid.example.com',
                'Access-Control-Request-Method': 'GET'
            }
        )

        self.assertEqual(200, response.status_code)
        self.assertNotIn('access-control-allow-origin', response.headers)

    def test_valid_cors_get_request(self):
        response = self.app.get(
            '/',
            headers={
                'Origin': 'http://valid.example.com'
            }
        )

        self.assertEqual(200, response.status_code)
        self.assertIn('access-control-allow-origin', response.headers)
        self.assertEqual(
            'http://valid.example.com',
            response.headers['access-control-allow-origin']
        )

    def test_invalid_cors_get_request(self):
        response = self.app.get(
            '/',
            headers={
                'Origin': 'http://invalid.example.com'
            }
        )

        self.assertEqual(200, response.status_code)
        self.assertNotIn('access-control-allow-origin', response.headers)

mistral-10.0.0.0b3/mistral/tests/unit/api/test_oslo_middleware.py

# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# see http://www.apache.org/licenses/LICENSE-2.0 for the full text.

"""Tests http_proxy_to_wsgi middleware."""

from mistral.tests.unit.api import base
from oslo_config import cfg
from oslo_middleware import http_proxy_to_wsgi as http_proxy_to_wsgi_middleware


class TestHTTPProxyToWSGIMiddleware(base.APITest):
    """Test oslo_middleware HTTPProxyToWSGI.

    It checks that oslo_middleware middleware HTTPProxyToWSGI is executed
    when enabled.
    """

    def setUp(self):
        # Make sure the HTTPProxyToWSGI options are registered.
        cfg.CONF.register_opts(http_proxy_to_wsgi_middleware.OPTS,
                               'oslo_middleware')

        # Enable proxy headers parsing in HTTPProxyToWSGI middleware.
        self.override_config(
            "enable_proxy_headers_parsing",
            "True",
            group='oslo_middleware'
        )

        # Create the application.
        super(TestHTTPProxyToWSGIMiddleware, self).setUp()
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime from mistral.api.controllers.v2 import resources from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit import base from mistral_lib import utils WF_EXEC = { 'id': 'c0f3be41-88b9-4c86-a669-83e77cd0a1b8', 'spec': {}, 'params': {'task': 'my_task1'}, 'project_id': '', 'scope': 'PUBLIC', 'state': 'IDLE', 'state_info': "Running...", 'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0), 'updated_at': None, 'context': None, 'task_execution_id': None, 'description': None, 'output': None, 'accepted': False, 'some_invalid_field': "foobar" } WF = """--- version: '2.0' WF: type: direct input: - no_default_value_input - string_param: "string" - json_param: { string_param: "string" } output: output1: 'output' tasks: task1: action: std.noop """ WF_NO_PARAMS = """--- version: '2.0' WF_NO_PARAMS: type: direct tasks: task1: action: std.noop """ WF_WITH_INPUT = """--- version: '2.0' WF_2_PARAMS: type: direct input: - param1 - param2: "value, param" tasks: task1: action: std.noop WF_3_PARAMS: type: direct input: - param1 - param2: "value" - param tasks: task1: action: std.noop """ class TestRestResource(base.DbTestCase): def test_from_db_model(self): wf_ex = db_api.create_workflow_execution(WF_EXEC) self.assertIsNotNone(wf_ex) wf_ex_resource = resources.Execution.from_db_model(wf_ex) self.assertIsNotNone(wf_ex_resource) expected = copy.copy(WF_EXEC) del expected['some_invalid_field'] utils.datetime_to_str_in_dict(expected, 'created_at') self.assertDictEqual(expected, wf_ex.to_dict()) def test_from_db_model_workflow_resource(self): expected_interface = { 'output': ['output1'], 'input': [ 'no_default_value_input', {'string_param': 'string'}, { 'json_param': {'string_param': 'string'} } ] } workflows_list = wf_service.create_workflows(WF) self.assertEqual(1, len(workflows_list)) wf_resource = resources.Workflow.from_db_model(workflows_list[0]) self.assertDictEqual(expected_interface, wf_resource.interface) def test_from_db_model_workflow_resource_no_params(self): expected_interface = { 'input': [], 'output': [] } workflows_list = wf_service.create_workflows(WF_NO_PARAMS) self.assertEqual(1, len(workflows_list)) wf_resource = resources.Workflow.from_db_model(workflows_list[0]) self.assertDictEqual(expected_interface, wf_resource.interface) def test_from_db_model_workflow_with_input(self): expected_input_two_params = "param1, param2=\"value, param\"" expected_input_three_params = "param1, param2=\"value\", param" workflows_list = wf_service.create_workflows(WF_WITH_INPUT) self.assertEqual(2, len(workflows_list)) wf_two_params = workflows_list[0] \ if workflows_list[0].name == 'WF_2_PARAMS' \ else workflows_list[1] wf_three_params = workflows_list[0] \ if workflows_list[0].name == 'WF_3_PARAMS' \ else workflows_list[1] two_params_wf_resource = resources.Workflow.from_db_model( wf_two_params) three_params_wf_resource = resources.Workflow.from_db_model( wf_three_params) self.assertEqual(expected_input_two_params, two_params_wf_resource.input) self.assertEqual(expected_input_three_params, three_params_wf_resource.input) def 

# ----------------------------------------------------------------------
# mistral/tests/unit/api/test_resource_list.py
# ----------------------------------------------------------------------

# Copyright 2018 Nokia Networks. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslotest import base

from mistral.api.controllers.v2 import resources


class TestResourceList(base.BaseTestCase):
    def test_next_link_correctness(self):
        task = resources.Task.sample()

        result = resources.Tasks.convert_with_links(
            resources=[task],
            limit=1,
            url='https://localhost:8080',
            sort_keys='created_at,id',
            sort_dirs='asc,asc',
            fields='',
            state='eq:RUNNING'
        )

        next_link = result.next

        self.assertIn('state=eq:RUNNING', next_link)
        self.assertIn('sort_keys=created_at,id', next_link)
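
# NOTE: illustrative sketch added by the editor, not part of the upstream
# file. A "next" link produced by convert_with_links() is the collection
# URL plus the original filters and a pagination marker, roughly:
#
#     https://localhost:8080/v2/tasks?marker=<last-id>&limit=1
#         &sort_keys=created_at,id&sort_dirs=asc,asc&state=eq:RUNNING
#
# (wrapped here for readability; the marker value is hypothetical).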

# ----------------------------------------------------------------------
# mistral/tests/unit/api/test_service.py
# ----------------------------------------------------------------------

# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_concurrency import processutils
from oslo_config import cfg

from mistral.api import service
from mistral.tests.unit import base


class TestWSGIService(base.BaseTest):
    def setUp(self):
        super(TestWSGIService, self).setUp()

        self.override_config('enabled', False, group='cron_trigger')

    @mock.patch.object(service.wsgi, 'Server')
    def test_workers_set_default(self, wsgi_server):
        service_name = "mistral_api"

        with mock.patch('mistral.api.app.setup_app'):
            test_service = service.WSGIService(service_name)

            wsgi_server.assert_called_once_with(
                cfg.CONF,
                service_name,
                test_service.app,
                host='0.0.0.0',
                port=8989,
                use_ssl=False
            )

    def test_workers_set_correct_setting(self):
        self.override_config('api_workers', 8, group='api')

        with mock.patch('mistral.api.app.setup_app'):
            test_service = service.WSGIService("mistral_api")

            self.assertEqual(8, test_service.workers)

    def test_workers_set_zero_setting(self):
        self.override_config('api_workers', 0, group='api')

        with mock.patch('mistral.api.app.setup_app'):
            test_service = service.WSGIService("mistral_api")

            self.assertEqual(
                processutils.get_worker_count(),
                test_service.workers
            )

    @mock.patch.object(service.wsgi, 'Server')
    def test_wsgi_service_with_ssl_enabled(self, wsgi_server):
        self.override_config('enable_ssl_api', True, group='api')

        service_name = 'mistral_api'

        with mock.patch('mistral.api.app.setup_app'):
            srv = service.WSGIService(service_name)

            wsgi_server.assert_called_once_with(
                cfg.CONF,
                service_name,
                srv.app,
                host='0.0.0.0',
                port=8989,
                use_ssl=True
            )
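
# NOTE: illustrative sketch added by the editor, not part of the upstream
# file. The worker-count rules exercised above: a positive [api]
# api_workers value is used as-is, while api_workers = 0 falls back to a
# CPU-derived default.
#
#     cfg.CONF.set_override('api_workers', 0, group='api')
#     # service.WSGIService('mistral_api').workers would then equal
#     # processutils.get_worker_count()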

# ----------------------------------------------------------------------
# mistral/tests/unit/api/v2/__init__.py  (empty)
# ----------------------------------------------------------------------


# ----------------------------------------------------------------------
# mistral/tests/unit/api/v2/test_action_executions.py
# ----------------------------------------------------------------------

# Copyright 2015 - Mirantis, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import datetime
import json

import mock
from oslo_config import cfg
import oslo_messaging
from oslo_messaging import exceptions as oslo_exc
import sqlalchemy as sa

from mistral.api.controllers.v2 import action_execution
from mistral.api.controllers.v2 import resources
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.rpc import clients as rpc_clients
from mistral.rpc.oslo import oslo_client
from mistral.tests.unit.api import base
from mistral.utils import rest_utils
from mistral.workflow import states
from mistral_lib import actions as ml_actions


# This line is needed for correct initialization of messaging config.
oslo_messaging.get_rpc_transport(cfg.CONF)


ACTION_EX_DB = models.ActionExecution(
    id='123',
    workflow_name='flow',
    task_execution=models.TaskExecution(name='task1'),
    task_execution_id='333',
    state=states.SUCCESS,
    state_info=states.SUCCESS,
    tags=['foo', 'fee'],
    name='std.echo',
    description='something',
    accepted=True,
    input={},
    output={},
    created_at=datetime.datetime(1970, 1, 1),
    updated_at=datetime.datetime(1970, 1, 1)
)

AD_HOC_ACTION_EX_DB = models.ActionExecution(
    id='123',
    state=states.SUCCESS,
    state_info=states.SUCCESS,
    tags=['foo', 'fee'],
    name='std.echo',
    description='something',
    accepted=True,
    input={},
    output={},
    created_at=datetime.datetime(1970, 1, 1),
    updated_at=datetime.datetime(1970, 1, 1)
)

AD_HOC_ACTION_EX_ERROR = models.ActionExecution(
    id='123',
    state=states.ERROR,
    state_info=states.ERROR,
    tags=['foo', 'fee'],
    name='std.echo',
    description='something',
    accepted=True,
    input={},
    output={},
    created_at=datetime.datetime(1970, 1, 1),
    updated_at=datetime.datetime(1970, 1, 1)
)

AD_HOC_ACTION_EX_CANCELLED = models.ActionExecution(
    id='123',
    state=states.CANCELLED,
    state_info=states.CANCELLED,
    tags=['foo', 'fee'],
    name='std.echo',
    description='something',
    accepted=True,
    input={},
    output={},
    created_at=datetime.datetime(1970, 1, 1),
    updated_at=datetime.datetime(1970, 1, 1)
)

ACTION_EX_DB_NOT_COMPLETE = models.ActionExecution(
    id='123',
    state=states.RUNNING,
    state_info=states.RUNNING,
    tags=['foo', 'fee'],
    name='std.echo',
    description='something',
    accepted=False,
    input={},
    output={},
    created_at=datetime.datetime(1970, 1, 1),
    updated_at=datetime.datetime(1970, 1, 1)
)

ACTION_EX = {
    'id': '123',
    'workflow_name': 'flow',
    'task_execution_id': '333',
    'task_name': 'task1',
    'state': 'SUCCESS',
    'state_info': 'SUCCESS',
    'tags': ['foo', 'fee'],
    'name': 'std.echo',
    'description': 'something',
    'accepted': True,
    'input': '{}',
    'output': '{}',
    'created_at': '1970-01-01 00:00:00',
    'updated_at': '1970-01-01 00:00:00'
}

UPDATED_ACTION_EX_DB = copy.copy(ACTION_EX_DB).to_dict()
UPDATED_ACTION_EX_DB['state'] = 'SUCCESS'
UPDATED_ACTION_EX_DB['task_name'] = 'task1'

UPDATED_ACTION = copy.deepcopy(ACTION_EX)
UPDATED_ACTION['state'] = 'SUCCESS'
UPDATED_ACTION_OUTPUT = UPDATED_ACTION['output']

CANCELLED_ACTION_EX_DB = copy.copy(ACTION_EX_DB).to_dict()
CANCELLED_ACTION_EX_DB['state'] = 'CANCELLED'
CANCELLED_ACTION_EX_DB['task_name'] = 'task1'

CANCELLED_ACTION = copy.deepcopy(ACTION_EX)
CANCELLED_ACTION['state'] = 'CANCELLED'

PAUSED_ACTION_EX_DB = copy.copy(ACTION_EX_DB).to_dict()
PAUSED_ACTION_EX_DB['state'] = 'PAUSED'
PAUSED_ACTION_EX_DB['task_name'] = 'task1'

PAUSED_ACTION = copy.deepcopy(ACTION_EX)
PAUSED_ACTION['state'] = 'PAUSED'

RUNNING_ACTION_EX_DB = copy.copy(ACTION_EX_DB).to_dict()
RUNNING_ACTION_EX_DB['state'] = 'RUNNING'
RUNNING_ACTION_EX_DB['task_name'] = 'task1'
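
# NOTE: explanatory comment added by the editor, not in the upstream
# file. The fixtures in this module come in pairs: a *_EX_DB value
# derived from the DB model (what the mocked DB/RPC layer returns) and
# a plain dict of strings (the REST serialization the API is expected
# to produce), so each test can compare the two directly.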
RUNNING_ACTION = copy.deepcopy(ACTION_EX)
RUNNING_ACTION['state'] = 'RUNNING'

ERROR_ACTION_EX = copy.copy(ACTION_EX_DB).to_dict()
ERROR_ACTION_EX['state'] = 'ERROR'
ERROR_ACTION_EX['task_name'] = 'task1'

ERROR_ACTION = copy.deepcopy(ACTION_EX)
ERROR_ACTION['state'] = 'ERROR'
ERROR_ACTION_RES = ERROR_ACTION['output']

ERROR_OUTPUT = "Fake error, it is a test"

ERROR_ACTION_EX_WITH_OUTPUT = copy.copy(ACTION_EX_DB).to_dict()
ERROR_ACTION_EX_WITH_OUTPUT['state'] = 'ERROR'
ERROR_ACTION_EX_WITH_OUTPUT['task_name'] = 'task1'
ERROR_ACTION_EX_WITH_OUTPUT['output'] = {"output": ERROR_OUTPUT}

ERROR_ACTION_WITH_OUTPUT = copy.deepcopy(ACTION_EX)
ERROR_ACTION_WITH_OUTPUT['state'] = 'ERROR'
ERROR_ACTION_WITH_OUTPUT['output'] = (
    '{"output": "%s"}' % ERROR_OUTPUT
)
ERROR_ACTION_RES_WITH_OUTPUT = {"output": ERROR_OUTPUT}

DEFAULT_ERROR_OUTPUT = "Unknown error"

ERROR_ACTION_EX_FOR_EMPTY_OUTPUT = copy.copy(ACTION_EX_DB).to_dict()
ERROR_ACTION_EX_FOR_EMPTY_OUTPUT['state'] = 'ERROR'
ERROR_ACTION_EX_FOR_EMPTY_OUTPUT['task_name'] = 'task1'
ERROR_ACTION_EX_FOR_EMPTY_OUTPUT['output'] = {"output": DEFAULT_ERROR_OUTPUT}

ERROR_ACTION_FOR_EMPTY_OUTPUT = copy.deepcopy(ERROR_ACTION)
ERROR_ACTION_FOR_EMPTY_OUTPUT['output'] = (
    '{"output": "%s"}' % DEFAULT_ERROR_OUTPUT
)

ERROR_ACTION_WITH_NONE_OUTPUT = copy.deepcopy(ERROR_ACTION)
ERROR_ACTION_WITH_NONE_OUTPUT['output'] = None

BROKEN_ACTION = copy.deepcopy(ACTION_EX)
BROKEN_ACTION['output'] = 'string not escaped'

MOCK_ACTION = mock.MagicMock(return_value=ACTION_EX_DB)
MOCK_ACTION_NOT_COMPLETE = mock.MagicMock(
    return_value=ACTION_EX_DB_NOT_COMPLETE
)
MOCK_ACTION_COMPLETE_ERROR = mock.MagicMock(
    return_value=AD_HOC_ACTION_EX_ERROR
)
MOCK_ACTION_COMPLETE_CANCELLED = mock.MagicMock(
    return_value=AD_HOC_ACTION_EX_CANCELLED
)
MOCK_AD_HOC_ACTION = mock.MagicMock(return_value=AD_HOC_ACTION_EX_DB)
MOCK_ACTIONS = mock.MagicMock(return_value=[ACTION_EX_DB])
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
MOCK_DELETE = mock.MagicMock(return_value=None)

ACTION_EX_DB_WITH_PROJECT_ID = AD_HOC_ACTION_EX_DB.get_clone()
ACTION_EX_DB_WITH_PROJECT_ID.project_id = ''


class TestActionExecutionsController(base.APITest):
    def setUp(self):
        super(TestActionExecutionsController, self).setUp()

        self.addCleanup(
            cfg.CONF.set_default,
            'allow_action_execution_deletion',
            False,
            group='api'
        )

    @mock.patch.object(db_api, 'get_action_execution', MOCK_ACTION)
    def test_get(self):
        resp = self.app.get('/v2/action_executions/123')

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(ACTION_EX, resp.json)

    @mock.patch.object(db_api, 'get_action_execution')
    def test_get_operational_error(self, mocked_get):
        mocked_get.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            ACTION_EX_DB  # Successful run
        ]

        resp = self.app.get('/v2/action_executions/123')

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(ACTION_EX, resp.json)

    def test_basic_get(self):
        resp = self.app.get('/v2/action_executions/')

        self.assertEqual(200, resp.status_int)

    @mock.patch.object(db_api, 'get_action_execution', MOCK_NOT_FOUND)
    def test_get_not_found(self):
        resp = self.app.get('/v2/action_executions/123', expect_errors=True)

        self.assertEqual(404, resp.status_int)

    @mock.patch.object(db_api, 'get_action_execution',
                       return_value=ACTION_EX_DB_WITH_PROJECT_ID)
    def test_get_within_project_id(self, mock_get):
        resp = self.app.get('/v2/action_executions/123')

        self.assertEqual(200, resp.status_int)
        self.assertTrue('project_id' in resp.json)
    @mock.patch.object(oslo_client.OsloRPCClient, 'sync_call',
                       mock.MagicMock(side_effect=oslo_exc.MessagingTimeout))
    def test_post_timeout(self):
        self.override_config('rpc_response_timeout', 100)

        resp = self.app.post_json(
            '/v2/action_executions',
            {
                'name': 'std.sleep',
                'input': {'seconds': 120}
            },
            expect_errors=True
        )

        error_msg = resp.json['faultstring']

        self.assertEqual(
            error_msg,
            'This rpc call "start_action" took longer than '
            'configured 100 seconds.'
        )

    @mock.patch.object(rpc_clients.EngineClient, 'start_action')
    def test_post(self, f):
        f.return_value = ACTION_EX_DB.to_dict()

        resp = self.app.post_json(
            '/v2/action_executions',
            {
                'name': 'std.echo',
                'input': "{}",
                'params': '{"save_result": true, "run_sync": true}'
            }
        )

        self.assertEqual(201, resp.status_int)

        action_exec = copy.deepcopy(ACTION_EX)
        del action_exec['task_name']

        self.assertDictEqual(action_exec, resp.json)

        f.assert_called_once_with(
            action_exec['name'],
            json.loads(action_exec['input']),
            description=None,
            save_result=True,
            run_sync=True,
            namespace=''
        )

    @mock.patch.object(rpc_clients.EngineClient, 'start_action')
    def test_post_with_timeout(self, f):
        f.return_value = ACTION_EX_DB.to_dict()

        resp = self.app.post_json(
            '/v2/action_executions',
            {
                'name': 'std.echo',
                'input': "{}",
                'params': '{"timeout": 2}'
            }
        )

        self.assertEqual(201, resp.status_int)

        action_exec = copy.deepcopy(ACTION_EX)
        del action_exec['task_name']

        self.assertDictEqual(action_exec, resp.json)

        f.assert_called_once_with(
            action_exec['name'],
            json.loads(action_exec['input']),
            description=None,
            timeout=2,
            namespace=''
        )

    @mock.patch.object(rpc_clients.EngineClient, 'start_action')
    def test_post_json(self, f):
        f.return_value = ACTION_EX_DB.to_dict()

        resp = self.app.post_json(
            '/v2/action_executions',
            {
                'name': 'std.echo',
                'input': {},
                'params': '{"save_result": true}'
            }
        )

        self.assertEqual(201, resp.status_int)

        action_exec = copy.deepcopy(ACTION_EX)
        del action_exec['task_name']

        self.assertDictEqual(action_exec, resp.json)

        f.assert_called_once_with(
            action_exec['name'],
            json.loads(action_exec['input']),
            description=None,
            save_result=True,
            namespace=''
        )

    @mock.patch.object(rpc_clients.EngineClient, 'start_action')
    def test_post_without_input(self, f):
        f.return_value = ACTION_EX_DB.to_dict()
        f.return_value['output'] = {'result': '123'}

        resp = self.app.post_json(
            '/v2/action_executions',
            {'name': 'nova.servers_list'}
        )

        self.assertEqual(201, resp.status_int)
        self.assertEqual('{"result": "123"}', resp.json['output'])

        f.assert_called_once_with(
            'nova.servers_list',
            {},
            description=None,
            namespace=''
        )

    def test_post_bad_result(self):
        resp = self.app.post_json(
            '/v2/action_executions',
            {'input': 'null'},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)

    def test_post_bad_input(self):
        resp = self.app.post_json(
            '/v2/action_executions',
            {'input': None},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)

    def test_post_bad_json_input(self):
        resp = self.app.post_json(
            '/v2/action_executions',
            {'input': 2},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)

    @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete')
    def test_put(self, f):
        f.return_value = UPDATED_ACTION_EX_DB

        resp = self.app.put_json('/v2/action_executions/123', UPDATED_ACTION)

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(UPDATED_ACTION, resp.json)

        f.assert_called_once_with(
            UPDATED_ACTION['id'],
            ml_actions.Result(data=ACTION_EX_DB.output)
        )
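
    # NOTE: explanatory comment added by the editor, not in the upstream
    # file. The PUT handler maps the resource state to a mistral_lib
    # Result: SUCCESS becomes Result(data=...), ERROR becomes
    # Result(error=...) and CANCELLED becomes Result(cancel=True),
    # which is what the assertions below verify against the mocked
    # on_action_complete call.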
    @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete')
    def test_put_error_with_output(self, f):
        f.return_value = ERROR_ACTION_EX_WITH_OUTPUT

        resp = self.app.put_json(
            '/v2/action_executions/123',
            ERROR_ACTION_WITH_OUTPUT
        )

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(ERROR_ACTION_WITH_OUTPUT, resp.json)

        f.assert_called_once_with(
            ERROR_ACTION_WITH_OUTPUT['id'],
            ml_actions.Result(error=ERROR_ACTION_RES_WITH_OUTPUT)
        )

    @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete')
    def test_put_error_with_unknown_reason(self, f):
        f.return_value = ERROR_ACTION_EX_FOR_EMPTY_OUTPUT

        resp = self.app.put_json('/v2/action_executions/123', ERROR_ACTION)

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(ERROR_ACTION_FOR_EMPTY_OUTPUT, resp.json)

        f.assert_called_once_with(
            ERROR_ACTION_FOR_EMPTY_OUTPUT['id'],
            ml_actions.Result(error=DEFAULT_ERROR_OUTPUT)
        )

    @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete')
    def test_put_error_with_unknown_reason_output_none(self, f):
        f.return_value = ERROR_ACTION_EX_FOR_EMPTY_OUTPUT

        resp = self.app.put_json(
            '/v2/action_executions/123',
            ERROR_ACTION_WITH_NONE_OUTPUT
        )

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(ERROR_ACTION_FOR_EMPTY_OUTPUT, resp.json)

        f.assert_called_once_with(
            ERROR_ACTION_FOR_EMPTY_OUTPUT['id'],
            ml_actions.Result(error=DEFAULT_ERROR_OUTPUT)
        )

    @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete')
    def test_put_cancelled(self, on_action_complete_mock_func):
        on_action_complete_mock_func.return_value = CANCELLED_ACTION_EX_DB

        resp = self.app.put_json(
            '/v2/action_executions/123',
            CANCELLED_ACTION
        )

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(CANCELLED_ACTION, resp.json)

        on_action_complete_mock_func.assert_called_once_with(
            CANCELLED_ACTION['id'],
            ml_actions.Result(cancel=True)
        )

    @mock.patch.object(rpc_clients.EngineClient, 'on_action_update')
    def test_put_paused(self, on_action_update_mock_func):
        on_action_update_mock_func.return_value = PAUSED_ACTION_EX_DB

        resp = self.app.put_json('/v2/action_executions/123', PAUSED_ACTION)

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(PAUSED_ACTION, resp.json)

        on_action_update_mock_func.assert_called_once_with(
            PAUSED_ACTION['id'],
            PAUSED_ACTION['state']
        )

    @mock.patch.object(rpc_clients.EngineClient, 'on_action_update')
    def test_put_resume(self, on_action_update_mock_func):
        on_action_update_mock_func.return_value = RUNNING_ACTION_EX_DB

        resp = self.app.put_json('/v2/action_executions/123', RUNNING_ACTION)

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(RUNNING_ACTION, resp.json)

        on_action_update_mock_func.assert_called_once_with(
            RUNNING_ACTION['id'],
            RUNNING_ACTION['state']
        )

    @mock.patch.object(
        rpc_clients.EngineClient,
        'on_action_complete',
        MOCK_NOT_FOUND
    )
    def test_put_no_action_ex(self):
        resp = self.app.put_json(
            '/v2/action_executions/123',
            UPDATED_ACTION,
            expect_errors=True
        )

        self.assertEqual(404, resp.status_int)

    def test_put_bad_state(self):
        action = copy.deepcopy(ACTION_EX)
        action['state'] = 'DELAYED'

        resp = self.app.put_json(
            '/v2/action_executions/123',
            action,
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn('Expected one of', resp.json['faultstring'])

    def test_put_bad_result(self):
        resp = self.app.put_json(
            '/v2/action_executions/123',
            BROKEN_ACTION,
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)

    @mock.patch.object(rpc_clients.EngineClient, 'on_action_complete')
    def test_put_without_result(self, f):
        action_ex = copy.deepcopy(UPDATED_ACTION)
        del action_ex['output']

        f.return_value = UPDATED_ACTION_EX_DB

        resp = self.app.put_json('/v2/action_executions/123', action_ex)

        self.assertEqual(200, resp.status_int)
    @mock.patch.object(db_api, 'get_action_executions', MOCK_ACTIONS)
    def test_get_all(self):
        resp = self.app.get('/v2/action_executions')

        self.assertEqual(200, resp.status_int)

        self.assertEqual(1, len(resp.json['action_executions']))
        self.assertDictEqual(ACTION_EX, resp.json['action_executions'][0])

    @mock.patch.object(db_api, 'get_action_executions')
    def test_get_all_operational_error(self, mocked_get_all):
        mocked_get_all.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            [ACTION_EX_DB]  # Successful run
        ]

        resp = self.app.get('/v2/action_executions')

        self.assertEqual(200, resp.status_int)

        self.assertEqual(1, len(resp.json['action_executions']))
        self.assertDictEqual(ACTION_EX, resp.json['action_executions'][0])

    @mock.patch.object(rest_utils, 'get_all',
                       return_value=resources.ActionExecutions())
    def test_get_all_without_output(self, mock_get_all):
        resp = self.app.get('/v2/action_executions')

        args, kwargs = mock_get_all.call_args
        resource_function = kwargs['resource_function']

        self.assertEqual(200, resp.status_int)
        self.assertEqual(
            action_execution._get_action_execution_resource_for_list,
            resource_function
        )

    @mock.patch.object(rest_utils, 'get_all',
                       return_value=resources.ActionExecutions())
    def test_get_all_with_output(self, mock_get_all):
        resp = self.app.get('/v2/action_executions?include_output=true')

        args, kwargs = mock_get_all.call_args
        resource_function = kwargs['resource_function']

        self.assertEqual(200, resp.status_int)
        self.assertEqual(
            action_execution._get_action_execution_resource,
            resource_function
        )

    @mock.patch.object(db_api, 'get_action_executions', MOCK_EMPTY)
    def test_get_all_empty(self):
        resp = self.app.get('/v2/action_executions')

        self.assertEqual(200, resp.status_int)
        self.assertEqual(0, len(resp.json['action_executions']))

    @mock.patch.object(db_api, 'get_action_execution', MOCK_AD_HOC_ACTION)
    @mock.patch.object(db_api, 'delete_action_execution', MOCK_DELETE)
    def test_delete(self):
        cfg.CONF.set_default('allow_action_execution_deletion', True, 'api')

        resp = self.app.delete('/v2/action_executions/123')

        self.assertEqual(204, resp.status_int)

    @mock.patch.object(db_api, 'get_action_execution', MOCK_NOT_FOUND)
    def test_delete_not_found(self):
        cfg.CONF.set_default('allow_action_execution_deletion', True, 'api')

        resp = self.app.delete(
            '/v2/action_executions/123',
            expect_errors=True
        )

        self.assertEqual(404, resp.status_int)

    def test_delete_not_allowed(self):
        resp = self.app.delete(
            '/v2/action_executions/123',
            expect_errors=True
        )

        self.assertEqual(403, resp.status_int)
        self.assertIn(
            "Action execution deletion is not allowed",
            resp.body.decode()
        )

    @mock.patch.object(db_api, 'get_action_execution', MOCK_ACTION)
    def test_delete_action_execution_with_task(self):
        cfg.CONF.set_default('allow_action_execution_deletion', True, 'api')

        resp = self.app.delete(
            '/v2/action_executions/123',
            expect_errors=True
        )

        self.assertEqual(403, resp.status_int)
        self.assertIn(
            "Only ad-hoc action execution can be deleted",
            resp.body.decode()
        )

    @mock.patch.object(
        db_api,
        'get_action_execution',
        MOCK_ACTION_NOT_COMPLETE
    )
    def test_delete_action_execution_not_complete(self):
        cfg.CONF.set_default('allow_action_execution_deletion', True, 'api')

        resp = self.app.delete(
            '/v2/action_executions/123',
            expect_errors=True
        )

        self.assertEqual(403, resp.status_int)
        self.assertIn(
            "Only completed action execution can be deleted",
            resp.body.decode()
        )
    @mock.patch.object(
        db_api,
        'get_action_execution',
        MOCK_ACTION_COMPLETE_ERROR
    )
    @mock.patch.object(db_api, 'delete_action_execution', MOCK_DELETE)
    def test_delete_action_execution_complete_error(self):
        cfg.CONF.set_default('allow_action_execution_deletion', True, 'api')

        resp = self.app.delete(
            '/v2/action_executions/123',
            expect_errors=True
        )

        self.assertEqual(204, resp.status_int)

    @mock.patch.object(
        db_api,
        'get_action_execution',
        MOCK_ACTION_COMPLETE_CANCELLED
    )
    @mock.patch.object(db_api, 'delete_action_execution', MOCK_DELETE)
    def test_delete_action_execution_complete_cancelled(self):
        cfg.CONF.set_default('allow_action_execution_deletion', True, 'api')

        resp = self.app.delete(
            '/v2/action_executions/123',
            expect_errors=True
        )

        self.assertEqual(204, resp.status_int)
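
# NOTE: illustrative sketch added by the editor, not part of the upstream
# file. The delete tests above encode the API policy: DELETE on an
# action execution is refused unless deletion is enabled in the config,
# and even then only ad-hoc (task-less), completed executions are
# deletable. A minimal mistral.conf fragment, assuming default option
# names as used in the tests:
#
#     [api]
#     allow_action_execution_deletion = True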

# ----------------------------------------------------------------------
# mistral/tests/unit/api/v2/test_actions.py
# ----------------------------------------------------------------------

# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import mock
import sqlalchemy as sa

from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.tests.unit.api import base
from mistral_lib import utils

ACTION_DEFINITION = """
---
version: '2.0'

my_action:
  description: My super cool action.
  tags: ['test', 'v2']
  base: std.echo
  base-input:
    output: "{$.str1}{$.str2}"
"""

ACTION_DEFINITION_INVALID_NO_BASE = """
---
version: '2.0'

my_action:
  description: My super cool action.
  tags: ['test', 'v2']
  base-input:
    output: "{$.str1}{$.str2}"
"""

ACTION_DEFINITION_INVALID_YAQL = """
---
version: '2.0'

my_action:
  description: My super cool action.
  tags: ['test', 'v2']
  base: std.echo
  base-input:
    output: <% $. %>
"""

ACTION_DSL_PARSE_EXCEPTION = """
---
%
"""

SYSTEM_ACTION_DEFINITION = """
---
version: '2.0'

std.echo:
  base: std.http
  base-input:
    url: "some.url"
"""

ACTION = {
    'id': '123e4567-e89b-12d3-a456-426655440000',
    'name': 'my_action',
    'is_system': False,
    'description': 'My super cool action.',
    'tags': ['test', 'v2'],
    'definition': ACTION_DEFINITION
}

SYSTEM_ACTION = {
    'id': '1234',
    'name': 'std.echo',
    'is_system': True,
    'definition': SYSTEM_ACTION_DEFINITION
}

ACTION_DB = models.ActionDefinition()
ACTION_DB.update(ACTION)

SYSTEM_ACTION_DB = models.ActionDefinition()
SYSTEM_ACTION_DB.update(SYSTEM_ACTION)

PROJECT_ID_ACTION_DB = ACTION_DB.get_clone()
PROJECT_ID_ACTION_DB.project_id = ''

UPDATED_ACTION_DEFINITION = """
---
version: '2.0'

my_action:
  description: My super cool action.
  base: std.echo
  base-input:
    output: "{$.str1}{$.str2}{$.str3}"
"""

UPDATED_ACTION_DB = copy.copy(ACTION_DB)
UPDATED_ACTION_DB['definition'] = UPDATED_ACTION_DEFINITION

UPDATED_ACTION = copy.deepcopy(ACTION)
UPDATED_ACTION['definition'] = UPDATED_ACTION_DEFINITION

MOCK_ACTION = mock.MagicMock(return_value=ACTION_DB)
MOCK_SYSTEM_ACTION = mock.MagicMock(return_value=SYSTEM_ACTION_DB)
MOCK_ACTIONS = mock.MagicMock(return_value=[ACTION_DB])
MOCK_UPDATED_ACTION = mock.MagicMock(return_value=UPDATED_ACTION_DB)
MOCK_DELETE = mock.MagicMock(return_value=None)
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
MOCK_DUPLICATE = mock.MagicMock(side_effect=exc.DBDuplicateEntryError())


class TestActionsController(base.APITest):
    @mock.patch.object(db_api, "get_action_definition", MOCK_ACTION)
    def test_get(self):
        resp = self.app.get('/v2/actions/my_action')

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(ACTION, resp.json)

    @mock.patch.object(db_api, 'get_action_definition')
    def test_get_operational_error(self, mocked_get):
        mocked_get.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            ACTION_DB  # Successful run
        ]

        resp = self.app.get('/v2/actions/my_action')

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(ACTION, resp.json)

    @mock.patch.object(db_api, "get_action_definition", MOCK_NOT_FOUND)
    def test_get_not_found(self):
        resp = self.app.get('/v2/actions/my_action', expect_errors=True)

        self.assertEqual(404, resp.status_int)

    @mock.patch.object(db_api, "update_action_definition",
                       MOCK_UPDATED_ACTION)
    @mock.patch.object(db_api, "get_action_definition", MOCK_ACTION)
    def test_get_by_id(self):
        url = '/v2/actions/{0}'.format(ACTION['id'])

        resp = self.app.get(url)

        self.assertEqual(200, resp.status_int)
        self.assertEqual(ACTION['id'], resp.json['id'])

    @mock.patch.object(db_api, "get_action_definition", MOCK_NOT_FOUND)
    def test_get_by_id_not_found(self):
        url = '/v2/actions/1234'

        resp = self.app.get(url, expect_errors=True)

        self.assertEqual(404, resp.status_int)

    @mock.patch.object(db_api, "get_action_definition",
                       return_value=PROJECT_ID_ACTION_DB)
    def test_get_within_project_id(self, mock_get):
        url = '/v2/actions/1234'

        resp = self.app.get(url, expect_errors=True)

        self.assertEqual(200, resp.status_int)
        self.assertTrue('project_id' in resp.json)

    @mock.patch.object(db_api, "get_action_definition", MOCK_ACTION)
    @mock.patch.object(db_api, "update_action_definition",
                       MOCK_UPDATED_ACTION)
    def test_put(self):
        resp = self.app.put(
            '/v2/actions',
            UPDATED_ACTION_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(200, resp.status_int)
        self.assertEqual({"actions": [UPDATED_ACTION]}, resp.json)

    @mock.patch.object(db_api, "load_action_definition", MOCK_ACTION)
    @mock.patch.object(db_api, "update_action_definition")
    def test_put_public(self, mock_mtd):
        mock_mtd.return_value = UPDATED_ACTION_DB

        resp = self.app.put(
            '/v2/actions?scope=public',
            UPDATED_ACTION_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(200, resp.status_int)
        self.assertEqual({"actions": [UPDATED_ACTION]}, resp.json)

        self.assertEqual("public", mock_mtd.call_args[0][1]['scope'])

    @mock.patch.object(db_api, "update_action_definition", MOCK_NOT_FOUND)
    def test_put_not_found(self):
        resp = self.app.put(
            '/v2/actions',
            UPDATED_ACTION_DEFINITION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(404, resp.status_int)
    @mock.patch.object(db_api, "get_action_definition", MOCK_SYSTEM_ACTION)
    def test_put_system(self):
        resp = self.app.put(
            '/v2/actions',
            SYSTEM_ACTION_DEFINITION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn(
            'Attempt to modify a system action: std.echo',
            resp.body.decode()
        )

    @mock.patch.object(db_api, "create_action_definition")
    def test_post(self, mock_mtd):
        mock_mtd.return_value = ACTION_DB

        resp = self.app.post(
            '/v2/actions',
            ACTION_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(201, resp.status_int)
        self.assertEqual({"actions": [ACTION]}, resp.json)

        self.assertEqual(1, mock_mtd.call_count)

        values = mock_mtd.call_args[0][0]

        self.assertEqual('My super cool action.', values['description'])

        spec = values['spec']

        self.assertIsNotNone(spec)
        self.assertEqual(ACTION_DB.name, spec['name'])

    @mock.patch.object(db_api, "create_action_definition")
    def test_post_public(self, mock_mtd):
        mock_mtd.return_value = ACTION_DB

        resp = self.app.post(
            '/v2/actions?scope=public',
            ACTION_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(201, resp.status_int)
        self.assertEqual({"actions": [ACTION]}, resp.json)

        self.assertEqual("public", mock_mtd.call_args[0][0]['scope'])

    @mock.patch.object(db_api, "create_action_definition")
    def test_post_wrong_scope(self, mock_mtd):
        mock_mtd.return_value = ACTION_DB

        resp = self.app.post(
            '/v2/actions?scope=unique',
            ACTION_DEFINITION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn(
            "Scope must be one of the following",
            resp.body.decode()
        )

    @mock.patch.object(db_api, "create_action_definition", MOCK_DUPLICATE)
    def test_post_dup(self):
        resp = self.app.post(
            '/v2/actions',
            ACTION_DEFINITION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(409, resp.status_int)

    @mock.patch.object(db_api, "get_action_definition", MOCK_ACTION)
    @mock.patch.object(db_api, "delete_action_definition", MOCK_DELETE)
    def test_delete(self):
        resp = self.app.delete('/v2/actions/my_action')

        self.assertEqual(204, resp.status_int)

    @mock.patch.object(db_api, "delete_action_definition", MOCK_NOT_FOUND)
    def test_delete_not_found(self):
        resp = self.app.delete('/v2/actions/my_action', expect_errors=True)

        self.assertEqual(404, resp.status_int)

    @mock.patch.object(db_api, "get_action_definition", MOCK_SYSTEM_ACTION)
    def test_delete_system(self):
        resp = self.app.delete('/v2/actions/std.echo', expect_errors=True)

        self.assertEqual(400, resp.status_int)
        self.assertIn(
            'Attempt to delete a system action: std.echo',
            resp.json['faultstring']
        )

    @mock.patch.object(db_api, "get_action_definitions", MOCK_ACTIONS)
    def test_get_all(self):
        resp = self.app.get('/v2/actions')

        self.assertEqual(200, resp.status_int)

        self.assertEqual(1, len(resp.json['actions']))
        self.assertDictEqual(ACTION, resp.json['actions'][0])

    @mock.patch.object(db_api, 'get_action_definitions')
    def test_get_all_operational_error(self, mocked_get_all):
        mocked_get_all.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            [ACTION_DB]  # Successful run
        ]

        resp = self.app.get('/v2/actions')

        self.assertEqual(200, resp.status_int)

        self.assertEqual(1, len(resp.json['actions']))
        self.assertDictEqual(ACTION, resp.json['actions'][0])

    @mock.patch.object(db_api, "get_action_definitions", MOCK_EMPTY)
    def test_get_all_empty(self):
        resp = self.app.get('/v2/actions')

        self.assertEqual(200, resp.status_int)
        self.assertEqual(0, len(resp.json['actions']))
"get_action_definitions", MOCK_ACTIONS) def test_get_all_pagination(self): resp = self.app.get( '/v2/actions?limit=1&sort_keys=id,name') self.assertEqual(200, resp.status_int) self.assertIn('next', resp.json) self.assertEqual(1, len(resp.json['actions'])) self.assertDictEqual(ACTION, resp.json['actions'][0]) param_dict = utils.get_dict_from_string( resp.json['next'].split('?')[1], delimiter='&' ) expected_dict = { 'marker': '123e4567-e89b-12d3-a456-426655440000', 'limit': 1, 'sort_keys': 'id,name', 'sort_dirs': 'asc,asc' } self.assertTrue( set(expected_dict.items()).issubset(set(param_dict.items())) ) def test_get_all_pagination_limit_negative(self): resp = self.app.get( '/v2/actions?limit=-1&sort_keys=id,name&sort_dirs=asc,asc', expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("Limit must be positive", resp.body.decode()) def test_get_all_pagination_limit_not_integer(self): resp = self.app.get( '/v2/actions?limit=1.1&sort_keys=id,name&sort_dirs=asc,asc', expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("unable to convert to int", resp.body.decode()) def test_get_all_pagination_invalid_sort_dirs_length(self): resp = self.app.get( '/v2/actions?limit=1&sort_keys=id,name&sort_dirs=asc,asc,asc', expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn( "Length of sort_keys must be equal or greater than sort_dirs", resp.body.decode() ) def test_get_all_pagination_unknown_direction(self): resp = self.app.get( '/v2/actions?limit=1&sort_keys=id&sort_dirs=nonexist', expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("Unknown sort direction", resp.body.decode()) def test_validate(self): resp = self.app.post( '/v2/actions/validate', ACTION_DEFINITION, headers={'Content-Type': 'text/plain'} ) self.assertEqual(200, resp.status_int) self.assertTrue(resp.json['valid']) def test_validate_invalid_model_exception(self): resp = self.app.post( '/v2/actions/validate', ACTION_DEFINITION_INVALID_NO_BASE, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) self.assertFalse(resp.json['valid']) self.assertIn("Invalid DSL", resp.json['error']) def test_validate_dsl_parse_exception(self): resp = self.app.post( '/v2/actions/validate', ACTION_DSL_PARSE_EXCEPTION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) self.assertFalse(resp.json['valid']) self.assertIn("Definition could not be parsed", resp.json['error']) def test_validate_yaql_parse_exception(self): resp = self.app.post( '/v2/actions/validate', ACTION_DEFINITION_INVALID_YAQL, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) self.assertFalse(resp.json['valid']) self.assertIn("unexpected end of statement", resp.json['error']) def test_validate_empty(self): resp = self.app.post( '/v2/actions/validate', '', headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) self.assertFalse(resp.json['valid']) self.assertIn("Invalid DSL", resp.json['error']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_cron_triggers.py0000644000175000017500000002116000000000000025267 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 

# ----------------------------------------------------------------------
# mistral/tests/unit/api/v2/test_cron_triggers.py
# ----------------------------------------------------------------------

# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import json

import mock
import sqlalchemy as sa

from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.services import security
from mistral.tests.unit.api import base
from mistral.tests.unit import base as unit_base

WF = models.WorkflowDefinition(
    spec={
        'version': '2.0',
        'name': 'my_wf',
        'tasks': {
            'task1': {
                'action': 'std.noop'
            }
        }
    }
)
WF.update({'id': '123e4567-e89b-12d3-a456-426655440000', 'name': 'my_wf'})

TRIGGER = {
    'id': '02abb422-55ef-4bb2-8cb9-217a583a6a3f',
    'name': 'my_cron_trigger',
    'pattern': '* * * * *',
    'workflow_name': WF.name,
    'workflow_id': '123e4567-e89b-12d3-a456-426655440000',
    'workflow_input': '{}',
    'workflow_params': '{}',
    'scope': 'private',
    'remaining_executions': 42
}

trigger_values = copy.deepcopy(TRIGGER)
trigger_values['workflow_input'] = json.loads(
    trigger_values['workflow_input'])
trigger_values['workflow_params'] = json.loads(
    trigger_values['workflow_params'])

TRIGGER_DB = models.CronTrigger()
TRIGGER_DB.update(trigger_values)

TRIGGER_DB_WITH_PROJECT_ID = TRIGGER_DB.get_clone()
TRIGGER_DB_WITH_PROJECT_ID.project_id = ''

MOCK_WF = mock.MagicMock(return_value=WF)
MOCK_TRIGGER = mock.MagicMock(return_value=TRIGGER_DB)
MOCK_TRIGGERS = mock.MagicMock(return_value=[TRIGGER_DB])
MOCK_DELETE = mock.MagicMock(return_value=1)
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
MOCK_DUPLICATE = mock.MagicMock(side_effect=exc.DBDuplicateEntryError())


class TestCronTriggerController(base.APITest):
    @mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
    def test_get(self):
        resp = self.app.get('/v2/cron_triggers/my_cron_trigger')

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(TRIGGER, resp.json)

    @mock.patch.object(db_api, 'get_cron_trigger')
    def test_get_operational_error(self, mocked_get):
        mocked_get.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            TRIGGER_DB  # Successful run
        ]

        resp = self.app.get('/v2/cron_triggers/my_cron_trigger')

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(TRIGGER, resp.json)

    @mock.patch.object(db_api, "get_cron_trigger",
                       return_value=TRIGGER_DB_WITH_PROJECT_ID)
    def test_get_within_project_id(self, mock_get):
        resp = self.app.get('/v2/cron_triggers/my_cron_trigger')

        self.assertEqual(200, resp.status_int)
        self.assertTrue('project_id' in resp.json)

    @mock.patch.object(db_api, "get_cron_trigger", MOCK_NOT_FOUND)
    def test_get_not_found(self):
        resp = self.app.get(
            '/v2/cron_triggers/my_cron_trigger',
            expect_errors=True
        )

        self.assertEqual(404, resp.status_int)

    @mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
    def test_get_by_id(self):
        resp = self.app.get(
            "/v2/cron_triggers/02abb422-55ef-4bb2-8cb9-217a583a6a3f")

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(TRIGGER, resp.json)
    @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
    @mock.patch.object(db_api, "create_cron_trigger")
    def test_post(self, mock_mtd):
        mock_mtd.return_value = TRIGGER_DB

        resp = self.app.post_json('/v2/cron_triggers', TRIGGER)

        self.assertEqual(201, resp.status_int)
        self.assertDictEqual(TRIGGER, resp.json)

        self.assertEqual(1, mock_mtd.call_count)

        values = mock_mtd.call_args[0][0]

        self.assertEqual('* * * * *', values['pattern'])
        self.assertEqual(42, values['remaining_executions'])

    @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
    @mock.patch.object(db_api, "create_cron_trigger", MOCK_DUPLICATE)
    @mock.patch.object(security, "delete_trust")
    def test_post_dup(self, delete_trust):
        resp = self.app.post_json(
            '/v2/cron_triggers',
            TRIGGER,
            expect_errors=True
        )

        self.assertEqual(1, delete_trust.call_count)
        self.assertEqual(409, resp.status_int)

    @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
    @mock.patch.object(db_api, "create_cron_trigger", MOCK_DUPLICATE)
    def test_post_same_wf_and_input(self):
        trig = TRIGGER.copy()
        trig['name'] = 'some_trigger_name'

        resp = self.app.post_json(
            '/v2/cron_triggers',
            trig,
            expect_errors=True
        )

        self.assertEqual(409, resp.status_int)

    @mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
    @mock.patch.object(db_api, "delete_cron_trigger", MOCK_DELETE)
    @mock.patch.object(security, "delete_trust")
    def test_delete(self, delete_trust):
        resp = self.app.delete('/v2/cron_triggers/my_cron_trigger')

        self.assertEqual(1, delete_trust.call_count)
        self.assertEqual(204, resp.status_int)

    @mock.patch.object(db_api, "get_cron_trigger", MOCK_TRIGGER)
    @mock.patch.object(db_api, "delete_cron_trigger", MOCK_DELETE)
    @mock.patch.object(security, "delete_trust")
    def test_delete_by_id(self, delete_trust):
        resp = self.app.delete(
            '/v2/cron_triggers/02abb422-55ef-4bb2-8cb9-217a583a6a3f')

        self.assertEqual(1, delete_trust.call_count)
        self.assertEqual(204, resp.status_int)

    @mock.patch.object(db_api, "delete_cron_trigger", MOCK_NOT_FOUND)
    def test_delete_not_found(self):
        resp = self.app.delete(
            '/v2/cron_triggers/my_cron_trigger',
            expect_errors=True
        )

        self.assertEqual(404, resp.status_int)

    @mock.patch.object(db_api, "get_cron_triggers", MOCK_TRIGGERS)
    def test_get_all(self):
        resp = self.app.get('/v2/cron_triggers')

        self.assertEqual(200, resp.status_int)

        self.assertEqual(1, len(resp.json['cron_triggers']))
        self.assertDictEqual(TRIGGER, resp.json['cron_triggers'][0])

    @mock.patch.object(db_api, 'get_cron_triggers')
    def test_get_all_operational_error(self, mocked_get_all):
        mocked_get_all.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            [TRIGGER_DB]  # Successful run
        ]

        resp = self.app.get('/v2/cron_triggers')

        self.assertEqual(200, resp.status_int)

        self.assertEqual(1, len(resp.json['cron_triggers']))
        self.assertDictEqual(TRIGGER, resp.json['cron_triggers'][0])

    @mock.patch.object(db_api, 'get_cron_triggers')
    @mock.patch('mistral.context.MistralContext.from_environ')
    def test_get_all_projects_admin(self, mock_context, mock_get_triggers):
        admin_ctx = unit_base.get_context(admin=True)
        mock_context.return_value = admin_ctx

        resp = self.app.get('/v2/cron_triggers?all_projects=true')

        self.assertEqual(200, resp.status_int)

        self.assertTrue(mock_get_triggers.call_args[1].get('insecure', False))
    @mock.patch.object(db_api, 'get_cron_triggers')
    @mock.patch('mistral.context.MistralContext.from_environ')
    def test_get_all_filter_project(self, mock_context, mock_get_triggers):
        admin_ctx = unit_base.get_context(admin=True)
        mock_context.return_value = admin_ctx

        resp = self.app.get(
            '/v2/cron_triggers?all_projects=true&'
            'project_id=192796e61c174f718d6147b129f3f2ff'
        )

        self.assertEqual(200, resp.status_int)

        self.assertTrue(mock_get_triggers.call_args[1].get('insecure', False))
        self.assertEqual(
            {'eq': '192796e61c174f718d6147b129f3f2ff'},
            mock_get_triggers.call_args[1].get('project_id')
        )

    @mock.patch.object(db_api, "get_cron_triggers", MOCK_EMPTY)
    def test_get_all_empty(self):
        resp = self.app.get('/v2/cron_triggers')

        self.assertEqual(200, resp.status_int)

        self.assertEqual(0, len(resp.json['cron_triggers']))
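
# NOTE: illustrative sketch added by the editor, not part of the upstream
# file. A cron trigger pairs a standard five-field cron pattern with a
# workflow and its input: the '* * * * *' pattern in the fixture above
# fires every minute, and remaining_executions counts down with each
# run until the trigger is exhausted.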

# ----------------------------------------------------------------------
# mistral/tests/unit/api/v2/test_environment.py
# ----------------------------------------------------------------------

# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import datetime
import json

import mock
import six
import sqlalchemy as sa

from mistral.api.controllers.v2 import resources
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models as db
from mistral import exceptions as exc
from mistral.tests.unit.api import base

from oslo_utils import uuidutils

DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'

VARIABLES = {
    'host': 'localhost',
    'db': 'test',
    'timeout': 600,
    'verbose': True,
    '__actions': {
        'std.sql': {
            'conn': 'mysql://admin:secret@<% env().host %>/<% env().db %>'
        }
    }
}

ENVIRONMENT_FOR_CREATE = {
    'name': 'test',
    'description': 'my test settings',
    'variables': VARIABLES,
}

ENVIRONMENT_FOR_UPDATE = {
    'name': 'test',
    'description': 'my test settings',
    'variables': VARIABLES,
    'scope': 'private'
}

ENVIRONMENT_FOR_UPDATE_NO_SCOPE = {
    'name': 'test',
    'description': 'my test settings',
    'variables': VARIABLES
}

ENVIRONMENT = {
    'id': uuidutils.generate_uuid(),
    'name': 'test',
    'description': 'my test settings',
    'variables': VARIABLES,
    'scope': 'private',
    'project_id': '',
    'created_at': str(datetime.datetime.utcnow()),
    'updated_at': str(datetime.datetime.utcnow())
}

ENVIRONMENT_WITH_ILLEGAL_FIELD = {
    'id': uuidutils.generate_uuid(),
    'name': 'test',
    'description': 'my test settings',
    'extra_field': 'I can add whatever I want here',
    'variables': VARIABLES,
    'scope': 'private',
}

ENVIRONMENT_DB = db.Environment(
    id=ENVIRONMENT['id'],
    name=ENVIRONMENT['name'],
    description=ENVIRONMENT['description'],
    variables=copy.deepcopy(VARIABLES),
    scope=ENVIRONMENT['scope'],
    project_id=ENVIRONMENT['project_id'],
    created_at=datetime.datetime.strptime(ENVIRONMENT['created_at'],
                                          DATETIME_FORMAT),
    updated_at=datetime.datetime.strptime(ENVIRONMENT['updated_at'],
                                          DATETIME_FORMAT)
)

ENVIRONMENT_DB_WITH_PROJECT_ID = ENVIRONMENT_DB.get_clone()
ENVIRONMENT_DB_WITH_PROJECT_ID.project_id = ''

ENVIRONMENT_DB_DICT = {k: v for k, v in ENVIRONMENT_DB.items()}

UPDATED_VARIABLES = copy.deepcopy(VARIABLES)
UPDATED_VARIABLES['host'] = '127.0.0.1'

FOR_UPDATED_ENVIRONMENT = copy.deepcopy(ENVIRONMENT_FOR_UPDATE)
FOR_UPDATED_ENVIRONMENT['variables'] = json.dumps(UPDATED_VARIABLES)

UPDATED_ENVIRONMENT = copy.deepcopy(ENVIRONMENT)
UPDATED_ENVIRONMENT['variables'] = json.dumps(UPDATED_VARIABLES)

UPDATED_ENVIRONMENT_DB = db.Environment(**ENVIRONMENT_DB_DICT)
UPDATED_ENVIRONMENT_DB.variables = copy.deepcopy(UPDATED_VARIABLES)

MOCK_ENVIRONMENT = mock.MagicMock(return_value=ENVIRONMENT_DB)
MOCK_ENVIRONMENTS = mock.MagicMock(return_value=[ENVIRONMENT_DB])
MOCK_UPDATED_ENVIRONMENT = mock.MagicMock(return_value=UPDATED_ENVIRONMENT_DB)
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
MOCK_DUPLICATE = mock.MagicMock(side_effect=exc.DBDuplicateEntryError())
MOCK_DELETE = mock.MagicMock(return_value=None)


def _convert_vars_to_dict(env_dict):
    """Converts 'variables' in the given environment dict into dictionary."""
    if ('variables' in env_dict and
            isinstance(env_dict.get('variables'), six.string_types)):
        env_dict['variables'] = json.loads(env_dict['variables'])

    return env_dict


def _convert_vars_to_json(env_dict):
    """Converts 'variables' in the given environment dict into string."""
    if ('variables' in env_dict and
            isinstance(env_dict.get('variables'), dict)):
        env_dict['variables'] = json.dumps(env_dict['variables'])

    return env_dict


class TestEnvironmentController(base.APITest):
    def _assert_dict_equal(self, expected, actual):
        self.assertIsInstance(expected, dict)
        self.assertIsInstance(actual, dict)

        _convert_vars_to_dict(expected)
        _convert_vars_to_dict(actual)

        self.assertDictEqual(expected, actual)

    def test_resource(self):
        resource = resources.Environment(**copy.deepcopy(ENVIRONMENT))

        self._assert_dict_equal(
            copy.deepcopy(ENVIRONMENT),
            resource.to_dict()
        )

    @mock.patch.object(db_api, 'get_environments', MOCK_ENVIRONMENTS)
    def test_get_all(self):
        resp = self.app.get('/v2/environments')

        self.assertEqual(200, resp.status_int)
        self.assertEqual(1, len(resp.json['environments']))

    @mock.patch.object(db_api, 'get_environments')
    def test_get_all_operational_error(self, mocked_get_all):
        mocked_get_all.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            [ENVIRONMENT_DB]  # Successful run
        ]

        resp = self.app.get('/v2/environments')

        self.assertEqual(200, resp.status_int)
        self.assertEqual(1, len(resp.json['environments']))
        self._assert_dict_equal(ENVIRONMENT, resp.json['environments'][0])

    def test_get_all_empty(self):
        resp = self.app.get('/v2/environments')

        self.assertEqual(200, resp.status_int)
        self.assertEqual(0, len(resp.json['environments']))

    @mock.patch.object(db_api, 'get_environment', MOCK_ENVIRONMENT)
    def test_get(self):
        resp = self.app.get('/v2/environments/123')

        self.assertEqual(200, resp.status_int)
        self._assert_dict_equal(ENVIRONMENT, resp.json)

    @mock.patch.object(db_api, 'get_environment')
    def test_get_operational_error(self, mocked_get):
        mocked_get.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            ENVIRONMENT_DB  # Successful run
        ]

        resp = self.app.get('/v2/environments/123')

        self.assertEqual(200, resp.status_int)
        self._assert_dict_equal(ENVIRONMENT, resp.json)

    @mock.patch.object(db_api, 'get_environment',
                       return_value=ENVIRONMENT_DB_WITH_PROJECT_ID)
    def test_get_within_project_id(self, mock_get):
        resp = self.app.get('/v2/environments/123')

        self.assertEqual(200, resp.status_int)
        self.assertEqual('', resp.json['project_id'])

    @mock.patch.object(db_api, "get_environment", MOCK_NOT_FOUND)
    def test_get_not_found(self):
        resp = self.app.get('/v2/environments/123', expect_errors=True)

        self.assertEqual(404, resp.status_int)
    @mock.patch.object(db_api, 'create_environment', MOCK_ENVIRONMENT)
    def test_post(self):
        resp = self.app.post_json(
            '/v2/environments',
            _convert_vars_to_json(copy.deepcopy(ENVIRONMENT_FOR_CREATE))
        )

        self.assertEqual(201, resp.status_int)

        self._assert_dict_equal(copy.deepcopy(ENVIRONMENT), resp.json)

    @mock.patch.object(db_api, 'create_environment', MOCK_ENVIRONMENT)
    def test_post_with_illegal_field(self):
        resp = self.app.post_json(
            '/v2/environments',
            _convert_vars_to_json(
                copy.deepcopy(ENVIRONMENT_WITH_ILLEGAL_FIELD)),
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)

    @mock.patch.object(db_api, 'create_environment', MOCK_DUPLICATE)
    def test_post_dup(self):
        resp = self.app.post_json(
            '/v2/environments',
            _convert_vars_to_json(copy.deepcopy(ENVIRONMENT_FOR_CREATE)),
            expect_errors=True
        )

        self.assertEqual(409, resp.status_int)

    @mock.patch.object(db_api, 'create_environment', MOCK_ENVIRONMENT)
    def test_post_default_scope(self):
        env = _convert_vars_to_json(copy.deepcopy(ENVIRONMENT_FOR_CREATE))

        resp = self.app.post_json('/v2/environments', env)

        self.assertEqual(201, resp.status_int)

        self._assert_dict_equal(copy.deepcopy(ENVIRONMENT), resp.json)

    @mock.patch.object(db_api, 'update_environment',
                       MOCK_UPDATED_ENVIRONMENT)
    def test_put(self):
        resp = self.app.put_json(
            '/v2/environments',
            copy.deepcopy(FOR_UPDATED_ENVIRONMENT)
        )

        self.assertEqual(200, resp.status_int)

        self._assert_dict_equal(UPDATED_ENVIRONMENT, resp.json)

    @mock.patch.object(db_api, 'update_environment',
                       MOCK_UPDATED_ENVIRONMENT)
    def test_put_default_scope(self):
        env = copy.deepcopy(ENVIRONMENT_FOR_UPDATE_NO_SCOPE)
        env['variables'] = json.dumps(env)

        resp = self.app.put_json('/v2/environments', env)

        self.assertEqual(200, resp.status_int)

        self._assert_dict_equal(copy.deepcopy(UPDATED_ENVIRONMENT), resp.json)

    @mock.patch.object(db_api, 'update_environment', MOCK_NOT_FOUND)
    def test_put_not_found(self):
        env = copy.deepcopy(FOR_UPDATED_ENVIRONMENT)

        resp = self.app.put_json(
            '/v2/environments',
            env,
            expect_errors=True
        )

        self.assertEqual(404, resp.status_int)

    @mock.patch.object(db_api, 'delete_environment', MOCK_DELETE)
    def test_delete(self):
        resp = self.app.delete('/v2/environments/123')

        self.assertEqual(204, resp.status_int)

    @mock.patch.object(db_api, 'delete_environment', MOCK_NOT_FOUND)
    def test_delete_not_found(self):
        resp = self.app.delete('/v2/environments/123', expect_errors=True)

        self.assertEqual(404, resp.status_int)
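
# NOTE: illustrative sketch added by the editor, not part of the upstream
# file. The environment API accepts 'variables' either as a dict or as a
# JSON string, which is why the helpers above normalize both directions
# before comparing:
#
#     _convert_vars_to_dict({'variables': '{"host": "localhost"}'})
#     # -> {'variables': {'host': 'localhost'}}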

# ----------------------------------------------------------------------
# mistral/tests/unit/api/v2/test_event_trigger.py
# ----------------------------------------------------------------------

# Copyright 2016 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import json

import mock
import sqlalchemy as sa

from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.services import security
from mistral.services import triggers
from mistral.tests.unit.api import base
from mistral.tests.unit import base as unit_base

WF = models.WorkflowDefinition(
    spec={
        'version': '2.0',
        'name': 'my_wf',
        'tasks': {
            'task1': {
                'action': 'std.noop'
            }
        }
    }
)
WF.update({'id': '123e4567-e89b-12d3-a456-426655440000', 'name': 'my_wf'})

TRIGGER = {
    'id': '09cc56a9-d15e-4494-a6e2-c4ec8bdaacae',
    'name': 'my_event_trigger',
    'workflow_id': '123e4567-e89b-12d3-a456-426655440000',
    'workflow_input': '{}',
    'workflow_params': '{}',
    'scope': 'private',
    'exchange': 'openstack',
    'topic': 'notification',
    'event': 'compute.instance.create.start'
}

trigger_values = copy.deepcopy(TRIGGER)
trigger_values['workflow_input'] = json.loads(
    trigger_values['workflow_input'])
trigger_values['workflow_params'] = json.loads(
    trigger_values['workflow_params'])

TRIGGER_DB = models.EventTrigger()
TRIGGER_DB.update(trigger_values)

MOCK_WF = mock.MagicMock(return_value=WF)
MOCK_TRIGGER = mock.MagicMock(return_value=TRIGGER_DB)
MOCK_TRIGGERS = mock.MagicMock(return_value=[TRIGGER_DB])
MOCK_NONE = mock.MagicMock(return_value=None)
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())


class TestEventTriggerController(base.APITest):
    @mock.patch.object(db_api, "get_event_trigger", MOCK_TRIGGER)
    def test_get(self):
        resp = self.app.get(
            '/v2/event_triggers/09cc56a9-d15e-4494-a6e2-c4ec8bdaacae'
        )

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(TRIGGER, resp.json)

    @mock.patch.object(db_api, 'get_event_trigger')
    def test_get_operational_error(self, mocked_get):
        mocked_get.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            TRIGGER_DB  # Successful run
        ]

        resp = self.app.get(
            '/v2/event_triggers/09cc56a9-d15e-4494-a6e2-c4ec8bdaacae'
        )

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual(TRIGGER, resp.json)

    @mock.patch.object(db_api, "get_event_trigger", MOCK_NOT_FOUND)
    def test_get_not_found(self):
        resp = self.app.get(
            '/v2/event_triggers/09cc56a9-d15e-4494-a6e2-c4ec8bdaacae',
            expect_errors=True
        )

        self.assertEqual(404, resp.status_int)

    @mock.patch.object(db_api, "get_workflow_definition_by_id", MOCK_WF)
    @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
    @mock.patch.object(db_api, "create_event_trigger", MOCK_TRIGGER)
    @mock.patch.object(db_api, "get_event_triggers", MOCK_TRIGGERS)
    @mock.patch('mistral.rpc.clients.get_event_engine_client')
    def test_post(self, mock_rpc_client):
        client = mock.Mock()
        mock_rpc_client.return_value = client

        CREATE_TRIGGER = copy.deepcopy(TRIGGER)
        CREATE_TRIGGER.pop('id')

        resp = self.app.post_json('/v2/event_triggers', CREATE_TRIGGER)

        self.assertEqual(201, resp.status_int)

        self.assertEqual(1, client.create_event_trigger.call_count)

        trigger_db = TRIGGER_DB.to_dict()
        trigger_db['workflow_namespace'] = None

        self.assertDictEqual(
            trigger_db,
            client.create_event_trigger.call_args[0][0]
        )
        self.assertListEqual(
            ['compute.instance.create.start'],
            client.create_event_trigger.call_args[0][1]
        )

    @mock.patch.object(db_api, "get_workflow_definition_by_id", MOCK_WF)
    @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
    @mock.patch.object(triggers, "create_event_trigger")
    def test_post_public(self, create_trigger):
        self.ctx = unit_base.get_context(default=False, admin=True)
        self.mock_ctx.return_value = self.ctx
trigger = copy.deepcopy(TRIGGER) trigger['scope'] = 'public' trigger.pop('id') resp = self.app.post_json('/v2/event_triggers', trigger) self.assertEqual(201, resp.status_int) self.assertTrue(create_trigger.called) self.assertEqual('public', create_trigger.call_args[1]["scope"]) def test_post_no_workflow_id(self): CREATE_TRIGGER = copy.deepcopy(TRIGGER) CREATE_TRIGGER.pop('id') CREATE_TRIGGER.pop('workflow_id') resp = self.app.post_json( '/v2/event_triggers', CREATE_TRIGGER, expect_errors=True ) self.assertEqual(400, resp.status_int) @mock.patch.object(db_api, "get_workflow_definition_by_id", MOCK_NOT_FOUND) def test_post_workflow_not_found(self): CREATE_TRIGGER = copy.deepcopy(TRIGGER) CREATE_TRIGGER.pop('id') resp = self.app.post_json( '/v2/event_triggers', CREATE_TRIGGER, expect_errors=True ) self.assertEqual(404, resp.status_int) @mock.patch.object(db_api, 'get_event_trigger', MOCK_NONE) @mock.patch('mistral.rpc.clients.get_event_engine_client') @mock.patch('mistral.db.v2.api.update_event_trigger') def test_put(self, mock_update, mock_rpc_client): client = mock.Mock() mock_rpc_client.return_value = client UPDATED_TRIGGER = models.EventTrigger() UPDATED_TRIGGER.update(trigger_values) UPDATED_TRIGGER.update({'name': 'new_name'}) mock_update.return_value = UPDATED_TRIGGER resp = self.app.put_json( '/v2/event_triggers/09cc56a9-d15e-4494-a6e2-c4ec8bdaacae', {'name': 'new_name'} ) self.assertEqual(200, resp.status_int) self.assertEqual(1, client.update_event_trigger.call_count) self.assertDictEqual( UPDATED_TRIGGER.to_dict(), client.update_event_trigger.call_args[0][0] ) def test_put_field_not_allowed(self): resp = self.app.put_json( '/v2/event_triggers/09cc56a9-d15e-4494-a6e2-c4ec8bdaacae', {'exchange': 'new_exchange'}, expect_errors=True ) self.assertEqual(400, resp.status_int) @mock.patch('mistral.rpc.clients.get_event_engine_client') @mock.patch('mistral.db.v2.api.get_event_trigger') @mock.patch.object(db_api, "get_event_triggers", mock.MagicMock(return_value=[])) @mock.patch.object(db_api, "delete_event_trigger", MOCK_NONE) @mock.patch.object(security, "delete_trust", MOCK_NONE) def test_delete(self, mock_delete, mock_rpc_client): client = mock.Mock() mock_rpc_client.return_value = client DELETE_TRIGGER = models.EventTrigger() DELETE_TRIGGER.update(trigger_values) DELETE_TRIGGER.update( {'trust_id': 'c30e50e8-ee7d-4f8a-9515-f0530d9dc54b'} ) mock_delete.return_value = DELETE_TRIGGER resp = self.app.delete( '/v2/event_triggers/09cc56a9-d15e-4494-a6e2-c4ec8bdaacae' ) self.assertEqual(204, resp.status_int) self.assertEqual(1, client.delete_event_trigger.call_count) self.assertDictEqual( DELETE_TRIGGER.to_dict(), client.delete_event_trigger.call_args[0][0] ) self.assertListEqual( [], client.delete_event_trigger.call_args[0][1] ) @mock.patch.object(db_api, "get_event_trigger", MOCK_NOT_FOUND) def test_delete_not_found(self): resp = self.app.delete( '/v2/event_triggers/09cc56a9-d15e-4494-a6e2-c4ec8bdaacae', expect_errors=True ) self.assertEqual(404, resp.status_int) @mock.patch.object(db_api, "get_event_triggers", MOCK_TRIGGERS) def test_get_all(self): resp = self.app.get('/v2/event_triggers') self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['event_triggers'])) self.assertDictEqual(TRIGGER, resp.json['event_triggers'][0]) @mock.patch.object(db_api, 'get_event_triggers') def test_get_all_operational_error(self, mocked_get_all): mocked_get_all.side_effect = [ # Emulating DB OperationalError sa.exc.OperationalError('Mock', 'mock', 'mock'), [TRIGGER_DB] # Successful run ] 
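# The controller is expected to retry the DB call on OperationalError, so
# the second (successful) side effect is what ultimately reaches the client.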
resp = self.app.get('/v2/event_triggers') self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['event_triggers'])) self.assertDictEqual(TRIGGER, resp.json['event_triggers'][0]) @mock.patch('mistral.db.v2.api.get_event_triggers') @mock.patch('mistral.context.MistralContext.from_environ') def test_get_all_projects_admin(self, mock_context, mock_get_wf_defs): admin_ctx = unit_base.get_context(admin=True) mock_context.return_value = admin_ctx resp = self.app.get('/v2/event_triggers?all_projects=true') self.assertEqual(200, resp.status_int) self.assertTrue(mock_get_wf_defs.call_args[1].get('insecure', False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_execution_report.py0000644000175000017500000003207500000000000026025 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit.api import base from mistral.tests.unit.engine import base as engine_base from mistral.workflow import states class TestExecutionReportController(base.APITest, engine_base.EngineTestCase): def test_simple_sequence_wf(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.noop on-success: task2 task2: action: std.fail """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) resp = self.app.get('/v2/executions/%s/report' % wf_ex.id) self.assertEqual(200, resp.status_int) # Now let's verify the response structure self.assertIn('root_workflow_execution', resp.json) root_wf_ex = resp.json['root_workflow_execution'] self.assertIsInstance(root_wf_ex, dict) self.assertEqual(wf_ex.id, root_wf_ex['id']) self.assertEqual(wf_ex.name, root_wf_ex['name']) self.assertEqual(states.ERROR, root_wf_ex['state']) self.assertGreater(len(root_wf_ex['state_info']), 0) tasks = root_wf_ex['task_executions'] self.assertIsInstance(tasks, list) self.assertEqual(2, len(tasks)) # Verify task1 info. task1 = self._assert_single_item( tasks, name='task1', state=states.SUCCESS ) self.assertEqual(0, len(task1['workflow_executions'])) self.assertEqual(1, len(task1['action_executions'])) task1_action = task1['action_executions'][0] self.assertEqual(states.SUCCESS, task1_action['state']) self.assertEqual('std.noop', task1_action['name']) # Verify task2 info. task2 = self._assert_single_item( tasks, name='task2', state=states.ERROR ) self.assertEqual(1, len(task2['action_executions'])) task2_action = task2['action_executions'][0] self.assertEqual(0, len(task2['workflow_executions'])) self.assertEqual(states.ERROR, task2_action['state']) # Verify statistics. 
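# task1 succeeded and task2 failed, so the per-state counters below must
# add up to total_tasks_count == 2.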
stat = resp.json['statistics'] self.assertEqual(1, stat['error_tasks_count']) self.assertEqual(0, stat['idle_tasks_count']) self.assertEqual(0, stat['paused_tasks_count']) self.assertEqual(0, stat['running_tasks_count']) self.assertEqual(1, stat['success_tasks_count']) self.assertEqual(2, stat['total_tasks_count']) def test_nested_wf(self): wb_text = """--- version: '2.0' name: wb workflows: parent_wf: tasks: task1: action: std.noop on-success: task2 task2: workflow: sub_wf on-success: task3 task3: action: std.fail sub_wf: tasks: task1: action: std.noop on-success: task2 task2: action: std.fail """ wb_service.create_workbook_v2(wb_text) wf_ex = self.engine.start_workflow('wb.parent_wf') self.await_workflow_error(wf_ex.id) resp = self.app.get('/v2/executions/%s/report' % wf_ex.id) self.assertEqual(200, resp.status_int) # Now let's verify the response structure self.assertIn('root_workflow_execution', resp.json) root_wf_ex = resp.json['root_workflow_execution'] self.assertIsInstance(root_wf_ex, dict) self.assertEqual('wb.parent_wf', root_wf_ex['name']) self.assertEqual(states.ERROR, root_wf_ex['state']) self.assertGreater(len(root_wf_ex['state_info']), 0) tasks = root_wf_ex['task_executions'] self.assertIsInstance(tasks, list) self.assertEqual(2, len(tasks)) # Verify task1 info. task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(states.SUCCESS, task1['state']) self.assertEqual(0, len(task1['workflow_executions'])) self.assertEqual(1, len(task1['action_executions'])) task1_action = task1['action_executions'][0] self.assertEqual(states.SUCCESS, task1_action['state']) self.assertEqual('std.noop', task1_action['name']) # Verify task2 info. task2 = self._assert_single_item(tasks, name='task2') self.assertEqual(states.ERROR, task2['state']) self.assertEqual(0, len(task2['action_executions'])) self.assertEqual(1, len(task2['workflow_executions'])) sub_wf_entry = task2['workflow_executions'][0] self.assertEqual(states.ERROR, sub_wf_entry['state']) sub_wf_tasks = sub_wf_entry['task_executions'] self.assertEqual(2, len(sub_wf_tasks)) sub_wf_task1 = self._assert_single_item( sub_wf_tasks, name='task1', state=states.SUCCESS ) sub_wf_task2 = self._assert_single_item( sub_wf_tasks, name='task2', state=states.ERROR ) self.assertEqual(1, len(sub_wf_task1['action_executions'])) self.assertEqual( states.SUCCESS, sub_wf_task1['action_executions'][0]['state'] ) self.assertEqual(1, len(sub_wf_task2['action_executions'])) self.assertEqual( states.ERROR, sub_wf_task2['action_executions'][0]['state'] ) # Verify statistics. 
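# Across the parent and the sub-workflow, two tasks succeeded (task1 in
# each) and two failed (task2 in each), which is what the counters below
# reflect.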
stat = resp.json['statistics'] self.assertEqual(2, stat['error_tasks_count']) self.assertEqual(0, stat['idle_tasks_count']) self.assertEqual(0, stat['paused_tasks_count']) self.assertEqual(0, stat['running_tasks_count']) self.assertEqual(2, stat['success_tasks_count']) self.assertEqual(4, stat['total_tasks_count']) def test_nested_wf_errors_only(self): wb_text = """--- version: '2.0' name: wb workflows: parent_wf: tasks: task1: action: std.noop on-success: task2 task2: workflow: sub_wf on-success: task3 task3: action: std.fail sub_wf: tasks: task1: action: std.noop on-success: task2 task2: action: std.fail """ wb_service.create_workbook_v2(wb_text) wf_ex = self.engine.start_workflow('wb.parent_wf') self.await_workflow_error(wf_ex.id) resp = self.app.get( '/v2/executions/%s/report?errors_only=true' % wf_ex.id ) self.assertEqual(200, resp.status_int) # Now let's verify the response structure self.assertIn('root_workflow_execution', resp.json) root_wf_ex = resp.json['root_workflow_execution'] self.assertIsInstance(root_wf_ex, dict) self.assertEqual('wb.parent_wf', root_wf_ex['name']) self.assertEqual(states.ERROR, root_wf_ex['state']) self.assertGreater(len(root_wf_ex['state_info']), 0) tasks = root_wf_ex['task_executions'] self.assertIsInstance(tasks, list) self.assertEqual(1, len(tasks)) # There must be only task2 in the response. # Verify task2 info. task2 = self._assert_single_item(tasks, name='task2') self.assertEqual(states.ERROR, task2['state']) self.assertEqual(0, len(task2['action_executions'])) self.assertEqual(1, len(task2['workflow_executions'])) sub_wf_entry = task2['workflow_executions'][0] self.assertEqual(states.ERROR, sub_wf_entry['state']) sub_wf_tasks = sub_wf_entry['task_executions'] self.assertEqual(1, len(sub_wf_tasks)) sub_wf_task2 = self._assert_single_item( sub_wf_tasks, name='task2', state=states.ERROR ) self.assertEqual(1, len(sub_wf_task2['action_executions'])) self.assertEqual( states.ERROR, sub_wf_task2['action_executions'][0]['state'] ) # Verify statistics. stat = resp.json['statistics'] self.assertEqual(2, stat['error_tasks_count']) self.assertEqual(0, stat['idle_tasks_count']) self.assertEqual(0, stat['paused_tasks_count']) self.assertEqual(0, stat['running_tasks_count']) self.assertEqual(0, stat['success_tasks_count']) self.assertEqual(2, stat['total_tasks_count']) def test_nested_wf_max_depth(self): wb_text = """--- version: '2.0' name: wb workflows: parent_wf: tasks: task1: action: std.noop on-success: task2 task2: workflow: sub_wf on-success: task3 task3: action: std.fail sub_wf: tasks: task1: action: std.noop on-success: task2 task2: action: std.fail """ wb_service.create_workbook_v2(wb_text) wf_ex = self.engine.start_workflow('wb.parent_wf') self.await_workflow_error(wf_ex.id) resp = self.app.get('/v2/executions/%s/report?max_depth=0' % wf_ex.id) self.assertEqual(200, resp.status_int) # Now let's verify the response structure self.assertIn('root_workflow_execution', resp.json) root_wf_ex = resp.json['root_workflow_execution'] self.assertIsInstance(root_wf_ex, dict) self.assertEqual('wb.parent_wf', root_wf_ex['name']) self.assertEqual(states.ERROR, root_wf_ex['state']) self.assertGreater(len(root_wf_ex['state_info']), 0) tasks = root_wf_ex['task_executions'] self.assertIsInstance(tasks, list) self.assertEqual(2, len(tasks)) # Verify task1 info. 
task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(states.SUCCESS, task1['state']) self.assertEqual(0, len(task1['workflow_executions'])) self.assertEqual(1, len(task1['action_executions'])) task1_action = task1['action_executions'][0] self.assertEqual(states.SUCCESS, task1_action['state']) self.assertEqual('std.noop', task1_action['name']) # Verify task2 info. task2 = self._assert_single_item(tasks, name='task2') self.assertEqual(states.ERROR, task2['state']) self.assertEqual(0, len(task2['action_executions'])) self.assertEqual(1, len(task2['workflow_executions'])) sub_wf_entry = task2['workflow_executions'][0] self.assertEqual(states.ERROR, sub_wf_entry['state']) # We still must have an entry for the subworkflow itself # but it must not have info about task executions because # we've now limited max depth. self.assertNotIn('task_executions', sub_wf_entry) # Verify statistics. stat = resp.json['statistics'] self.assertEqual(1, stat['error_tasks_count']) self.assertEqual(0, stat['idle_tasks_count']) self.assertEqual(0, stat['paused_tasks_count']) self.assertEqual(0, stat['running_tasks_count']) self.assertEqual(1, stat['success_tasks_count']) self.assertEqual(2, stat['total_tasks_count']) def test_retry_count(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.noop on-success: task2 task2: action: std.fail retry: delay=0.1 count=3 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) resp = self.app.get('/v2/executions/%s/report' % wf_ex.id) self.assertEqual(200, resp.status_int) # Now let's verify the response structure root_wf_ex = resp.json['root_workflow_execution'] tasks = root_wf_ex['task_executions'] self.assertEqual(2, len(tasks)) # Verify task1 presence. self._assert_single_item(tasks, name='task1', state=states.SUCCESS) # Verify task2 info. task2 = self._assert_single_item( tasks, name='task2', state=states.ERROR ) self.assertEqual(3, task2['retry_count']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_executions.py0000644000175000017500000010151000000000000024604 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2015 Huawei Technologies Co., Ltd. # Copyright 2016 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
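# The fixtures in this module pair each WorkflowExecution model with its
# expected JSON form: datetime fields are rendered as 'YYYY-MM-DD HH:MM:SS'
# strings and dict-valued fields ('input', 'output', 'params') as JSON
# strings. A minimal sketch of that rendering, using only the stdlib:
#
#     import datetime
#     import json
#
#     created_at = datetime.datetime(1970, 1, 1)
#     assert str(created_at) == '1970-01-01 00:00:00'
#     assert json.dumps({'foo': 'bar'}) == '{"foo": "bar"}'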
import copy import datetime import json import mock from oslo_config import cfg import oslo_messaging from oslo_utils import uuidutils import sqlalchemy as sa from webtest import app as webtest_app from mistral.api.controllers.v2 import execution from mistral.api.controllers.v2 import resources from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import api as sql_db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral.rpc import base as rpc_base from mistral.rpc import clients as rpc_clients from mistral.tests.unit.api import base from mistral.tests.unit import base as unit_base from mistral.utils import rest_utils from mistral.workflow import states from mistral_lib import utils # This line is needed for correct initialization of messaging config. oslo_messaging.get_rpc_transport(cfg.CONF) WF_EX = models.WorkflowExecution( id='123e4567-e89b-12d3-a456-426655440000', workflow_name='some', workflow_id='123e4567-e89b-12d3-a456-426655441111', description='execution description.', spec={'name': 'some'}, state=states.RUNNING, state_info=None, context={}, input={'foo': 'bar'}, output={}, params={'env': {'k1': 'abc'}}, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1) ) WF_EX_JSON = { 'id': '123e4567-e89b-12d3-a456-426655440000', 'input': '{"foo": "bar"}', 'output': '{}', 'params': '{"env": {"k1": "abc"}}', 'state': 'RUNNING', 'state_info': None, 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00', 'workflow_name': 'some', 'workflow_id': '123e4567-e89b-12d3-a456-426655441111' } SUB_WF_EX = models.WorkflowExecution( id=uuidutils.generate_uuid(), workflow_name='some', workflow_id='123e4567-e89b-12d3-a456-426655441111', description='foobar', spec={'name': 'some'}, state=states.RUNNING, state_info=None, context={}, input={'foo': 'bar'}, output={}, params={'env': {'k1': 'abc'}}, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), task_execution_id=uuidutils.generate_uuid() ) SUB_WF_EX_JSON = { 'id': SUB_WF_EX.id, 'workflow_name': 'some', 'workflow_id': '123e4567-e89b-12d3-a456-426655441111', 'input': '{"foo": "bar"}', 'output': '{}', 'params': '{"env": {"k1": "abc"}}', 'state': 'RUNNING', 'state_info': None, 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00', 'task_execution_id': SUB_WF_EX.task_execution_id } MOCK_SUB_WF_EXECUTIONS = mock.MagicMock(return_value=[SUB_WF_EX]) SUB_WF_EX_JSON_WITH_DESC = copy.deepcopy(SUB_WF_EX_JSON) SUB_WF_EX_JSON_WITH_DESC['description'] = SUB_WF_EX.description UPDATED_WF_EX = copy.deepcopy(WF_EX) UPDATED_WF_EX['state'] = states.PAUSED UPDATED_WF_EX_JSON = copy.deepcopy(WF_EX_JSON) UPDATED_WF_EX_JSON['state'] = states.PAUSED UPDATED_WF_EX_ENV = copy.deepcopy(UPDATED_WF_EX) UPDATED_WF_EX_ENV['params'] = {'env': {'k1': 'def'}} UPDATED_WF_EX_ENV_DESC = copy.deepcopy(UPDATED_WF_EX) UPDATED_WF_EX_ENV_DESC['description'] = 'foobar' UPDATED_WF_EX_ENV_DESC['params'] = {'env': {'k1': 'def'}} WF_EX_JSON_WITH_DESC = copy.deepcopy(WF_EX_JSON) WF_EX_JSON_WITH_DESC['description'] = WF_EX.description WF_EX_WITH_PROJECT_ID = WF_EX.get_clone() WF_EX_WITH_PROJECT_ID.project_id = '' SOURCE_WF_EX = copy.deepcopy(WF_EX) SOURCE_WF_EX['source_execution_id'] = WF_EX.id SOURCE_WF_EX['id'] = uuidutils.generate_uuid() SOURCE_WF_EX_JSON_WITH_DESC = copy.deepcopy(WF_EX_JSON_WITH_DESC) SOURCE_WF_EX_JSON_WITH_DESC['id'] = SOURCE_WF_EX.id SOURCE_WF_EX_JSON_WITH_DESC['source_execution_id'] = \ SOURCE_WF_EX.source_execution_id 
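# Reusable mocks for patching the DB and RPC layers below:
# MagicMock(return_value=...) stands in for a successful call, while
# MagicMock(side_effect=...) makes every call raise the given exception
# instead.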
MOCK_WF_EX = mock.MagicMock(return_value=WF_EX) MOCK_SUB_WF_EX = mock.MagicMock(return_value=SUB_WF_EX) MOCK_SOURCE_WF_EX = mock.MagicMock(return_value=SOURCE_WF_EX) MOCK_WF_EXECUTIONS = mock.MagicMock(return_value=[WF_EX]) MOCK_UPDATED_WF_EX = mock.MagicMock(return_value=UPDATED_WF_EX) MOCK_DELETE = mock.MagicMock(return_value=None) MOCK_EMPTY = mock.MagicMock(return_value=[]) MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError()) MOCK_ACTION_EXC = mock.MagicMock(side_effect=exc.ActionException()) ERROR_WF_EX = copy.deepcopy(WF_EX) ERROR_WF_EX['state'] = states.ERROR MOCK_ERROR_WF_EX = mock.MagicMock(return_value=ERROR_WF_EX) SUCCESS_WF_EX = copy.deepcopy(WF_EX) SUCCESS_WF_EX['state'] = states.SUCCESS MOCK_SUCCESS_WF_EX = mock.MagicMock(return_value=SUCCESS_WF_EX) @mock.patch.object(rpc_base, '_IMPL_CLIENT', mock.Mock()) class TestExecutionsController(base.APITest): @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) def test_get(self): resp = self.app.get('/v2/executions/123') self.assertEqual(200, resp.status_int) expected = WF_EX_JSON_WITH_DESC.copy() expected['published_global'] = '{}' self.assertDictEqual(expected, resp.json) @mock.patch.object(db_api, 'get_workflow_execution') def test_get_operational_error(self, mocked_get): mocked_get.side_effect = [ # Emulating DB OperationalError sa.exc.OperationalError('Mock', 'mock', 'mock'), WF_EX # Successful run ] resp = self.app.get('/v2/executions/123') self.assertEqual(200, resp.status_int) expected = WF_EX_JSON_WITH_DESC.copy() expected['published_global'] = '{}' self.assertDictEqual(expected, resp.json) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_SUB_WF_EX) def test_get_sub_wf_ex(self): resp = self.app.get('/v2/executions/123') self.assertEqual(200, resp.status_int) expected = SUB_WF_EX_JSON_WITH_DESC.copy() expected['published_global'] = '{}' self.assertDictEqual(expected, resp.json) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_NOT_FOUND) def test_get_not_found(self): resp = self.app.get('/v2/executions/123', expect_errors=True) self.assertEqual(404, resp.status_int) @mock.patch.object(db_api, 'get_workflow_execution', return_value=WF_EX_WITH_PROJECT_ID) def test_get_within_project_id(self, mock_get): resp = self.app.get('/v2/executions/123', expect_errors=True) self.assertEqual(200, resp.status_int) self.assertTrue('project_id' in resp.json) @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=None) ) @mock.patch.object( rpc_clients.EngineClient, 'pause_workflow', MOCK_UPDATED_WF_EX ) def test_put_state_paused(self): update_exec = { 'id': WF_EX['id'], 'state': states.PAUSED } resp = self.app.put_json('/v2/executions/123', update_exec) expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC) expected_exec['state'] = states.PAUSED self.assertEqual(200, resp.status_int) self.assertDictEqual(expected_exec, resp.json) @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=None) ) @mock.patch.object(rpc_clients.EngineClient, 'stop_workflow') def test_put_state_error(self, mock_stop_wf): update_exec = { 'id': WF_EX['id'], 'state': states.ERROR, 'state_info': 'Force' } wf_ex = copy.deepcopy(WF_EX) wf_ex['state'] = states.ERROR wf_ex['state_info'] = 'Force' mock_stop_wf.return_value = wf_ex resp = self.app.put_json('/v2/executions/123', update_exec) expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC) expected_exec['state'] = states.ERROR expected_exec['state_info'] = 'Force' self.assertEqual(200, resp.status_int) 
self.assertDictEqual(expected_exec, resp.json) mock_stop_wf.assert_called_once_with('123', 'ERROR', 'Force') @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=None) ) @mock.patch.object(rpc_clients.EngineClient, 'stop_workflow') def test_put_state_cancelled(self, mock_stop_wf): update_exec = { 'id': WF_EX['id'], 'state': states.CANCELLED, 'state_info': 'Cancelled by user.' } wf_ex = copy.deepcopy(WF_EX) wf_ex['state'] = states.CANCELLED wf_ex['state_info'] = 'Cancelled by user.' mock_stop_wf.return_value = wf_ex resp = self.app.put_json('/v2/executions/123', update_exec) expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC) expected_exec['state'] = states.CANCELLED expected_exec['state_info'] = 'Cancelled by user.' self.assertEqual(200, resp.status_int) self.assertDictEqual(expected_exec, resp.json) mock_stop_wf.assert_called_once_with( '123', 'CANCELLED', 'Cancelled by user.' ) @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=None) ) @mock.patch.object(rpc_clients.EngineClient, 'resume_workflow') def test_put_state_resume(self, mock_resume_wf): update_exec = { 'id': WF_EX['id'], 'state': states.RUNNING } wf_ex = copy.deepcopy(WF_EX) wf_ex['state'] = states.RUNNING wf_ex['state_info'] = None mock_resume_wf.return_value = wf_ex resp = self.app.put_json('/v2/executions/123', update_exec) expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC) expected_exec['state'] = states.RUNNING expected_exec['state_info'] = None self.assertEqual(200, resp.status_int) self.assertDictEqual(expected_exec, resp.json) mock_resume_wf.assert_called_once_with('123', env=None) @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=None) ) def test_put_invalid_state(self): invalid_states = [states.IDLE, states.WAITING, states.RUNNING_DELAYED] for state in invalid_states: update_exec = { 'id': WF_EX['id'], 'state': state } resp = self.app.put_json( '/v2/executions/123', update_exec, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn( 'Cannot change state to %s.' 
% state, resp.json['faultstring'] ) @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=None) ) @mock.patch.object(rpc_clients.EngineClient, 'stop_workflow') def test_put_state_info_unset(self, mock_stop_wf): update_exec = { 'id': WF_EX['id'], 'state': states.ERROR, } wf_ex = copy.deepcopy(WF_EX) wf_ex['state'] = states.ERROR del wf_ex.state_info mock_stop_wf.return_value = wf_ex resp = self.app.put_json('/v2/executions/123', update_exec) expected_exec = copy.deepcopy(WF_EX_JSON_WITH_DESC) expected_exec['state'] = states.ERROR expected_exec['state_info'] = None self.assertEqual(200, resp.status_int) self.assertDictEqual(expected_exec, resp.json) mock_stop_wf.assert_called_once_with('123', 'ERROR', None) @mock.patch('mistral.db.v2.api.get_workflow_execution') @mock.patch( 'mistral.db.v2.api.update_workflow_execution', return_value=WF_EX ) def test_put_description(self, mock_update, mock_ensure): update_params = {'description': 'execution description.'} resp = self.app.put_json('/v2/executions/123', update_params) self.assertEqual(200, resp.status_int) mock_ensure.assert_called_once_with( '123', fields=(models.WorkflowExecution.id,) ) mock_update.assert_called_once_with('123', update_params) @mock.patch.object( sql_db_api, 'get_workflow_execution', mock.MagicMock(return_value=copy.deepcopy(UPDATED_WF_EX)) ) @mock.patch( 'mistral.services.workflows.update_workflow_execution_env', return_value=copy.deepcopy(UPDATED_WF_EX_ENV) ) def test_put_env(self, mock_update_env): update_exec = {'params': '{"env": {"k1": "def"}}'} resp = self.app.put_json('/v2/executions/123', update_exec) self.assertEqual(200, resp.status_int) self.assertEqual(update_exec['params'], resp.json['params']) mock_update_env.assert_called_once_with(UPDATED_WF_EX, {'k1': 'def'}) @mock.patch.object(db_api, 'update_workflow_execution', MOCK_NOT_FOUND) def test_put_not_found(self): resp = self.app.put_json( '/v2/executions/123', dict(state=states.PAUSED), expect_errors=True ) self.assertEqual(404, resp.status_int) @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=None) ) def test_put_empty(self): resp = self.app.put_json('/v2/executions/123', {}, expect_errors=True) self.assertEqual(400, resp.status_int) self.assertIn( 'state, description, or env is not provided for update', resp.json['faultstring'] ) @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=None) ) def test_put_state_and_description(self): resp = self.app.put_json( '/v2/executions/123', {'description': 'foobar', 'state': states.ERROR}, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn( 'description must be updated separately from state', resp.json['faultstring'] ) @mock.patch.object( sql_db_api, 'get_workflow_execution', mock.MagicMock(return_value=copy.deepcopy(UPDATED_WF_EX)) ) @mock.patch( 'mistral.db.v2.api.update_workflow_execution', return_value=WF_EX ) @mock.patch( 'mistral.services.workflows.update_workflow_execution_env', return_value=copy.deepcopy(UPDATED_WF_EX_ENV_DESC) ) def test_put_env_and_description(self, mock_update_env, mock_update): update_exec = { 'description': 'foobar', 'params': '{"env": {"k1": "def"}}' } resp = self.app.put_json('/v2/executions/123', update_exec) self.assertEqual(200, resp.status_int) self.assertEqual(update_exec['description'], resp.json['description']) self.assertEqual(update_exec['params'], resp.json['params']) mock_update.assert_called_once_with('123', {'description': 'foobar'}) 
mock_update_env.assert_called_once_with(UPDATED_WF_EX, {'k1': 'def'}) @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=None) ) def test_put_env_wrong_state(self): update_exec = { 'id': WF_EX['id'], 'state': states.SUCCESS, 'params': '{"env": {"k1": "def"}}' } resp = self.app.put_json( '/v2/executions/123', update_exec, expect_errors=True ) self.assertEqual(400, resp.status_int) expected_fault = ( 'env can only be updated when workflow execution ' 'is not running or on resume from pause' ) self.assertIn(expected_fault, resp.json['faultstring']) @mock.patch.object(rpc_clients.EngineClient, 'start_workflow') def test_post_auto_id(self, start_wf_func): # NOTE: In fact, we use "white box" testing here to understand # if the REST controller calls other APIs as expected. This is # the only way of testing available with the current testing # infrastructure. wf_ex_dict = WF_EX.to_dict() start_wf_func.return_value = wf_ex_dict json_body = WF_EX_JSON_WITH_DESC.copy() expected_json = WF_EX_JSON_WITH_DESC resp = self.app.post_json('/v2/executions', json_body) self.assertEqual(201, resp.status_int) self.assertDictEqual(expected_json, resp.json) kwargs = json.loads(expected_json['params']) kwargs['description'] = expected_json['description'] start_wf_func.assert_called_once_with( expected_json['workflow_id'], '', wf_ex_dict['id'], json.loads(expected_json['input']), **kwargs ) @mock.patch.object(rpc_clients.EngineClient, 'start_workflow') @mock.patch.object(db_api, 'load_workflow_execution') def test_post_with_exec_id_exec_doesnt_exist(self, load_wf_ex_func, start_wf_func): # NOTE: In fact, we use "white box" testing here to understand # if the REST controller calls other APIs as expected. This is # the only way of testing available with the current testing # infrastructure. # Imitate that the execution doesn't exist in DB. load_wf_ex_func.return_value = None start_wf_func.return_value = WF_EX.to_dict() # We want to pass execution ID in this case so we don't delete 'id' # from the dict. json_body = WF_EX_JSON_WITH_DESC.copy() expected_json = WF_EX_JSON_WITH_DESC resp = self.app.post_json('/v2/executions', json_body) self.assertEqual(201, resp.status_int) self.assertDictEqual(expected_json, resp.json) load_wf_ex_func.assert_called_once_with(expected_json['id']) kwargs = json.loads(expected_json['params']) kwargs['description'] = expected_json['description'] start_wf_func.assert_called_once_with( expected_json['workflow_id'], '', expected_json['id'], json.loads(expected_json['input']), **kwargs ) @mock.patch.object(rpc_clients.EngineClient, 'start_workflow') @mock.patch.object(db_api, 'load_workflow_execution') def test_post_with_exec_id_exec_exists(self, load_wf_ex_func, start_wf_func): # NOTE: In fact, we use "white box" testing here to understand # if the REST controller calls other APIs as expected. This is # the only way of testing available with the current testing # infrastructure. # Imitate that the execution exists in DB. load_wf_ex_func.return_value = WF_EX # We want to pass execution ID in this case so we don't delete 'id' # from the dict. 
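# Because load_workflow_execution() returns an existing object for this ID,
# the POST must behave idempotently: the stored execution is returned as-is
# and no new workflow is started (asserted further below).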
json_body = WF_EX_JSON_WITH_DESC.copy() expected_json = WF_EX_JSON_WITH_DESC resp = self.app.post_json('/v2/executions', json_body) self.assertEqual(201, resp.status_int) self.assertDictEqual(expected_json, resp.json) load_wf_ex_func.assert_called_once_with(expected_json['id']) # Note that "start_workflow" method on engine API should not be called # in this case because we passed execution ID to the endpoint and the # corresponding object exists. start_wf_func.assert_not_called() @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(rpc_clients.EngineClient, 'start_workflow') def test_post_with_source_execution_id(self, wf_exec_mock): wf_exec_mock.return_value = SOURCE_WF_EX.to_dict() resp = self.app.post_json('/v2/executions/', SOURCE_WF_EX_JSON_WITH_DESC) source_wf_ex_json = copy.copy(SOURCE_WF_EX_JSON_WITH_DESC) del source_wf_ex_json['source_execution_id'] self.assertEqual(201, resp.status_int) self.assertDictEqual(source_wf_ex_json, resp.json) exec_dict = source_wf_ex_json expected_description = "{} Based on the execution '{}'".format( exec_dict['description'], SOURCE_WF_EX_JSON_WITH_DESC['source_execution_id'] ) wf_exec_mock.assert_called_once_with( exec_dict['workflow_id'], '', exec_dict['id'], json.loads(exec_dict['input']), description=expected_description, **json.loads(exec_dict['params']) ) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(rpc_clients.EngineClient, 'start_workflow') def test_post_with_src_exec_id_without_exec_id(self, wf_exec_mock): source_wf_ex = copy.copy(SOURCE_WF_EX) source_wf_ex_json = copy.copy(SOURCE_WF_EX_JSON_WITH_DESC) wf_exec_mock.return_value = source_wf_ex.to_dict() resp = self.app.post_json('/v2/executions/', source_wf_ex_json) del source_wf_ex_json['source_execution_id'] self.assertEqual(201, resp.status_int) self.assertDictEqual(source_wf_ex_json, resp.json) exec_dict = source_wf_ex_json expected_description = "{} Based on the execution '{}'".format( exec_dict['description'], SOURCE_WF_EX_JSON_WITH_DESC['source_execution_id'] ) wf_exec_mock.assert_called_once_with( exec_dict['workflow_id'], '', exec_dict['id'], json.loads(exec_dict['input']), description=expected_description, **json.loads(exec_dict['params']) ) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_EMPTY) @mock.patch.object(rpc_clients.EngineClient, 'start_workflow') def test_post_without_source_execution_id(self, wf_exec_mock): wf_exec_mock.return_value = SOURCE_WF_EX.to_dict() source_wf_ex_json = copy.copy(SOURCE_WF_EX_JSON_WITH_DESC) source_wf_ex_json['source_execution_id'] = "" # here we want to pass an empty value into the api for the # source execution id to make sure that the correct actions are # taken. resp = self.app.post_json('/v2/executions/', source_wf_ex_json) self.assertEqual(201, resp.status_int) del source_wf_ex_json['source_execution_id'] # here we have to remove the source execution key as the # id is only used to perform a lookup. 
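# With an empty source_execution_id there is nothing to look up, so the
# description must be passed through unchanged (no "Based on the execution"
# suffix) in the start_workflow call verified below.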
self.assertDictEqual(source_wf_ex_json, resp.json) exec_dict = source_wf_ex_json wf_exec_mock.assert_called_once_with( exec_dict['workflow_id'], '', exec_dict['id'], json.loads(exec_dict['input']), description=exec_dict['description'], **json.loads(exec_dict['params']) ) @mock.patch.object(rpc_clients.EngineClient, 'start_workflow') def test_post_with_params_none(self, start_wf_func): wf_ex_dict = WF_EX.to_dict() start_wf_func.return_value = wf_ex_dict json_body = WF_EX_JSON_WITH_DESC.copy() json_body['params'] = None expected_json = WF_EX_JSON_WITH_DESC resp = self.app.post_json('/v2/executions', json_body) self.assertEqual(201, resp.status_int) self.assertDictEqual(expected_json, resp.json) @mock.patch.object( rpc_clients.EngineClient, 'start_workflow', MOCK_ACTION_EXC ) def test_post_throws_exception(self): context = self.assertRaises( webtest_app.AppError, self.app.post_json, '/v2/executions', WF_EX_JSON ) self.assertIn('Bad response: 400', context.args[0]) def test_post_without_workflow_id_and_name(self): context = self.assertRaises( webtest_app.AppError, self.app.post_json, '/v2/executions', {'description': 'some description here.'} ) self.assertIn('Bad response: 400', context.args[0]) @mock.patch.object( db_api, 'get_workflow_execution', mock.MagicMock(return_value=(states.RUNNING,)) ) def test_delete_running_execution(self): resp = self.app.delete('/v2/executions/123', expect_errors=True) self.assertEqual(403, resp.status_int) self.assertIn( "Only completed executions can be deleted. " "Use --force to override this. " "Execution 123 is in RUNNING state", resp.body.decode() ) @mock.patch.object(db_api, 'get_workflow_execution', mock.MagicMock(return_value=(states.ERROR,))) @mock.patch.object(db_api, 'delete_workflow_execution', MOCK_DELETE) def test_delete_error_exec(self): resp = self.app.delete('/v2/executions/123') self.assertEqual(204, resp.status_int) @mock.patch.object(db_api, 'get_workflow_execution', mock.MagicMock(return_value=(states.SUCCESS,))) @mock.patch.object(db_api, 'delete_workflow_execution', MOCK_DELETE) def test_delete_success_exec(self): resp = self.app.delete('/v2/executions/123') self.assertEqual(204, resp.status_int) @mock.patch.object(db_api, 'delete_workflow_execution', MOCK_NOT_FOUND) def test_delete_not_found(self): resp = self.app.delete('/v2/executions/123', expect_errors=True) self.assertEqual(404, resp.status_int) @mock.patch.object(db_api, 'get_workflow_executions', MOCK_WF_EXECUTIONS) def test_get_all(self): resp = self.app.get('/v2/executions') self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['executions'])) self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json['executions'][0]) @mock.patch.object(db_api, 'get_workflow_executions') def test_get_all_operational_error(self, mocked_get_all): mocked_get_all.side_effect = [ # Emulating DB OperationalError sa.exc.OperationalError('Mock', 'mock', 'mock'), [WF_EX] # Successful run ] resp = self.app.get('/v2/executions') self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['executions'])) self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json['executions'][0]) @mock.patch.object(db_api, 'get_workflow_executions', MOCK_EMPTY) def test_get_all_empty(self): resp = self.app.get('/v2/executions') self.assertEqual(200, resp.status_int) self.assertEqual(0, len(resp.json['executions'])) @mock.patch.object(db_api, "get_workflow_executions", MOCK_WF_EXECUTIONS) def test_get_all_pagination(self): resp = self.app.get( '/v2/executions?limit=1&sort_keys=id,workflow_name' 
'&sort_dirs=asc,desc') self.assertEqual(200, resp.status_int) self.assertIn('next', resp.json) self.assertEqual(1, len(resp.json['executions'])) self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json['executions'][0]) param_dict = utils.get_dict_from_string( resp.json['next'].split('?')[1], delimiter='&' ) expected_dict = { 'marker': '123e4567-e89b-12d3-a456-426655440000', 'limit': 1, 'sort_keys': 'id,workflow_name', 'sort_dirs': 'asc,desc' } self.assertDictEqual(expected_dict, param_dict) def test_get_all_pagination_limit_negative(self): resp = self.app.get( '/v2/executions?limit=-1&sort_keys=id&sort_dirs=asc', expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("Limit must be positive", resp.body.decode()) def test_get_all_pagination_limit_not_integer(self): resp = self.app.get( '/v2/executions?limit=1.1&sort_keys=id&sort_dirs=asc', expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("unable to convert to int", resp.body.decode()) def test_get_all_pagination_invalid_sort_dirs_length(self): resp = self.app.get( '/v2/executions?limit=1&sort_keys=id&sort_dirs=asc,asc', expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn( "Length of sort_keys must be equal or greater than sort_dirs", resp.body.decode() ) def test_get_all_pagination_unknown_direction(self): resp = self.app.get( '/v2/actions?limit=1&sort_keys=id&sort_dirs=nonexist', expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("Unknown sort direction", resp.body.decode()) @mock.patch.object( db_api, 'get_workflow_executions', MOCK_SUB_WF_EXECUTIONS ) def test_get_task_workflow_executions(self): resp = self.app.get( '/v2/tasks/%s/workflow_executions' % SUB_WF_EX.task_execution_id ) self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['executions'])) self.assertDictEqual( SUB_WF_EX_JSON_WITH_DESC, resp.json['executions'][0] ) @mock.patch.object(db_api, 'get_workflow_executions', MOCK_WF_EXECUTIONS) @mock.patch.object(rest_utils, 'get_all', return_value=resources.Executions()) def test_get_all_executions_with_output(self, mock_get_all): resp = self.app.get('/v2/executions?include_output=true') self.assertEqual(200, resp.status_int) args, kwargs = mock_get_all.call_args resource_function = kwargs['resource_function'] self.assertEqual( execution._get_workflow_execution_resource_with_output, resource_function ) @mock.patch.object(db_api, 'get_workflow_executions', MOCK_WF_EXECUTIONS) @mock.patch.object(rest_utils, 'get_all', return_value=resources.Executions()) def test_get_all_executions_without_output(self, mock_get_all): resp = self.app.get('/v2/executions') self.assertEqual(200, resp.status_int) args, kwargs = mock_get_all.call_args resource_function = kwargs['resource_function'] self.assertEqual( execution._get_workflow_execution_resource, resource_function ) @mock.patch('mistral.db.v2.api.get_workflow_executions') @mock.patch('mistral.context.MistralContext.from_environ') def test_get_all_projects_admin(self, mock_context, mock_get_execs): admin_ctx = unit_base.get_context(admin=True) mock_context.return_value = admin_ctx resp = self.app.get('/v2/executions?all_projects=true') self.assertEqual(200, resp.status_int) self.assertTrue(mock_get_execs.call_args[1].get('insecure', False)) def test_get_all_projects_normal_user(self): resp = self.app.get( '/v2/executions?all_projects=true', expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch('mistral.db.v2.api.get_workflow_executions') 
@mock.patch('mistral.context.MistralContext.from_environ') def test_get_all_filter_by_project_id(self, mock_context, mock_get_execs): admin_ctx = unit_base.get_context(admin=True) mock_context.return_value = admin_ctx fake_project_id = uuidutils.generate_uuid() resp = self.app.get('/v2/executions?project_id=%s' % fake_project_id) self.assertEqual(200, resp.status_int) self.assertTrue(mock_get_execs.call_args[1].get('insecure', False)) self.assertTrue( mock_get_execs.call_args[1].get('project_id', fake_project_id) ) def test_get_all_with_nulls_not_valid(self): resp = self.app.get( '/v2/executions?limit=10&sort_keys=id&sort_dirs=asc&nulls=invalid', expect_errors=True ) self.assertEqual(500, resp.status_int) self.assertIn( "'invalid' is not a valid field name.", resp.body.decode() ) resp = self.app.get( '/v2/executions?limit=10&sort_keys=id&sort_dirs=asc&nulls=id', expect_errors=True ) self.assertEqual(500, resp.status_int) self.assertIn( "The field 'id' can't hold None value.", resp.body.decode() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_global_publish.py0000644000175000017500000000445700000000000025420 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
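# The workflow below publishes the same variable into two scopes: 'branch'
# (visible to downstream tasks of the branch via $.my_var) and 'global'
# (visible anywhere via the global() expression function). The tests verify
# that the 'published_global' field exposed by the API contains only the
# globally published value.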
from mistral.services import workflows as wf_service from mistral.tests.unit.api import base from mistral.tests.unit.engine import base as engine_base WF_TEXT = """--- version: '2.0' wf: tasks: task1: action: std.noop on-success: publish: branch: my_var: Branch local value global: my_var: Global value next: - task2 task2: action: std.noop publish: local: <% $.my_var %> global: <% global(my_var) %> """ def _find_task(task_name, tasks): return next( ( task for task in tasks if task['name'] == task_name ), None ) class TestGlobalPublish(base.APITest, engine_base.EngineTestCase): def setUp(self): super(TestGlobalPublish, self).setUp() wf_service.create_workflows(WF_TEXT) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) self.wf_id = wf_ex.id def test_global_publish_in_task_exec(self): resp = self.app.get('/v2/tasks/') tasks = resp.json['tasks'] task = _find_task('task1', tasks) self.assertIsNotNone(task, 'task1 not found') resp = self.app.get('/v2/tasks/%s/' % task['id']) self.assert_for_published_global(resp) def test_global_publish_in_wf_exec(self): resp = self.app.get('/v2/executions/%s/' % self.wf_id) self.assert_for_published_global(resp) def assert_for_published_global(self, resp): self.assertEqual(200, resp.status_int) self.assertEqual( resp.json['published_global'], '{"my_var": "Global value"}' ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_keycloak_auth.py0000644000175000017500000003105000000000000025242 0ustar00coreycorey00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import json import mock import pecan import pecan.testing import requests import requests_mock import webob from mistral.api import app as pecan_app from mistral.auth import keycloak from mistral import context from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral.services import periodic from mistral.tests.unit import base from mistral.tests.unit.mstrlfixtures import policy_fixtures KEYCLOAK_JSON = { "keys": [ { "kid": "FJ86GcF3jTbNLOco4NvZkUCIUmfYCqoqtOQeMfbhNlE", "kty": "RSA", "alg": "RS256", "use": "sig", "n": "q1awrk7QK24Gmcy9Yb4dMbS-ZnO6", "e": "AQAB" } ] } WF_DEFINITION = """ --- version: '2.0' flow: type: direct input: - param1 tasks: task1: action: std.echo output="Hi" """ WF_DB = models.WorkflowDefinition( id='123e4567-e89b-12d3-a456-426655440000', name='flow', definition=WF_DEFINITION, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), spec={'input': ['param1']} ) WF = { 'id': '123e4567-e89b-12d3-a456-426655440000', 'name': 'flow', 'definition': WF_DEFINITION, 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00', 'input': 'param1', 'interface': {"input": ["param1"], "output": []} } MOCK_WF = mock.MagicMock(return_value=WF_DB) # Set up config options. 
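# USER_INFO_ENDPOINT below resolves to
# https://my.keycloak.com:8443/auth/realms/my_realm/protocol/openid-connect/userinfo,
# the standard OpenID Connect userinfo path for a Keycloak realm.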
AUTH_URL = 'https://my.keycloak.com:8443/auth' REALM_NAME = 'my_realm' USER_INFO_ENDPOINT = ( "%s/realms/%s/protocol/openid-connect/userinfo" % (AUTH_URL, REALM_NAME) ) USER_CLAIMS = { "sub": "248289761001", "name": "Jane Doe", "given_name": "Jane", "family_name": "Doe", "preferred_username": "j.doe", "email": "janedoe@example.com", "picture": "http://example.com/janedoe/me.jpg" } WWW_AUTHENTICATE_HEADER = {'WWW-Authenticate': 'unauthorized reason is ...'} class TestKeyCloakOIDCAuth(base.BaseTest): def setUp(self): super(TestKeyCloakOIDCAuth, self).setUp() self.override_config('auth_url', AUTH_URL, group='keycloak_oidc') self.auth_handler = keycloak.KeycloakAuthHandler() def _build_request(self, token): req = webob.Request.blank("/") req.headers["x-auth-token"] = token req.get_response = lambda app: None return req @mock.patch("requests.get") def test_header_parsing(self, mocked_get): self.override_config( 'user_info_endpoint_url', 'https://127.0.0.1:9080', 'keycloak_oidc' ) token = { "iss": "http://localhost:8080/auth/realms/my_realm", "realm_access": { "roles": ["role1", "role2"] } } mocked_resp = mock.Mock() mocked_resp.status_code = 200 mocked_resp.json.return_value = KEYCLOAK_JSON mocked_get.return_value = mocked_resp req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self.auth_handler.authenticate(req) self.assertEqual("Confirmed", req.headers["X-Identity-Status"]) self.assertEqual("my_realm", req.headers["X-Project-Id"]) self.assertEqual("role1,role2", req.headers["X-Roles"]) self.assertEqual(1, mocked_get.call_count) def test_no_auth_token(self): req = webob.Request.blank("/") self.assertRaises( exc.UnauthorizedException, self.auth_handler.authenticate, req ) @mock.patch("requests.get") def test_no_realm_roles(self, mocked_get): token = { "aud": "openstack", "iss": "http://localhost:8080/auth/realms/my_realm", } mocked_resp = mock.Mock() mocked_resp.status_code = 200 mocked_resp.json.return_value = KEYCLOAK_JSON mocked_get.return_value = mocked_resp req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self.auth_handler.authenticate(req) self.assertEqual("Confirmed", req.headers["X-Identity-Status"]) self.assertEqual("my_realm", req.headers["X-Project-Id"]) self.assertEqual("", req.headers["X-Roles"]) def test_wrong_token_format(self): req = self._build_request(token="WRONG_FORMAT_TOKEN") self.assertRaises( exc.UnauthorizedException, self.auth_handler.authenticate, req ) @requests_mock.Mocker() def test_server_unauthorized(self, req_mock): self.override_config( 'user_info_endpoint_url', 'https://127.0.0.1:9080', 'keycloak_oidc' ) token = { "aud": "openstack", "iss": "http://localhost:8080/auth/realms/my_realm", } # Imitate failure response from KeyCloak. 
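# requests_mock intercepts the userinfo GET and returns 401; the
# '401 Client Error ... for url' text asserted below matches the message
# that requests' Response.raise_for_status() produces for 4xx responses.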
req_mock.get( 'https://127.0.0.1:9080', status_code=401, reason='Access token is invalid', headers=WWW_AUTHENTICATE_HEADER ) req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): try: self.auth_handler.authenticate(req) except requests.exceptions.HTTPError as e: self.assertIn( "401 Client Error: Access token is invalid for url", str(e) ) self.assertEqual( 'unauthorized reason is ...', e.response.headers.get('WWW-Authenticate') ) else: raise Exception("Test is broken") @mock.patch("requests.get") def test_connection_error(self, mocked_get): token = { "aud": "openstack", "iss": "http://localhost:8080/auth/realms/my_realm", "realm_access": { "roles": ["role1", "role2"] } } mocked_get.side_effect = requests.ConnectionError req = self._build_request(token) with mock.patch("jwt.decode", return_value=token): self.assertRaises( exc.MistralException, self.auth_handler.authenticate, req ) class TestKeyCloakOIDCAuthScenarios(base.DbTestCase): def setUp(self): super(TestKeyCloakOIDCAuthScenarios, self).setUp() self.override_config('enabled', False, group='cron_trigger') self.override_config('auth_enable', True, group='pecan') self.override_config('auth_type', 'keycloak-oidc') self.override_config('auth_url', AUTH_URL, group='keycloak_oidc') self.app = pecan.testing.load_test_app( dict(pecan_app.get_pecan_config()) ) # Adding cron trigger thread clean up explicitly in case if # new tests will provide an alternative configuration for pecan # application. self.addCleanup(periodic.stop_all_periodic_tasks) # Make sure the api get the correct context. self.patch_ctx = mock.patch( 'mistral.context.MistralContext.from_environ' ) self.mock_ctx = self.patch_ctx.start() self.mock_ctx.return_value = self.ctx self.addCleanup(self.patch_ctx.stop) self.policy = self.useFixture(policy_fixtures.PolicyFixture()) @mock.patch("requests.get") @mock.patch.object(db_api, 'get_workflow_definition', MOCK_WF) def test_get_workflow_success_auth(self, mocked_get): mocked_resp = mock.Mock() mocked_resp.status_code = 200 mocked_resp.json.return_value = KEYCLOAK_JSON mocked_get.return_value = mocked_resp token = { "iss": "http://localhost:8080/auth/realms/%s" % REALM_NAME, "realm_access": { "roles": ["role1", "role2"] } } headers = {'X-Auth-Token': str(token)} with mock.patch("jwt.decode", return_value=token): resp = self.app.get('/v2/workflows/123', headers=headers) resp_json = resp.json resp_json['interface'] = json.loads(resp_json['interface']) self.assertEqual(200, resp.status_code) self.assertDictEqual(WF, resp_json) @mock.patch("requests.get") @mock.patch.object(db_api, 'get_workflow_definition', MOCK_WF) def test_get_workflow_invalid_token_format(self, mocked_get): token = 'WRONG_FORMAT_TOKEN' headers = {'X-Auth-Token': str(token)} # We don't mock jwt.decode so the test must fail. resp = self.app.get( '/v2/workflows/123', headers=headers, expect_errors=True ) self.assertEqual(401, resp.status_code) self.assertEqual('401 Unauthorized', resp.status) self.assertIn('Failed to validate access token', resp.text) self.assertIn( "Token can't be decoded because of wrong format", resp.text ) @mock.patch("requests.get") @mock.patch.object(db_api, 'get_workflow_definition', MOCK_WF) def test_get_workflow_failed_auth(self, mocked_get): mocked_resp = mock.Mock() mocked_resp.status_code = 200 mocked_resp.json.return_value = KEYCLOAK_JSON mocked_get.return_value = mocked_resp # A token without an issuer (iss). 
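# Without 'iss' the handler cannot determine which realm the token belongs
# to, so validation fails; the KeyError on 'iss' surfaces in the 401 fault
# text asserted below.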
token = { "realm_access": { "roles": ["role1", "role2"] } } headers = {'X-Auth-Token': str(token)} with mock.patch("jwt.decode", return_value=token): resp = self.app.get( '/v2/workflows/123', headers=headers, expect_errors=True ) self.assertEqual(401, resp.status_code) self.assertEqual('401 Unauthorized', resp.status) self.assertIn("Failed to validate access token: 'iss'", resp.text) class TestKeyCloakOIDCAuthApp(base.DbTestCase): """Test that Keycloak auth params get passed to the security context.""" def setUp(self): super(TestKeyCloakOIDCAuthApp, self).setUp() self.override_config('enabled', False, group='cron_trigger') self.override_config('auth_enable', True, group='pecan') self.override_config('auth_type', 'keycloak-oidc') self.override_config('auth_url', AUTH_URL, group='keycloak_oidc') self.app = pecan.testing.load_test_app( dict(pecan_app.get_pecan_config()) ) # Adding cron trigger thread clean up explicitly in case if # new tests will provide an alternative configuration for pecan # application. self.addCleanup(periodic.stop_all_periodic_tasks) self.policy = self.useFixture(policy_fixtures.PolicyFixture()) @mock.patch("requests.get") @mock.patch.object(db_api, 'get_workflow_definition', MOCK_WF) def test_params_transition(self, mocked_get): mocked_resp = mock.Mock() mocked_resp.status_code = 200 mocked_resp.json.return_value = KEYCLOAK_JSON mocked_get.return_value = mocked_resp token = { "iss": "http://localhost:8080/auth/realms/%s" % REALM_NAME, "realm_access": { "roles": ["role1", "role2"] } } headers = {'X-Auth-Token': str(token)} with mock.patch("jwt.decode", return_value=token): with mock.patch("mistral.context.set_ctx") as mocked_set_cxt: self.app.get('/v2/workflows/123', headers=headers) calls = mocked_set_cxt.call_args_list self.assertEqual(2, len(calls)) # First positional argument of the first call ('before'). ctx = calls[0][0][0] self.assertIsInstance(ctx, context.MistralContext) self.assertEqual('my_realm', ctx.project_id) self.assertEqual(["role1", "role2"], ctx.roles) # Second call of set_ctx ('after'), where we reset the context. self.assertIsNone(calls[1][0][0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_members.py0000644000175000017500000003004500000000000024054 0ustar00coreycorey00000000000000# Copyright 2016 - Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
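# The resource-member (sharing) API modeled here is a simple handshake: the
# owner POSTs a member entry that starts in 'pending' status, and the target
# project PUTs it to 'accepted'. Switching between projects in these tests
# is simulated by patching mistral.services.security.get_project_id.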
import copy import mock from oslo_config import cfg from oslo_utils import uuidutils import sqlalchemy as sa from mistral.db.v2 import api as db_api from mistral.services import security from mistral.tests.unit.api import base GET_PROJECT_PATH = 'mistral.services.security.get_project_id' WF_DEFINITION = { 'name': 'test_wf', 'definition': 'empty', 'spec': {}, 'tags': ['mc'], 'scope': 'private', 'project_id': security.get_project_id(), 'trust_id': '1234' } WORKFLOW_MEMBER_PENDING = { 'member_id': '11-22-33', 'project_id': '', 'resource_type': 'workflow', 'status': 'pending' } WORKFLOW_MEMBER_ACCEPTED = {} MEMBER_URL = None class TestMembersController(base.APITest): def setUp(self): super(TestMembersController, self).setUp() self.override_config('auth_enable', True, group='pecan') wf = db_api.create_workflow_definition(WF_DEFINITION) global MEMBER_URL, WORKFLOW_MEMBER_ACCEPTED MEMBER_URL = '/v2/workflows/%s/members' % wf.id WORKFLOW_MEMBER_PENDING['resource_id'] = wf.id WORKFLOW_MEMBER_ACCEPTED = copy.deepcopy(WORKFLOW_MEMBER_PENDING) WORKFLOW_MEMBER_ACCEPTED['status'] = 'accepted' cfg.CONF.set_default('auth_enable', True, group='pecan') def test_membership_api_without_auth(self): self.override_config('auth_enable', False, group='pecan') resp = self.app.get(MEMBER_URL, expect_errors=True) self.assertEqual(400, resp.status_int) self.assertIn( "Resource sharing feature can only be supported with " "authentication enabled", resp.body.decode() ) @mock.patch('mistral.context.AuthHook.before') def test_create_resource_member(self, auth_mock): # Workflow owner shares workflow to another tenant. resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) self._assert_dict_contains_subset(WORKFLOW_MEMBER_PENDING, resp.json) @mock.patch('mistral.context.AuthHook.before') def test_create_membership_nonexistent_wf(self, auth_mock): nonexistent_wf_id = uuidutils.generate_uuid() resp = self.app.post_json( '/v2/workflows/%s/members' % nonexistent_wf_id, {'member_id': '11-22-33'}, expect_errors=True ) self.assertEqual(404, resp.status_int) @mock.patch('mistral.context.AuthHook.before') def test_create_duplicate_membership(self, auth_mock): resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) resp = self.app.post_json( MEMBER_URL, {'member_id': '11-22-33'}, expect_errors=True ) self.assertEqual(409, resp.status_int) self.assertIn("Duplicate entry for ResourceMember", resp.body.decode()) @mock.patch('mistral.context.AuthHook.before') def test_create_membership_public_wf(self, auth_mock): WF_DEFINITION_PUBLIC = copy.deepcopy(WF_DEFINITION) WF_DEFINITION_PUBLIC['name'] = 'test_wf1' WF_DEFINITION_PUBLIC['scope'] = 'public' wf_public = db_api.create_workflow_definition(WF_DEFINITION_PUBLIC) resp = self.app.post_json( '/v2/workflows/%s/members' % wf_public.id, {'member_id': '11-22-33'}, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn( "Only private resource could be shared", resp.body.decode() ) @mock.patch('mistral.context.AuthHook.before') def test_create_membership_untransferable(self, auth_mock): resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) # Using mock to switch to another tenant. 
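# The member project ('11-22-33') now acts as the caller and tries to share
# the workflow further; a member cannot transfer a resource it doesn't own,
# hence the 404 asserted below.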
get_mock = mock.MagicMock(return_value='11-22-33') with mock.patch(GET_PROJECT_PATH, get_mock): resp = self.app.post_json( MEMBER_URL, {'member_id': 'other_tenant'}, expect_errors=True ) self.assertEqual(404, resp.status_int) @mock.patch('mistral.context.AuthHook.before') def test_get_other_memberships(self, auth_mock): resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) # Using mock to switch to another tenant. get_mock = mock.MagicMock(return_value='other_tenant') with mock.patch(GET_PROJECT_PATH, get_mock): resp = self.app.get(MEMBER_URL) self.assertEqual(200, resp.status_int) self.assertEqual(0, len(resp.json['members'])) @mock.patch('mistral.context.AuthHook.before') @mock.patch.object(db_api, 'get_resource_member') def test_get_operational_error(self, mocked_get, auth_mock): member_data = {'member_id': '11-22-33'} mocked_get.side_effect = [ # Emulating DB OperationalError sa.exc.OperationalError('Mock', 'mock', 'mock'), member_data # Successful run ] resp = self.app.post_json(MEMBER_URL, member_data) self.assertEqual(201, resp.status_int) # Using mock to switch to another tenant. get_mock = mock.MagicMock(return_value='other_tenant') with mock.patch(GET_PROJECT_PATH, get_mock): resp = self.app.get(MEMBER_URL) self.assertEqual(200, resp.status_int) self.assertEqual(0, len(resp.json['members'])) @mock.patch('mistral.context.AuthHook.before') def test_get_memberships_nonexistent_wf(self, auth_mock): nonexistent_wf_id = uuidutils.generate_uuid() resp = self.app.get('/v2/workflows/%s/members' % nonexistent_wf_id) self.assertEqual(200, resp.status_int) self.assertEqual(0, len(resp.json['members'])) @mock.patch('mistral.context.AuthHook.before') def test_get_resource_memberships(self, auth_mock): # Workflow owner shares workflow to another tenant. resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) self._assert_dict_contains_subset(WORKFLOW_MEMBER_PENDING, resp.json) # Workflow owner queries the workflow members. resp = self.app.get(MEMBER_URL) self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['members'])) self._assert_dict_contains_subset( WORKFLOW_MEMBER_PENDING, resp.json['members'][0] ) # Workflow owner queries the exact workflow member. resp = self.app.get('%s/11-22-33' % MEMBER_URL) self.assertEqual(200, resp.status_int) self._assert_dict_contains_subset( WORKFLOW_MEMBER_PENDING, resp.json ) @mock.patch('mistral.context.AuthHook.before') def test_get_other_membership(self, auth_mock): resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) # Using mock to switch to another tenant. get_mock = mock.MagicMock(return_value='other_tenant') with mock.patch(GET_PROJECT_PATH, get_mock): resp = self.app.get( '%s/11-22-33' % MEMBER_URL, expect_errors=True ) self.assertEqual(404, resp.status_int) @mock.patch('mistral.context.AuthHook.before') def test_update_membership(self, auth_mock): # Workflow owner shares workflow to another tenant. resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) # Using mock to switch to another tenant. get_mock = mock.MagicMock(return_value='11-22-33') # Tenant accepts the workflow shared to him.
with mock.patch(GET_PROJECT_PATH, get_mock): resp = self.app.put_json( '%s/11-22-33' % MEMBER_URL, {'status': 'accepted'} ) self.assertEqual(200, resp.status_int) self._assert_dict_contains_subset( WORKFLOW_MEMBER_ACCEPTED, resp.json ) # Tenant queries exact member of workflow shared to him. # (status=accepted). with mock.patch(GET_PROJECT_PATH, get_mock): resp = self.app.get('%s/11-22-33' % MEMBER_URL) self.assertEqual(200, resp.status_int) self._assert_dict_contains_subset( WORKFLOW_MEMBER_ACCEPTED, resp.json ) # Workflow owner queries the exact workflow member. # (status=accepted). resp = self.app.get('%s/11-22-33' % MEMBER_URL) self.assertEqual(200, resp.status_int) self._assert_dict_contains_subset( WORKFLOW_MEMBER_ACCEPTED, resp.json ) @mock.patch('mistral.context.AuthHook.before') def test_update_membership_invalid_status(self, auth_mock): resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) # Using mock to switch to another tenant. get_mock = mock.MagicMock(return_value='11-22-33') with mock.patch(GET_PROJECT_PATH, get_mock): resp = self.app.put_json( '%s/11-22-33' % MEMBER_URL, {'status': 'invalid'}, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn( "Invalid input", resp.body.decode() ) @mock.patch('mistral.context.AuthHook.before') def test_update_membership_not_shared_user(self, auth_mock): resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) resp = self.app.put_json( '%s/11-22-33' % MEMBER_URL, {'status': 'accepted'}, expect_errors=True ) self.assertEqual(404, resp.status_int) @mock.patch('mistral.context.AuthHook.before') def test_delete_membership(self, auth_mock): # Workflow owner shares workflow to another tenant. resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) # Workflow owner deletes the exact workflow member. resp = self.app.delete('%s/11-22-33' % MEMBER_URL) self.assertEqual(204, resp.status_int) # Workflow owner queries the workflow members. resp = self.app.get(MEMBER_URL) self.assertEqual(200, resp.status_int) self.assertEqual(0, len(resp.json['members'])) # Using mock to switch to another tenant. get_mock = mock.MagicMock(return_value='11-22-33') # Tenant queries members of workflow shared to him, after deletion. with mock.patch(GET_PROJECT_PATH, get_mock): resp = self.app.get(MEMBER_URL) self.assertEqual(200, resp.status_int) self.assertEqual(0, len(resp.json['members'])) @mock.patch('mistral.context.AuthHook.before') def test_delete_membership_not_owner(self, auth_mock): resp = self.app.post_json(MEMBER_URL, {'member_id': '11-22-33'}) self.assertEqual(201, resp.status_int) # Using mock to switch to another tenant. get_mock = mock.MagicMock(return_value='11-22-33') with mock.patch(GET_PROJECT_PATH, get_mock): resp = self.app.delete( '%s/11-22-33' % MEMBER_URL, expect_errors=True ) self.assertEqual(404, resp.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_root.py0000644000175000017500000001130400000000000023402 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_serialization import jsonutils from mistral.tests.unit.api import base from mistral.tests.unit.api import test_auth from mistral.tests.unit.api import test_oslo_middleware class TestRootController(base.APITest): def test_index(self): resp = self.app.get('/', headers={'Accept': 'application/json'}) self.assertEqual(200, resp.status_int) data = jsonutils.loads(resp.body.decode()) data = data['versions'] self.assertEqual('v2.0', data[0]['id']) self.assertEqual('CURRENT', data[0]['status']) self.assertEqual( [{'href': 'http://localhost/v2', 'rel': 'self', 'target': 'v2'}], data[0]['links'] ) def test_v2_root(self): resp = self.app.get('/v2/', headers={'Accept': 'application/json'}) self.assertEqual(200, resp.status_int) data = jsonutils.loads(resp.body.decode()) self.assertEqual( 'http://localhost/v2', data['uri'] ) class TestRootControllerWithAuth(test_auth.TestKeystoneMiddleware): def test_index(self): resp = self.app.get('/', headers={'Accept': 'application/json'}) self.assertEqual(200, resp.status_int) data = jsonutils.loads(resp.body.decode()) data = data['versions'] self.assertEqual('v2.0', data[0]['id']) self.assertEqual('CURRENT', data[0]['status']) self.assertEqual( [{'href': 'http://localhost/v2', 'rel': 'self', 'target': 'v2'}], data[0]['links'] ) def test_v2_root(self): resp = self.app.get('/v2/', headers={'Accept': 'application/json'}) self.assertEqual(200, resp.status_int) data = jsonutils.loads(resp.body.decode()) self.assertEqual( 'http://localhost/v2', data['uri'] ) class TestRootControllerWithHTTPProxyToWSGI(test_oslo_middleware. 
TestHTTPProxyToWSGIMiddleware): def test_index(self): resp = self.app.get('/', headers={'Accept': 'application/json', 'Host': 'localhost'}) self.assertEqual(200, resp.status_int) data = jsonutils.loads(resp.body.decode()) data = data['versions'] self.assertEqual('v2.0', data[0]['id']) self.assertEqual('CURRENT', data[0]['status']) self.assertEqual( [{'href': 'http://localhost/v2', 'rel': 'self', 'target': 'v2'}], data[0]['links'] ) def test_v2_root(self): resp = self.app.get('/v2/', headers={'Accept': 'application/json', 'Host': 'localhost'}) self.assertEqual(200, resp.status_int) data = jsonutils.loads(resp.body.decode()) self.assertEqual( 'http://localhost/v2', data['uri'] ) def test_index_with_prefix(self): resp = self.app.get('/', headers={'Accept': 'application/json', 'Host': 'openstack', 'X-Forwarded-Proto': 'https', 'X-Forwarded-Prefix': '/workflowv2'}) self.assertEqual(200, resp.status_int) data = jsonutils.loads(resp.body.decode()) data = data['versions'] self.assertEqual('v2.0', data[0]['id']) self.assertEqual('CURRENT', data[0]['status']) self.assertEqual( [{'href': 'https://openstack/workflowv2/v2', 'rel': 'self', 'target': 'v2'}], data[0]['links'] ) def test_v2_root_with_prefix(self): resp = self.app.get('/v2/', headers={'Accept': 'application/json', 'Host': 'openstack', 'X-Forwarded-Proto': 'https', 'X-Forwarded-Prefix': '/workflowv2'}) self.assertEqual(200, resp.status_int) data = jsonutils.loads(resp.body.decode()) self.assertEqual( 'https://openstack/workflowv2/v2', data['uri'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_services.py0000644000175000017500000000472600000000000024254 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
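# Editor's note: the tests below exercise service discovery through
# Mistral's coordination layer. The 'zake://' backend URL selects an
# in-memory fake ZooKeeper driver (from the zake library), so no real
# coordination service is required. A minimal sketch of the setup each
# test performs, using only the calls that appear in this module:
#
#     cfg.CONF.set_default('backend_url', 'zake://', 'coordination')
#     coordination.cleanup_service_coordinator()
#     coordinator = coordination.get_service_coordinator(my_id='service1')
#     coordinator.join_group('api_group')
#     # The member is now reported by GET /v2/services.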
import mock from oslo_config import cfg import tooz.coordination from webtest import app as webtest_app from mistral.service import coordination from mistral.tests.unit.api import base class TestServicesController(base.APITest): def test_get_all(self): cfg.CONF.set_default('backend_url', 'zake://', 'coordination') coordination.cleanup_service_coordinator() service_coordinator = coordination.get_service_coordinator( my_id='service1' ) service_coordinator.join_group('api_group') resp = self.app.get('/v2/services') self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['services'])) srv_ret = [{"name": "service1", "type": "api_group"}] self.assertItemsEqual(srv_ret, resp.json['services']) def test_get_all_without_backend(self): cfg.CONF.set_default('backend_url', None, 'coordination') coordination.cleanup_service_coordinator() coordination.get_service_coordinator() context = self.assertRaises( webtest_app.AppError, self.app.get, '/v2/services', ) self.assertIn('Service API is not supported', context.args[0]) @mock.patch('mistral.service.coordination.ServiceCoordinator.get_members', side_effect=tooz.coordination.ToozError('error message')) def test_get_all_with_get_members_error(self, mock_get_members): cfg.CONF.set_default('backend_url', 'zake://', 'coordination') coordination.cleanup_service_coordinator() coordination.get_service_coordinator() context = self.assertRaises( webtest_app.AppError, self.app.get, '/v2/services', ) self.assertIn( 'Failed to get service members from coordination backend', context.args[0] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_sub_execution.py0000644000175000017500000001306600000000000025302 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
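# Editor's note: WF_TEXT below defines a three-level workflow tree; keeping
# its shape in mind makes the expected execution counts in the tests obvious:
#
#     main_wf
#       └── middle_wf
#             ├── action_wf   (std.noop, succeeds)
#             └── fail_wf     (std.fail, so the whole tree ends in ERROR)
#
# Listing the sub-executions of main_wf therefore yields 4 items (the root
# plus all descendants), listing those of middle_wf yields 3, and
# max_depth=1 on main_wf yields only main_wf and middle_wf.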
from mistral.services import workflows as wf_service from mistral.tests.unit.api import base from mistral.tests.unit.engine import base as engine_base WF_TEXT = """--- version: "2.0" action_wf: tasks: action_task: action: std.noop fail_wf: tasks: fail_task: action: std.fail middle_wf: tasks: middle_task: workflow: action_wf fail_task: workflow: fail_wf main_wf: tasks: main_task: workflow: middle_wf """ class TestSubExecutionsController(base.APITest, engine_base.EngineTestCase): def setUp(self): super(TestSubExecutionsController, self).setUp() wf_service.create_workflows(WF_TEXT) def test_sub_executions_wf_ex_id(self): wf_ex = self.engine.start_workflow('main_wf') self.await_workflow_error(wf_ex.id) resp = self.app.get('/v2/executions/%s/executions' % wf_ex.id) self.assertEqual(200, resp.status_int) main_wf_ex_list = resp.json['executions'] self.assertEqual(4, len(main_wf_ex_list)) self._assert_single_item(main_wf_ex_list, workflow_name='main_wf') self._assert_single_item(main_wf_ex_list, workflow_name='action_wf') self._assert_single_item(main_wf_ex_list, workflow_name='fail_wf') middle_wf = self._assert_single_item( main_wf_ex_list, workflow_name='middle_wf' ) # Check the sub-executions of a sub-execution. resp = self.app.get('/v2/executions/%s/executions' % middle_wf['id']) self.assertEqual(200, resp.status_int) middle_wf_ex_list = resp.json['executions'] self.assertEqual(3, len(middle_wf_ex_list)) self._assert_single_item(middle_wf_ex_list, workflow_name='middle_wf') self._assert_single_item(middle_wf_ex_list, workflow_name='action_wf') self._assert_single_item(middle_wf_ex_list, workflow_name='fail_wf') def test_sub_executions_errors_only(self): wf_ex = self.engine.start_workflow('main_wf') self.await_workflow_error(wf_ex.id) resp = self.app.get( '/v2/executions/%s/executions?errors_only=True' % wf_ex.id ) self.assertEqual(200, resp.status_int) main_wf_ex_list = resp.json['executions'] self.assertEqual(3, len(main_wf_ex_list)) self._assert_single_item(main_wf_ex_list, workflow_name='middle_wf') self._assert_single_item(main_wf_ex_list, workflow_name='fail_wf') self._assert_no_item(main_wf_ex_list, workflow_name='action_wf') def test_sub_executions_with_max_depth(self): wf_ex = self.engine.start_workflow('main_wf') self.await_workflow_error(wf_ex.id) resp = self.app.get( '/v2/executions/%s/executions?max_depth=1' % wf_ex.id ) self.assertEqual(200, resp.status_int) main_wf_ex_list = resp.json['executions'] self.assertEqual(2, len(main_wf_ex_list)) self._assert_single_item(main_wf_ex_list, workflow_name='middle_wf') self._assert_single_item(main_wf_ex_list, workflow_name='main_wf') def test_sub_executions_task_id(self): wf_ex = self.engine.start_workflow('main_wf') self.await_workflow_error(wf_ex.id) resp = self.app.get('/v2/executions/%s/executions' % wf_ex.id) self.assertEqual(200, resp.status_int) main_wf_ex_list = resp.json['executions'] self.assertEqual(4, len(main_wf_ex_list)) middle_wf = self._assert_single_item( main_wf_ex_list, workflow_name='middle_wf' ) resp = self.app.get( '/v2/tasks/%s/executions' % middle_wf['task_execution_id'] ) self.assertEqual(200, resp.status_int) main_task_ex_list = resp.json['executions'] self.assertEqual(3, len(main_task_ex_list)) self._assert_single_item(main_task_ex_list, workflow_name='fail_wf') self._assert_single_item(main_task_ex_list, workflow_name='middle_wf') self._assert_single_item(main_task_ex_list, workflow_name='action_wf') def test_sub_executions_with_include_output(self): wf_ex = self.engine.start_workflow('main_wf')
self.await_workflow_error(wf_ex.id) resp = self.app.get( '/v2/executions/%s/executions?include_output=true' % wf_ex.id ) self.assertEqual(200, resp.status_int) main_wf = self._assert_single_item( resp.json['executions'], workflow_name='main_wf' ) self.assertIsNotNone(main_wf.get('output')) resp = self.app.get('/v2/executions/%s/executions' % wf_ex.id) self.assertEqual(200, resp.status_int) main_wf = self._assert_single_item( resp.json['executions'], workflow_name='main_wf' ) self.assertIsNone(main_wf.get('output')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_tasks.py0000644000175000017500000003351600000000000023555 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2019 - NetCracker Technology Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime import json import mock import sqlalchemy as sa from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral.rpc import clients as rpc from mistral.tests.unit.api import base from mistral.workflow import data_flow from mistral.workflow import states # TODO(everyone): later we need additional tests verifying all the errors etc. 
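# Editor's note: several tests below set a mock's side_effect to a list of
# [OperationalError, <real value>]: the first DB read raises a transient
# error and the second succeeds, which verifies that the REST layer retries
# such failures instead of surfacing a 500. The recurring sketch:
#
#     mocked_get.side_effect = [
#         sa.exc.OperationalError('Mock', 'mock', 'mock'),  # first call fails
#         TASK_EX  # the retry succeeds
#     ]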
RESULT = {"some": "result"} PUBLISHED = {"var": "val"} RUNTIME_CONTEXT = { 'triggered_by': [ { 'task_id': '123-123-123', 'event': 'on-success' } ] } WF_EX = models.WorkflowExecution( id='abc', workflow_name='some', description='execution description.', spec={'name': 'some'}, state=states.RUNNING, state_info=None, input={'foo': 'bar'}, output={}, params={'env': {'k1': 'abc'}}, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1) ) TASK_EX = models.TaskExecution( id='123', name='task', workflow_name='flow', workflow_id='123e4567-e89b-12d3-a456-426655441111', spec={ 'type': 'direct', 'version': '2.0', 'name': 'task' }, action_spec={}, state=states.RUNNING, tags=['a', 'b'], in_context={}, runtime_context=RUNTIME_CONTEXT, workflow_execution_id=WF_EX.id, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), started_at=datetime.datetime(1970, 1, 1), finished_at=datetime.datetime(1970, 1, 1), published=PUBLISHED, processed=True ) WITH_ITEMS_TASK_EX = models.TaskExecution( id='123', name='task', workflow_name='flow', workflow_id='123e4567-e89b-12d3-a456-426655441111', spec={ 'type': 'direct', 'version': '2.0', 'name': 'task', 'with-items': 'var in [1, 2, 3]' }, action_spec={}, state=states.RUNNING, tags=['a', 'b'], in_context={}, runtime_context=RUNTIME_CONTEXT, workflow_execution_id=WF_EX.id, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), started_at=datetime.datetime(1970, 1, 1), finished_at=datetime.datetime(1970, 1, 1), published=PUBLISHED, processed=True ) TASK = { 'id': '123', 'name': 'task', 'workflow_name': 'flow', 'workflow_id': '123e4567-e89b-12d3-a456-426655441111', 'tags': ['a', 'b'], 'state': 'RUNNING', 'workflow_execution_id': WF_EX.id, 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00', 'started_at': '1970-01-01 00:00:00', 'finished_at': '1970-01-01 00:00:00', 'result': json.dumps(RESULT), 'published': json.dumps(PUBLISHED), 'runtime_context': json.dumps(RUNTIME_CONTEXT), 'processed': True } TASK_WITHOUT_RESULT = copy.deepcopy(TASK) del TASK_WITHOUT_RESULT['result'] UPDATED_TASK_EX = copy.deepcopy(TASK_EX) UPDATED_TASK_EX['state'] = 'SUCCESS' UPDATED_TASK = copy.deepcopy(TASK) UPDATED_TASK['state'] = 'SUCCESS' ERROR_TASK_EX = copy.deepcopy(TASK_EX) ERROR_TASK_EX['state'] = 'ERROR' ERROR_ITEMS_TASK_EX = copy.deepcopy(WITH_ITEMS_TASK_EX) ERROR_ITEMS_TASK_EX['state'] = 'ERROR' ERROR_TASK = copy.deepcopy(TASK) ERROR_TASK['state'] = 'ERROR' BROKEN_TASK = copy.deepcopy(TASK) RERUN_TASK = { 'id': '123', 'state': 'RUNNING' } MOCK_WF_EX = mock.MagicMock(return_value=WF_EX) TASK_EX.workflow_execution = WF_EX MOCK_TASK = mock.MagicMock(return_value=TASK_EX) MOCK_TASKS = mock.MagicMock(return_value=[TASK_EX]) MOCK_EMPTY = mock.MagicMock(return_value=[]) MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError()) MOCK_ERROR_TASK = mock.MagicMock(return_value=ERROR_TASK_EX) MOCK_ERROR_ITEMS_TASK = mock.MagicMock(return_value=ERROR_ITEMS_TASK_EX) TASK_EX_WITH_PROJECT_ID = TASK_EX.get_clone() TASK_EX_WITH_PROJECT_ID.project_id = '' TASK_EX_WITH_PROJECT_ID.workflow_execution = WF_EX @mock.patch.object( data_flow, 'get_task_execution_result', mock.Mock(return_value=RESULT) ) class TestTasksController(base.APITest): @mock.patch.object(db_api, 'get_task_execution', MOCK_TASK) def test_get(self): resp = self.app.get('/v2/tasks/123') self.assertEqual(200, resp.status_int) self.assertDictEqual(TASK, resp.json) @mock.patch.object(db_api, 'get_task_execution') def 
test_get_operational_error(self, mocked_get): mocked_get.side_effect = [ # Emulating DB OperationalError sa.exc.OperationalError('Mock', 'mock', 'mock'), TASK_EX # Successful run ] resp = self.app.get('/v2/tasks/123') self.assertEqual(200, resp.status_int) self.assertDictEqual(TASK, resp.json) @mock.patch.object(db_api, 'get_task_execution', MOCK_NOT_FOUND) def test_get_not_found(self): resp = self.app.get('/v2/tasks/123', expect_errors=True) self.assertEqual(404, resp.status_int) @mock.patch.object(db_api, 'get_task_executions', MOCK_TASKS) def test_get_all(self): resp = self.app.get('/v2/tasks') self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['tasks'])) self.assertDictEqual(TASK_WITHOUT_RESULT, resp.json['tasks'][0]) @mock.patch.object(db_api, 'get_task_executions') def test_get_all_operational_error(self, mocked_get_all): mocked_get_all.side_effect = [ # Emulating DB OperationalError sa.exc.OperationalError('Mock', 'mock', 'mock'), [TASK_EX] # Successful run ] resp = self.app.get('/v2/tasks') self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['tasks'])) self.assertDictEqual(TASK_WITHOUT_RESULT, resp.json['tasks'][0]) @mock.patch.object(db_api, 'get_task_execution', return_value=TASK_EX_WITH_PROJECT_ID) def test_get_within_project_id(self, mock_get): resp = self.app.get('/v2/tasks/123') self.assertEqual(200, resp.status_int) self.assertTrue('project_id' in resp.json) @mock.patch.object(db_api, 'get_task_executions', MOCK_EMPTY) def test_get_all_empty(self): resp = self.app.get('/v2/tasks') self.assertEqual(200, resp.status_int) self.assertEqual(0, len(resp.json['tasks'])) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object( db_api, 'get_task_execution', mock.MagicMock(side_effect=[ERROR_TASK_EX, TASK_EX]) ) @mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX) def test_put(self): params = copy.deepcopy(RERUN_TASK) params['reset'] = True resp = self.app.put_json('/v2/tasks/123', params=params) self.assertEqual(200, resp.status_int) self.assertDictEqual(TASK, resp.json) rpc.EngineClient.rerun_workflow.assert_called_with( TASK_EX.id, reset=params['reset'], env=None ) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object( db_api, 'get_task_execution', mock.MagicMock(side_effect=[ERROR_TASK_EX, TASK_EX]) ) @mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX) def test_put_missing_reset(self): params = copy.deepcopy(RERUN_TASK) resp = self.app.put_json( '/v2/tasks/123', params=params, expect_errors=True) self.assertEqual(400, resp.status_int) self.assertIn('faultstring', resp.json) self.assertIn('Mandatory field missing', resp.json['faultstring']) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object( db_api, 'get_task_execution', mock.MagicMock(side_effect=[ERROR_ITEMS_TASK_EX, WITH_ITEMS_TASK_EX]) ) @mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX) def test_put_with_items(self): params = copy.deepcopy(RERUN_TASK) params['reset'] = False resp = self.app.put_json('/v2/tasks/123', params=params) self.assertEqual(200, resp.status_int) self.assertDictEqual(TASK, resp.json) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object( db_api, 'get_task_execution', mock.MagicMock(side_effect=[ERROR_TASK_EX, TASK_EX]) ) @mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX) def test_put_env(self): params = copy.deepcopy(RERUN_TASK) params['reset'] = True params['env'] = '{"k1": "def"}' 
resp = self.app.put_json('/v2/tasks/123', params=params) self.assertEqual(200, resp.status_int) self.assertDictEqual(TASK, resp.json) rpc.EngineClient.rerun_workflow.assert_called_with( TASK_EX.id, reset=params['reset'], env=json.loads(params['env']) ) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(db_api, 'get_task_execution', MOCK_TASK) def test_put_current_task_not_in_error(self): params = copy.deepcopy(RERUN_TASK) params['reset'] = True resp = self.app.put_json( '/v2/tasks/123', params=params, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn('faultstring', resp.json) self.assertIn('execution must be in ERROR', resp.json['faultstring']) @mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK) def test_put_current_task_in_error(self): params = copy.deepcopy(RERUN_TASK) params['reset'] = True params['env'] = '{"k1": "def"}' resp = self.app.put_json('/v2/tasks/123', params=params) self.assertEqual(200, resp.status_int) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK) def test_put_invalid_state(self): params = copy.deepcopy(RERUN_TASK) params['state'] = states.IDLE params['reset'] = True resp = self.app.put_json( '/v2/tasks/123', params=params, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn('faultstring', resp.json) self.assertIn('Invalid task state', resp.json['faultstring']) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK) def test_put_invalid_reset(self): params = copy.deepcopy(RERUN_TASK) params['reset'] = False resp = self.app.put_json( '/v2/tasks/123', params=params, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn('faultstring', resp.json) self.assertIn('Only with-items task', resp.json['faultstring']) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK) def test_put_valid_state(self): params = copy.deepcopy(RERUN_TASK) params['state'] = states.RUNNING params['reset'] = True resp = self.app.put_json( '/v2/tasks/123', params=params ) self.assertEqual(200, resp.status_int) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK) def test_put_mismatch_task_name(self): params = copy.deepcopy(RERUN_TASK) params['name'] = 'abc' params['reset'] = True resp = self.app.put_json( '/v2/tasks/123', params=params, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn('faultstring', resp.json) self.assertIn('Task name does not match', resp.json['faultstring']) @mock.patch.object(rpc.EngineClient, 'rerun_workflow', MOCK_WF_EX) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK) def test_put_match_task_name(self): params = copy.deepcopy(RERUN_TASK) params['name'] = 'task' params['reset'] = True resp = self.app.put_json( '/v2/tasks/123', params=params, expect_errors=True ) self.assertEqual(200, resp.status_int) @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX) @mock.patch.object(db_api, 'get_task_execution', MOCK_ERROR_TASK) def test_put_mismatch_workflow_name(self): params = copy.deepcopy(RERUN_TASK) params['workflow_name'] = 
'xyz' params['reset'] = True resp = self.app.put_json( '/v2/tasks/123', params=params, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn('faultstring', resp.json) self.assertIn('Workflow name does not match', resp.json['faultstring']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_workbooks.py0000644000175000017500000003403700000000000024447 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import mock import sqlalchemy as sa from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral.services import workbooks from mistral.tests.unit.api import base WORKBOOK_DEF = """ --- version: '2.0' name: 'test' workflows: flow: type: direct tasks: task1: action: step param="Hello !" actions: step: input: - param base: std.echo output="<% $.param %>" """ UPDATED_WORKBOOK_DEF = """ --- version: '2.0' name: 'book' workflows: flow: type: direct tasks: task1: action: step arg="Hello !" actions: step: input: - param base: std.echo output="<% $.param %>" """ WORKBOOK = { 'id': '123', 'name': 'test', 'definition': WORKBOOK_DEF, 'tags': ['deployment', 'demo'], 'scope': 'public', 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00' } WB_WITH_NAMESPACE = { 'id': '123', 'name': 'test', 'namespace': 'xyz', 'definition': WORKBOOK_DEF, 'tags': ['deployment', 'demo'], 'scope': 'public', 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00' } ACTION = { 'id': '123e4567-e89b-12d3-a456-426655440000', 'name': 'step', 'is_system': False, 'description': 'My super cool action.', 'tags': ['test', 'v2'], 'definition': '' } WF = { 'id': '123e4567-e89b-12d3-a456-426655440000', 'name': 'flow', 'definition': '', 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00', } ACTION_DB = models.ActionDefinition() ACTION_DB.update(ACTION) WORKBOOK_DB = models.Workbook() WORKBOOK_DB.update(WORKBOOK) WB_DB_WITH_NAMESPACE = models.Workbook(**WB_WITH_NAMESPACE) WF_DB = models.WorkflowDefinition() WF_DB.update(WF) WORKBOOK_DB_PROJECT_ID = WORKBOOK_DB.get_clone() WORKBOOK_DB_PROJECT_ID.project_id = '' UPDATED_WORKBOOK_DB = copy.copy(WORKBOOK_DB) UPDATED_WORKBOOK_DB['definition'] = UPDATED_WORKBOOK_DEF UPDATED_WORKBOOK = copy.deepcopy(WORKBOOK) UPDATED_WORKBOOK['definition'] = UPDATED_WORKBOOK_DEF WB_DEF_INVALID_MODEL_EXCEPTION = """ --- version: '2.0' name: 'book' workflows: flow: type: direct tasks: task1: action: std.echo output="Hi" workflow: wf1 """ WB_DEF_DSL_PARSE_EXCEPTION = """ --- % """ WB_DEF_YAQL_PARSE_EXCEPTION = """ --- version: '2.0' name: 'book' workflows: flow: type: direct tasks: task1: action: std.echo output=<% * %> """ MOCK_WORKBOOK = mock.MagicMock(return_value=WORKBOOK_DB) MOCK_WB_WITH_NAMESPACE = mock.MagicMock(return_value=WB_DB_WITH_NAMESPACE) MOCK_WORKBOOKS =
mock.MagicMock(return_value=[WORKBOOK_DB]) MOCK_UPDATED_WORKBOOK = mock.MagicMock(return_value=UPDATED_WORKBOOK_DB) MOCK_DELETE = mock.MagicMock(return_value=None) MOCK_EMPTY = mock.MagicMock(return_value=[]) MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError()) MOCK_DUPLICATE = mock.MagicMock(side_effect=exc.DBDuplicateEntryError()) class TestWorkbooksController(base.APITest): @mock.patch.object(db_api, "get_workbook", MOCK_WORKBOOK) def test_get(self): resp = self.app.get('/v2/workbooks/123') self.assertEqual(200, resp.status_int) self.assertDictEqual(WORKBOOK, resp.json) @mock.patch.object(db_api, "get_workbook", MOCK_WB_WITH_NAMESPACE) def test_get_with_namespace(self): resp = self.app.get('/v2/workbooks/123?namespace=xyz') self.assertEqual(200, resp.status_int) self.assertDictEqual(WB_WITH_NAMESPACE, resp.json) @mock.patch.object(db_api, 'get_workbook') def test_get_operational_error(self, mocked_get): mocked_get.side_effect = [ # Emulating DB OperationalError sa.exc.OperationalError('Mock', 'mock', 'mock'), WORKBOOK_DB # Successful run ] resp = self.app.get('/v2/workbooks/123') self.assertEqual(200, resp.status_int) self.assertDictEqual(WORKBOOK, resp.json) @mock.patch.object(db_api, "get_workbook", MOCK_NOT_FOUND) def test_get_not_found(self): resp = self.app.get('/v2/workbooks/123', expect_errors=True) self.assertEqual(404, resp.status_int) @mock.patch.object(db_api, "get_workbook", return_value=WORKBOOK_DB_PROJECT_ID) def test_get_within_project_id(self, mock_get): resp = self.app.get('/v2/workbooks/123') self.assertEqual(200, resp.status_int) self.assertTrue('project_id' in resp.json) @mock.patch.object(workbooks, "update_workbook_v2", MOCK_UPDATED_WORKBOOK) def test_put(self): resp = self.app.put( '/v2/workbooks', UPDATED_WORKBOOK_DEF, headers={'Content-Type': 'text/plain'} ) self.assertEqual(200, resp.status_int) self.assertEqual(UPDATED_WORKBOOK, resp.json) @mock.patch.object(workbooks, "update_workbook_v2", MOCK_NOT_FOUND) def test_put_not_found(self): resp = self.app.put_json( '/v2/workbooks', UPDATED_WORKBOOK_DEF, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(404, resp.status_int) def test_put_invalid(self): resp = self.app.put( '/v2/workbooks', WB_DEF_INVALID_MODEL_EXCEPTION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("Invalid DSL", resp.body.decode()) @mock.patch.object(workbooks, "update_workbook_v2", MOCK_UPDATED_WORKBOOK) def test_put_invalid_skip_validation(self): self.override_config('validation_mode', 'enabled', 'api') resp = self.app.put( '/v2/workbooks?skip_validation', WB_DEF_INVALID_MODEL_EXCEPTION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) @mock.patch.object(db_api, "update_workbook") @mock.patch.object(db_api, "create_or_update_workflow_definition") @mock.patch.object(db_api, "create_or_update_action_definition") def test_put_public(self, mock_action, mock_wf, mock_wb): mock_wb.return_value = UPDATED_WORKBOOK_DB mock_wf.return_value = WF_DB mock_action.return_value = ACTION_DB resp = self.app.put( '/v2/workbooks?scope=public', UPDATED_WORKBOOK_DEF, headers={'Content-Type': 'text/plain'} ) self.assertEqual(200, resp.status_int) self.assertDictEqual(UPDATED_WORKBOOK, resp.json) self.assertEqual("public", mock_wb.call_args[0][1]['scope']) self.assertEqual("public", mock_wf.call_args[0][1]['scope']) self.assertEqual("public", mock_action.call_args[0][1]['scope']) def 
test_put_wrong_scope(self): resp = self.app.put( '/v2/workbooks?scope=unique', UPDATED_WORKBOOK_DEF, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("Scope must be one of the following", resp.body.decode()) @mock.patch.object(workbooks, "create_workbook_v2", MOCK_WORKBOOK) def test_post(self): resp = self.app.post( '/v2/workbooks', WORKBOOK_DEF, headers={'Content-Type': 'text/plain'} ) self.assertEqual(201, resp.status_int) self.assertEqual(WORKBOOK, resp.json) @mock.patch.object(workbooks, "create_workbook_v2", MOCK_WB_WITH_NAMESPACE) def test_post_namespace(self): namespace = 'xyz' resp = self.app.post( '/v2/workbooks?namespace=%s' % namespace, WORKBOOK_DEF, headers={'Content-Type': 'text/plain'} ) self.assertEqual(201, resp.status_int) self.assertEqual(WB_WITH_NAMESPACE, resp.json) @mock.patch.object(workbooks, "create_workbook_v2", MOCK_DUPLICATE) def test_post_dup(self): resp = self.app.post( '/v2/workbooks', WORKBOOK_DEF, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(409, resp.status_int) def test_post_invalid(self): resp = self.app.post( '/v2/workbooks', WB_DEF_INVALID_MODEL_EXCEPTION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("Invalid DSL", resp.body.decode()) def test_post_invalid_skip_validation(self): self.override_config('validation_mode', 'enabled', 'api') resp = self.app.post( '/v2/workbooks?skip_validation', WB_DEF_INVALID_MODEL_EXCEPTION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(201, resp.status_int) @mock.patch.object(db_api, "create_workbook") @mock.patch.object(db_api, "create_or_update_workflow_definition") @mock.patch.object(db_api, "create_or_update_action_definition") def test_post_public(self, mock_action, mock_wf, mock_wb): mock_wb.return_value = WORKBOOK_DB mock_wf.return_value = WF_DB mock_action.return_value = ACTION_DB resp = self.app.post( '/v2/workbooks?scope=public', WORKBOOK_DEF, headers={'Content-Type': 'text/plain'} ) self.assertEqual(201, resp.status_int) self.assertEqual(WORKBOOK, resp.json) self.assertEqual("public", mock_wb.call_args[0][0]['scope']) self.assertEqual("public", mock_wf.call_args[0][1]['scope']) self.assertEqual("public", mock_action.call_args[0][1]['scope']) def test_post_wrong_scope(self): resp = self.app.post( '/v2/workbooks?scope=unique', WORKBOOK_DEF, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn("Scope must be one of the following", resp.body.decode()) @mock.patch.object(db_api, "delete_workbook", MOCK_DELETE) def test_delete(self): resp = self.app.delete('/v2/workbooks/123') self.assertEqual(204, resp.status_int) @mock.patch.object(db_api, "delete_workbook", MOCK_NOT_FOUND) def test_delete_not_found(self): resp = self.app.delete('/v2/workbooks/123', expect_errors=True) self.assertEqual(404, resp.status_int) @mock.patch.object(db_api, "get_workbooks", MOCK_WORKBOOKS) def test_get_all(self): resp = self.app.get('/v2/workbooks') self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['workbooks'])) self.assertDictEqual(WORKBOOK, resp.json['workbooks'][0]) @mock.patch.object(db_api, 'get_workbooks') def test_get_all_operational_error(self, mocked_get_all): mocked_get_all.side_effect = [ # Emulating DB OperationalError sa.exc.OperationalError('Mock', 'mock', 'mock'), [WORKBOOK_DB] # Successful run ] resp = self.app.get('/v2/workbooks') 
self.assertEqual(200, resp.status_int) self.assertEqual(1, len(resp.json['workbooks'])) self.assertDictEqual(WORKBOOK, resp.json['workbooks'][0]) @mock.patch.object(db_api, "get_workbooks", MOCK_EMPTY) def test_get_all_empty(self): resp = self.app.get('/v2/workbooks') self.assertEqual(200, resp.status_int) self.assertEqual(0, len(resp.json['workbooks'])) def test_validate(self): resp = self.app.post( '/v2/workbooks/validate', WORKBOOK_DEF, headers={'Content-Type': 'text/plain'} ) self.assertEqual(200, resp.status_int) self.assertTrue(resp.json['valid']) def test_validate_invalid_model_exception(self): resp = self.app.post( '/v2/workbooks/validate', WB_DEF_INVALID_MODEL_EXCEPTION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) self.assertFalse(resp.json['valid']) self.assertIn("Invalid DSL", resp.json['error']) def test_validate_dsl_parse_exception(self): resp = self.app.post( '/v2/workbooks/validate', WB_DEF_DSL_PARSE_EXCEPTION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) self.assertFalse(resp.json['valid']) self.assertIn("Definition could not be parsed", resp.json['error']) def test_validate_yaql_parse_exception(self): resp = self.app.post( '/v2/workbooks/validate', WB_DEF_YAQL_PARSE_EXCEPTION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) self.assertFalse(resp.json['valid']) self.assertIn("unexpected '*' at position 1", resp.json['error']) def test_validate_empty(self): resp = self.app.post( '/v2/workbooks/validate', '', headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) self.assertFalse(resp.json['valid']) self.assertIn("Invalid DSL", resp.json['error']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/api/v2/test_workflows.py0000644000175000017500000006311700000000000024465 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
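# Editor's note: in the responses checked below, the 'interface' field of a
# workflow resource is serialized as a JSON string rather than a dict, so
# the tests decode it before comparing against the expected resource, e.g.:
#
#     resp_json = resp.json
#     resp_json['interface'] = json.loads(resp_json['interface'])
#     self.assertDictEqual(WF, resp_json)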
import copy import datetime import json import mock import sqlalchemy as sa import yaml from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral.tests.unit.api import base from mistral.tests.unit import base as unit_base from mistral_lib import utils WF_DEFINITION = """ --- version: '2.0' flow: type: direct input: - param1 tasks: task1: action: std.echo output="Hi" """ WF_DB = models.WorkflowDefinition( id='123e4567-e89b-12d3-a456-426655440000', name='flow', definition=WF_DEFINITION, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), spec={'input': ['param1']} ) WF_DB_SYSTEM = WF_DB.get_clone() WF_DB_SYSTEM.is_system = True WF = { 'id': '123e4567-e89b-12d3-a456-426655440000', 'name': 'flow', 'definition': WF_DEFINITION, 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00', 'input': 'param1', 'interface': {"output": [], "input": ["param1"]} } WF_DB_WITHIN_ABC_NAMESPACE = models.WorkflowDefinition( id='234560fe-162a-4060-a16a-a0d9eee9b408', name='flow', namespace='abc', definition=WF_DEFINITION, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), spec={'input': ['param1']} ) WF_WITH_NAMESPACE = { 'id': '234560fe-162a-4060-a16a-a0d9eee9b408', 'name': 'flow', 'namespace': 'abc', 'definition': WF_DEFINITION, 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00', 'input': 'param1', 'interface': {'input': ['param1'], 'output': []} } WF_DEFINITION_WITH_INPUT = """ --- version: '2.0' flow: type: direct input: - param1 - param2: 2 tasks: task1: action: std.echo output="Hi" """ WF_DB_WITH_INPUT = models.WorkflowDefinition( name='flow', definition=WF_DEFINITION_WITH_INPUT, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), spec={'input': ['param1', {'param2': 2}]} ) WF_WITH_DEFAULT_INPUT = { 'name': 'flow', 'definition': WF_DEFINITION_WITH_INPUT, 'created_at': '1970-01-01 00:00:00', 'updated_at': '1970-01-01 00:00:00', 'input': 'param1, param2="2"', 'interface': { "input": ["param1", {"param2": 2}], "output": [] } } WF_DB_PROJECT_ID = WF_DB.get_clone() WF_DB_PROJECT_ID.project_id = '' UPDATED_WF_DEFINITION = """ --- version: '2.0' flow: type: direct input: - param1 - param2 tasks: task1: action: std.echo output="Hi" """ UPDATED_WF_DB = copy.copy(WF_DB) UPDATED_WF_DB['definition'] = UPDATED_WF_DEFINITION UPDATED_WF = copy.deepcopy(WF) UPDATED_WF['definition'] = UPDATED_WF_DEFINITION WF_DEF_INVALID_MODEL_EXCEPTION = """ --- version: '2.0' flow: type: direct tasks: task1: action: std.echo output="Hi" workflow: wf1 """ WF_DEF_DSL_PARSE_EXCEPTION = """ --- % """ WF_DEF_YAQL_PARSE_EXCEPTION = """ --- version: '2.0' flow: type: direct tasks: task1: action: std.echo output=<% * %> """ WFS_DEFINITION = """ --- version: '2.0' wf1: tasks: task1: action: std.echo output="Hello" wf2: tasks: task1: action: std.echo output="Mistral" """ WFS_YAML = yaml.safe_load(WFS_DEFINITION) FIRST_WF_DEF = yaml.dump({ 'version': '2.0', 'wf1': WFS_YAML['wf1'] }) SECOND_WF_DEF = yaml.dump({ 'version': '2.0', 'wf2': WFS_YAML['wf2'] }) FIRST_WF_DICT = { 'name': 'wf1', 'tasks': { 'task1': { 'action': 'std.echo output="Hello"', 'name': 'task1', 'type': 'direct', 'version': '2.0' } }, 'version': '2.0' } FIRST_WF = { 'name': 'wf1', 'tags': [], 'definition': FIRST_WF_DEF, 'spec': FIRST_WF_DICT, 'scope': 'private', 'namespace': '', 'is_system': False } SECOND_WF_DICT = { 'name': 'wf2', 'tasks': { 'task1': { 'action': 'std.echo
output="Mistral"', 'name': 'task1', 'type': 'direct', 'version': '2.0' } }, 'version': '2.0' } SECOND_WF = { 'name': 'wf2', 'tags': [], 'definition': SECOND_WF_DEF, 'spec': SECOND_WF_DICT, 'scope': 'private', 'namespace': '', 'is_system': False } MOCK_WF = mock.MagicMock(return_value=WF_DB) MOCK_WF_SYSTEM = mock.MagicMock(return_value=WF_DB_SYSTEM) MOCK_WF_WITH_INPUT = mock.MagicMock(return_value=WF_DB_WITH_INPUT) MOCK_WFS = mock.MagicMock(return_value=[WF_DB]) MOCK_UPDATED_WF = mock.MagicMock(return_value=UPDATED_WF_DB) MOCK_DELETE = mock.MagicMock(return_value=None) MOCK_EMPTY = mock.MagicMock(return_value=[]) MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError()) MOCK_DUPLICATE = mock.MagicMock(side_effect=exc.DBDuplicateEntryError()) class TestWorkflowsController(base.APITest): @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF) def test_get(self): resp = self.app.get('/v2/workflows/123') resp_json = resp.json resp_json['interface'] = json.loads(resp_json['interface']) self.assertEqual(200, resp.status_int) self.assertDictEqual(WF, resp_json) @mock.patch.object(db_api, 'get_workflow_definition') def test_get_operational_error(self, mocked_get): mocked_get.side_effect = [ # Emulating DB OperationalError sa.exc.OperationalError('Mock', 'mock', 'mock'), WF_DB # Successful run ] resp = self.app.get('/v2/workflows/123') resp_json = resp.json resp_json['interface'] = json.loads(resp_json['interface']) self.assertEqual(200, resp.status_int) self.assertDictEqual(WF, resp_json) @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF_WITH_INPUT) def test_get_with_input(self): resp = self.app.get('/v2/workflows/123') self.maxDiff = None resp_json = resp.json resp_json['interface'] = json.loads(resp_json['interface']) self.assertEqual(200, resp.status_int) self.assertDictEqual(WF_WITH_DEFAULT_INPUT, resp_json) @mock.patch.object(db_api, "get_workflow_definition", MOCK_NOT_FOUND) def test_get_not_found(self): resp = self.app.get('/v2/workflows/123', expect_errors=True) self.assertEqual(404, resp.status_int) @mock.patch.object( db_api, "update_workflow_definition", MOCK_UPDATED_WF ) def test_put(self): resp = self.app.put( '/v2/workflows', UPDATED_WF_DEFINITION, headers={'Content-Type': 'text/plain'} ) self.maxDiff = None self.assertEqual(200, resp.status_int) self.assertDictEqual({'workflows': [UPDATED_WF]}, resp.json) @mock.patch("mistral.services.workflows.update_workflows") def test_put_with_uuid(self, update_mock): update_mock.return_value = [UPDATED_WF_DB] resp = self.app.put( '/v2/workflows/123e4567-e89b-12d3-a456-426655440000', UPDATED_WF_DEFINITION, headers={'Content-Type': 'text/plain'} ) self.assertEqual(200, resp.status_int) update_mock.assert_called_once_with( UPDATED_WF_DEFINITION, scope='private', identifier='123e4567-e89b-12d3-a456-426655440000', namespace='', validate=True ) self.assertDictEqual(UPDATED_WF, resp.json) @mock.patch( "mistral.db.v2.sqlalchemy.api.get_workflow_definition", return_value=WF_DB_SYSTEM ) def test_put_system(self, get_mock): resp = self.app.put( '/v2/workflows', UPDATED_WF_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(400, resp.status_int) self.assertIn( "Can not modify a system", resp.body.decode() ) @mock.patch.object(db_api, "update_workflow_definition") def test_put_public(self, mock_update): mock_update.return_value = UPDATED_WF_DB resp = self.app.put( '/v2/workflows?scope=public', UPDATED_WF_DEFINITION, headers={'Content-Type': 'text/plain'} ) self.assertEqual(200, 
                         resp.status_int)

        self.assertDictEqual({'workflows': [UPDATED_WF]}, resp.json)

        self.assertEqual("public", mock_update.call_args[0][1]['scope'])

    def test_put_wrong_scope(self):
        resp = self.app.put(
            '/v2/workflows?scope=unique',
            UPDATED_WF_DEFINITION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn("Scope must be one of the following",
                      resp.body.decode())

    @mock.patch.object(
        db_api, "update_workflow_definition", MOCK_WF_WITH_INPUT
    )
    def test_put_with_input(self):
        resp = self.app.put(
            '/v2/workflows',
            WF_DEFINITION_WITH_INPUT,
            headers={'Content-Type': 'text/plain'}
        )

        self.maxDiff = None

        self.assertEqual(200, resp.status_int)
        self.assertDictEqual({'workflows': [WF_WITH_DEFAULT_INPUT]},
                             resp.json)

    @mock.patch.object(
        db_api, "update_workflow_definition", MOCK_NOT_FOUND
    )
    def test_put_not_found(self):
        resp = self.app.put(
            '/v2/workflows',
            UPDATED_WF_DEFINITION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True,
        )

        self.assertEqual(404, resp.status_int)

    def test_put_invalid(self):
        resp = self.app.put(
            '/v2/workflows',
            WF_DEF_INVALID_MODEL_EXCEPTION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn("Invalid DSL", resp.body.decode())

    @mock.patch.object(
        db_api, "update_workflow_definition", MOCK_UPDATED_WF
    )
    def test_put_invalid_skip_validation(self):
        self.override_config('validation_mode', 'enabled', 'api')

        resp = self.app.put(
            '/v2/workflows?skip_validation',
            WF_DEF_INVALID_MODEL_EXCEPTION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(200, resp.status_int)

    @mock.patch.object(db_api, "update_workflow_definition")
    def test_put_multiple(self, mock_mtd):
        spec_mock = mock_mtd.return_value.get.return_value
        spec_mock.get.return_value = {}

        self.app.put(
            '/v2/workflows',
            WFS_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(2, mock_mtd.call_count)

        mock_mtd.assert_any_call('wf1', FIRST_WF)
        mock_mtd.assert_any_call('wf2', SECOND_WF)

    def test_put_more_workflows_with_uuid(self):
        resp = self.app.put(
            '/v2/workflows/123e4567-e89b-12d3-a456-426655440000',
            WFS_DEFINITION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn(
            "More than one workflows are not supported for update",
            resp.body.decode()
        )

    @mock.patch.object(db_api, "create_workflow_definition")
    def test_post(self, mock_mtd):
        mock_mtd.return_value = WF_DB

        resp = self.app.post(
            '/v2/workflows',
            WF_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(201, resp.status_int)
        self.assertDictEqual({'workflows': [WF]}, resp.json)

        self.assertEqual(1, mock_mtd.call_count)

        spec = mock_mtd.call_args[0][0]['spec']

        self.assertIsNotNone(spec)
        self.assertEqual(WF_DB.name, spec['name'])

    @mock.patch.object(db_api, "create_workflow_definition")
    def test_post_public(self, mock_mtd):
        mock_mtd.return_value = WF_DB

        resp = self.app.post(
            '/v2/workflows?scope=public',
            WF_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(201, resp.status_int)
        self.assertEqual({"workflows": [WF]}, resp.json)

        self.assertEqual("public", mock_mtd.call_args[0][0]['scope'])

    def test_post_wrong_scope(self):
        resp = self.app.post(
            '/v2/workflows?scope=unique',
            WF_DEFINITION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn("Scope must be one of the following",
                      resp.body.decode())

    @mock.patch.object(db_api, "create_workflow_definition", MOCK_DUPLICATE)
    def test_post_dup(self):
        resp = self.app.post(
            '/v2/workflows',
            WF_DEFINITION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(409, resp.status_int)

    @mock.patch.object(db_api, "create_workflow_definition")
    def test_post_multiple(self, mock_mtd):
        spec_mock = mock_mtd.return_value.get.return_value
        spec_mock.get.return_value = {}

        self.app.post(
            '/v2/workflows',
            WFS_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(2, mock_mtd.call_count)

        mock_mtd.assert_any_call(FIRST_WF)
        mock_mtd.assert_any_call(SECOND_WF)

    def test_post_invalid(self):
        resp = self.app.post(
            '/v2/workflows',
            WF_DEF_INVALID_MODEL_EXCEPTION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn("Invalid DSL", resp.body.decode())

    def test_post_invalid_skip_validation(self):
        self.override_config('validation_mode', 'enabled', 'api')

        resp = self.app.post(
            '/v2/workflows?skip_validation',
            WF_DEF_INVALID_MODEL_EXCEPTION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(201, resp.status_int)

    @mock.patch.object(db_api, "delete_workflow_definition", MOCK_DELETE)
    @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF)
    def test_delete(self):
        resp = self.app.delete('/v2/workflows/123')

        self.assertEqual(204, resp.status_int)

    @mock.patch(
        "mistral.db.v2.sqlalchemy.api.get_workflow_definition",
        return_value=WF_DB_SYSTEM
    )
    def test_delete_system(self, get_mock):
        resp = self.app.delete('/v2/workflows/123', expect_errors=True)

        self.assertEqual(400, resp.status_int)
        self.assertIn(
            "Can not modify a system",
            resp.body.decode()
        )

    @mock.patch.object(db_api, "delete_workflow_definition", MOCK_NOT_FOUND)
    def test_delete_not_found(self):
        resp = self.app.delete('/v2/workflows/123', expect_errors=True)

        self.assertEqual(404, resp.status_int)

    @mock.patch.object(db_api, "get_workflow_definitions", MOCK_WFS)
    def test_get_all(self):
        resp = self.app.get('/v2/workflows')

        self.assertEqual(200, resp.status_int)

        resp_json = resp.json['workflows'][0]
        resp_json['interface'] = json.loads(resp_json['interface'])

        self.assertEqual(1, len(resp.json['workflows']))
        self.assertDictEqual(WF, resp_json)

    @mock.patch.object(db_api, 'get_workflow_definitions')
    def test_get_all_operational_error(self, mocked_get_all):
        mocked_get_all.side_effect = [
            # Emulating DB OperationalError
            sa.exc.OperationalError('Mock', 'mock', 'mock'),
            [WF_DB]  # Successful run
        ]

        resp = self.app.get('/v2/workflows')

        resp_workflow_json = resp.json['workflows'][0]
        resp_workflow_json['interface'] = \
            json.loads(resp_workflow_json['interface'])

        self.assertEqual(200, resp.status_int)
        self.assertEqual(1, len(resp.json['workflows']))
        self.assertDictEqual(WF, resp_workflow_json)

    @mock.patch.object(db_api, "get_workflow_definitions", MOCK_EMPTY)
    def test_get_all_empty(self):
        resp = self.app.get('/v2/workflows')

        self.assertEqual(200, resp.status_int)

        self.assertEqual(0, len(resp.json['workflows']))

    @mock.patch('mistral.db.v2.api.get_workflow_definitions')
    @mock.patch('mistral.context.MistralContext.from_environ')
    def test_get_all_projects_admin(self, mock_context, mock_get_wf_defs):
        admin_ctx = unit_base.get_context(admin=True)
        mock_context.return_value = admin_ctx

        resp = self.app.get('/v2/workflows?all_projects=true')

        self.assertEqual(200, resp.status_int)

        self.assertTrue(mock_get_wf_defs.call_args[1].get('insecure', False))

    def test_get_all_projects_normal_user(self):
        resp = self.app.get(
            '/v2/workflows?all_projects=true',
            expect_errors=True
        )

        self.assertEqual(403, resp.status_int)

    @mock.patch.object(db_api, "get_workflow_definitions", MOCK_WFS)
    def test_get_all_pagination(self):
        resp = self.app.get(
            '/v2/workflows?limit=1&sort_keys=id,name')

        self.assertEqual(200, resp.status_int)
        self.assertIn('next', resp.json)

        resp_workflow_json = resp.json['workflows'][0]
        resp_workflow_json['interface'] = \
            json.loads(resp_workflow_json['interface'])

        self.assertEqual(1, len(resp.json['workflows']))
        self.assertDictEqual(WF, resp_workflow_json)

        param_dict = utils.get_dict_from_string(
            resp.json['next'].split('?')[1],
            delimiter='&'
        )

        expected_dict = {
            'marker': '123e4567-e89b-12d3-a456-426655440000',
            'limit': 1,
            'sort_keys': 'id,name',
            'sort_dirs': 'asc,asc',
        }

        self.assertDictEqual(expected_dict, param_dict)

    def test_get_all_pagination_limit_negative(self):
        resp = self.app.get(
            '/v2/workflows?limit=-1&sort_keys=id,name&sort_dirs=asc,asc',
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn("Limit must be positive", resp.body.decode())

    def test_get_all_pagination_limit_not_integer(self):
        resp = self.app.get(
            '/v2/workflows?limit=1.1&sort_keys=id,name&sort_dirs=asc,asc',
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn("unable to convert to int", resp.body.decode())

    def test_get_all_pagination_invalid_sort_dirs_length(self):
        resp = self.app.get(
            '/v2/workflows?limit=1&sort_keys=id,name&sort_dirs=asc,asc,asc',
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn(
            "Length of sort_keys must be equal or greater than sort_dirs",
            resp.body.decode()
        )

    def test_get_all_pagination_unknown_direction(self):
        resp = self.app.get(
            '/v2/workflows?limit=1&sort_keys=id&sort_dirs=nonexist',
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)
        self.assertIn("Unknown sort direction", resp.body.decode())

    @mock.patch('mistral.db.v2.api.get_workflow_definitions')
    def test_get_all_with_fields_filter(self, mock_get_db_wfs):
        mock_get_db_wfs.return_value = [
            ('123e4567-e89b-12d3-a456-426655440000', 'fake_name')
        ]

        resp = self.app.get('/v2/workflows?fields=name')

        self.assertEqual(200, resp.status_int)
        self.assertEqual(1, len(resp.json['workflows']))

        expected_dict = {
            'id': '123e4567-e89b-12d3-a456-426655440000',
            'name': 'fake_name'
        }

        self.assertDictEqual(expected_dict, resp.json['workflows'][0])

    @mock.patch('mistral.db.v2.api.get_workflow_definitions')
    def test_get_all_with_fields_input_filter(self, mock_get_db_wfs):
        expected_dict = {
            'id': '65df1f59-938f-4c17-bc2a-562524ef5e40',
            'input': 'param1, param2="2"',
            'interface': {
                "output": [],
                "input": ["param1", {"param2": 2}]
            }
        }

        def mock_get_definitions(fields=None, session=None, **kwargs):
            if fields and 'input' in fields:
                fields.remove('input')
                fields.append('spec')

            return [
                ('65df1f59-938f-4c17-bc2a-562524ef5e40',
                 {'input': ['param1', {'param2': 2}]})
            ]

        mock_get_db_wfs.side_effect = mock_get_definitions

        resp = self.app.get('/v2/workflows?fields=input')

        self.assertEqual(200, resp.status_int)
        self.assertEqual(1, len(resp.json['workflows']))

        resp_workflow_json = resp.json['workflows'][0]
        resp_workflow_json['interface'] = \
            json.loads(resp_workflow_json['interface'])

        self.assertDictEqual(expected_dict, resp_workflow_json)

    def test_get_all_with_invalid_field(self):
        resp = self.app.get(
            '/v2/workflows?fields=name,nonexist',
            expect_errors=True
        )

        self.assertEqual(400, resp.status_int)

        response_msg = resp.body.decode()

        self.assertIn("nonexist", response_msg)
        self.assertIn("do not exist", response_msg)

    def test_validate(self):
        resp = self.app.post(
            '/v2/workflows/validate',
            WF_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(200, resp.status_int)
        self.assertTrue(resp.json['valid'])

    def test_validate_invalid_model_exception(self):
        resp = self.app.post(
            '/v2/workflows/validate',
            WF_DEF_INVALID_MODEL_EXCEPTION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(200, resp.status_int)
        self.assertFalse(resp.json['valid'])
        self.assertIn("Invalid DSL", resp.json['error'])

    def test_validate_dsl_parse_exception(self):
        resp = self.app.post(
            '/v2/workflows/validate',
            WF_DEF_DSL_PARSE_EXCEPTION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(200, resp.status_int)
        self.assertFalse(resp.json['valid'])
        self.assertIn("Definition could not be parsed", resp.json['error'])

    def test_validate_yaql_parse_exception(self):
        resp = self.app.post(
            '/v2/workflows/validate',
            WF_DEF_YAQL_PARSE_EXCEPTION,
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(200, resp.status_int)
        self.assertFalse(resp.json['valid'])
        self.assertIn("unexpected '*' at position 1", resp.json['error'])

    def test_validate_empty(self):
        resp = self.app.post(
            '/v2/workflows/validate',
            '',
            headers={'Content-Type': 'text/plain'},
            expect_errors=True
        )

        self.assertEqual(200, resp.status_int)
        self.assertFalse(resp.json['valid'])
        self.assertIn("Invalid DSL", resp.json['error'])

    @mock.patch("mistral.services.workflows.update_workflows")
    @mock.patch.object(db_api, "create_workflow_definition")
    def test_workflow_within_namespace(self, mock_mtd, update_mock):
        mock_mtd.return_value = WF_DB_WITHIN_ABC_NAMESPACE

        namespace = 'abc'

        resp = self.app.post(
            '/v2/workflows?namespace=%s' % namespace,
            WF_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(201, resp.status_int)
        self.assertDictEqual({'workflows': [WF_WITH_NAMESPACE]}, resp.json)

        self.assertEqual(1, mock_mtd.call_count)

        spec = mock_mtd.call_args[0][0]['spec']

        self.assertIsNotNone(spec)
        self.assertEqual(WF_DB.name, spec['name'])
        self.assertEqual(WF_DB_WITHIN_ABC_NAMESPACE.namespace, namespace)

        update_mock.return_value = [WF_DB_WITHIN_ABC_NAMESPACE]

        id_ = '234560fe-162a-4060-a16a-a0d9eee9b408'

        resp = self.app.put(
            '/v2/workflows/%s?namespace=%s' % (id_, namespace),
            WF_DEFINITION,
            headers={'Content-Type': 'text/plain'}
        )

        self.assertEqual(200, resp.status_int)

        update_mock.assert_called_once_with(
            WF_DEFINITION,
            scope='private',
            identifier=id_,
            namespace='abc',
            validate=True
        )

        self.assertDictEqual(WF_WITH_NAMESPACE, resp.json)

    @mock.patch.object(db_api, "get_workflow_definition")
    def test_workflow_within_project_id(self, mock_get):
        mock_get.return_value = WF_DB_PROJECT_ID

        resp = self.app.get(
            '/v2/workflows/123e4567-e89b-12d3-a456-426655440000')

        self.assertEqual(200, resp.status_int)
        self.assertTrue('project_id' in resp.json)
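
    # A minimal sketch (not part of the original suite) of the WebTest
    # request pattern used throughout this class; `self.app` is assumed to
    # be the webtest.TestApp wrapping the Pecan WSGI application built by
    # the API test base class, and the UUID below is illustrative only.
    #
    #     def test_get_one_not_found_sketch(self):
    #         resp = self.app.get(
    #             '/v2/workflows/00000000-0000-0000-0000-000000000000',
    #             expect_errors=True  # return 4xx responses instead of raising
    #         )
    #
    #         self.assertEqual(404, resp.status_int)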

# ============================================================================
# mistral-10.0.0.0b3/mistral/tests/unit/base.py
# ============================================================================

# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import json
import pkg_resources as pkg
import sys
import time

import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslotest import base
import testtools.matchers as ttm

from mistral import context as auth_context
from mistral.db.sqlalchemy import base as db_sa_base
from mistral.db.sqlalchemy import sqlite_lock
from mistral.db.v2 import api as db_api
from mistral.lang import parser as spec_parser
from mistral.services import action_manager
from mistral.services import security
from mistral.tests.unit import config as test_config
from mistral import version
from mistral_lib.utils import inspect_utils as i_utils

RESOURCES_PATH = 'tests/resources/'
LOG = logging.getLogger(__name__)

test_config.parse_args()


def get_resource(resource_name):
    return open(pkg.resource_filename(
        version.version_info.package,
        RESOURCES_PATH + resource_name)).read()


def get_context(default=True, admin=False):
    if default:
        return auth_context.MistralContext.from_dict({
            'user_name': 'test-user',
            'user': '1-2-3-4',
            'tenant': security.DEFAULT_PROJECT_ID,
            'project_name': 'test-project',
            'is_admin': admin
        })
    else:
        return auth_context.MistralContext.from_dict({
            'user_name': 'test-user',
            'user': '9-0-44-5',
            'tenant': '99-88-33',
            'project_name': 'test-another',
            'is_admin': admin
        })


def register_action_class(name, cls, attributes=None, desc=None):
    action_manager.register_action_class(
        name,
        '%s.%s' % (cls.__module__, cls.__name__),
        attributes or {},
        input_str=i_utils.get_arg_list_as_str(cls.__init__)
    )


class FakeHTTPResponse(object):
    def __init__(self, text, status_code, reason=None, headers=None,
                 history=None, encoding='utf-8', url='', cookies=None,
                 elapsed=None):
        self.text = text
        self.content = text
        self.status_code = status_code
        self.reason = reason
        self.headers = headers or {}
        self.history = history
        self.encoding = encoding
        self.url = url
        self.cookies = cookies or {}
        self.elapsed = elapsed or datetime.timedelta(milliseconds=123)

    def json(self, **kwargs):
        return json.loads(self.text, **kwargs)
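
# A short usage sketch (illustrative only, not part of the original file):
# FakeHTTPResponse mimics just enough of a requests.Response object for
# tests that mock out HTTP actions.
#
#     fake = FakeHTTPResponse('{"ok": true}', 200, reason='OK')
#     assert fake.json() == {'ok': True}
#     assert fake.status_code == 200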

class BaseTest(base.BaseTestCase):
    def setUp(self):
        super(BaseTest, self).setUp()

        self.addCleanup(spec_parser.clear_caches)

    def register_action_class(self, name, cls, attributes=None, desc=None):
        # Added for convenience (to avoid unnecessary imports).
        register_action_class(name, cls, attributes, desc)

    def assertRaisesWithMessage(self, exception, msg, func, *args, **kwargs):
        try:
            func(*args, **kwargs)

            self.fail("Expected exception %s was not raised." % exception)
        except exception as e:
            self.assertEqual(msg, e.message)

    def assertRaisesWithMessageContaining(self, exception, msg, func, *args,
                                          **kwargs):
        try:
            func(*args, **kwargs)

            self.fail("Expected exception %s was not raised." % exception)
        except exception as e:
            self.assertIn(msg, e.message)

    def assertListEqual(self, l1, l2):
        if tuple(sys.version_info)[0:2] < (2, 7):
            # for python 2.6 compatibility
            self.assertEqual(l1, l2)
        else:
            super(BaseTest, self).assertListEqual(l1, l2)

    def assertDictEqual(self, cmp1, cmp2):
        if tuple(sys.version_info)[0:2] < (2, 7):
            # for python 2.6 compatibility
            self.assertThat(cmp1, ttm.Equals(cmp2))
        else:
            super(BaseTest, self).assertDictEqual(cmp1, cmp2)

    def _assert_single_item(self, items, **props):
        return self._assert_multiple_items(items, 1, **props)[0]

    def _assert_no_item(self, items, **props):
        self._assert_multiple_items(items, 0, **props)

    def _assert_multiple_items(self, items, count, **props):
        def _matches(item, **props):
            for prop_name, prop_val in props.items():
                v = item[prop_name] if isinstance(
                    item, dict) else getattr(item, prop_name)

                if v != prop_val:
                    return False

            return True

        filtered_items = list(
            [item for item in items if _matches(item, **props)]
        )

        found = len(filtered_items)

        if found != count:
            LOG.info("[failed test ctx] items=%s, expected_props=%s",
                     str(items), props)

            self.fail("Wrong number of items found [props=%s, "
                      "expected=%s, found=%s]" % (props, count, found))

        return filtered_items

    def _assert_dict_contains_subset(self, expected, actual, msg=None):
        """Checks whether actual is a superset of expected.

        Note: This is almost the exact copy of the standard method
        assertDictContainsSubset() that appeared in Python 2.7, it was
        added to use it with Python 2.6.
        """
        missing = []
        mismatched = []

        for key, value in expected.items():
            if key not in actual:
                missing.append(key)
            elif value != actual[key]:
                mismatched.append('%s, expected: %s, actual: %s'
                                  % (key, value, actual[key]))

        if not (missing or mismatched):
            return

        standardMsg = ''

        if missing:
            standardMsg = 'Missing: %s' % ','.join(m for m in missing)

        if mismatched:
            if standardMsg:
                standardMsg += '; '

            standardMsg += 'Mismatched values: %s' % ','.join(mismatched)

        self.fail(self._formatMessage(msg, standardMsg))

    def _await(self, predicate, delay=1, timeout=60, fail_message="no detail",
               fail_message_formatter=lambda x: x):
        """Awaits for predicate function to evaluate to True.

        If within a configured timeout the predicate function hasn't
        evaluated to True then an exception is raised.

        :param predicate: Predicate function.
        :param delay: Delay in seconds between predicate function calls.
        :param timeout: Maximum amount of time to wait for the predicate
            function to evaluate to True.
        :param fail_message: explains what was expected
        :param fail_message_formatter: lambda that formats the fail_message
        :return:
        """
        end_time = time.time() + timeout

        while True:
            if predicate():
                break

            if time.time() + delay > end_time:
                raise AssertionError(
                    "Failed to wait for expected result: " +
                    fail_message_formatter(fail_message)
                )

            time.sleep(delay)

    def _sleep(self, seconds):
        time.sleep(seconds)

    def override_config(self, name, override, group=None):
        """Cleanly override CONF variables."""
        cfg.CONF.set_override(name, override, group)
        self.addCleanup(cfg.CONF.clear_override, name, group)
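
# A usage sketch for the helpers above (illustrative only; the execution
# object and its state value are assumptions, not taken from this file):
#
#     # Poll every second, for at most 60 seconds, until the execution
#     # reaches SUCCESS; _await() raises AssertionError on timeout.
#     self._await(
#         lambda: db_api.get_workflow_execution(wf_ex_id).state == 'SUCCESS'
#     )
#
#     # Temporarily change a config option; the cleanup registered by
#     # override_config() restores the previous value after the test.
#     self.override_config('auth_enable', False, group='pecan')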

class DbTestCase(BaseTest):
    is_heavy_init_called = False

    @classmethod
    def __heavy_init(cls):
        """Method that runs heavy_init().

        Make this method private to prevent extending this one.
        It runs heavy_init() only once.

        Note: setUpClass() can be used, but it magically is not invoked
        from child class in another module.
        """
        if not cls.is_heavy_init_called:
            cls.heavy_init()

            cls.is_heavy_init_called = True

    @classmethod
    def heavy_init(cls):
        """Runs a long initialization.

        This method runs long initialization once by class
        and can be extended by child classes.
        """
        # If using sqlite, change to memory. The default is file based.
        if cfg.CONF.database.connection.startswith('sqlite'):
            cfg.CONF.set_default('connection', 'sqlite://', group='database')

        cfg.CONF.set_default('max_overflow', -1, group='database')
        cfg.CONF.set_default('max_pool_size', 1000, group='database')

        db_api.setup_db()

        action_manager.sync_db()

    def _clean_db(self):
        db_api._ACTION_DEF_CACHE.clear()

        contexts = [
            get_context(default=False),
            get_context(default=True)
        ]

        for ctx in contexts:
            auth_context.set_ctx(ctx)

            with mock.patch('mistral.services.security.get_project_id',
                            new=mock.MagicMock(return_value=ctx.project_id)):
                with db_api.transaction():
                    db_api.delete_event_triggers()
                    db_api.delete_cron_triggers()
                    db_api.delete_workflow_executions()
                    db_api.delete_task_executions()
                    db_api.delete_action_executions()
                    db_api.delete_workbooks()
                    db_api.delete_workflow_definitions()
                    db_api.delete_environments()
                    db_api.delete_resource_members()
                    db_api.delete_delayed_calls()
                    db_api.delete_scheduled_jobs()

        sqlite_lock.cleanup()

        if not cfg.CONF.database.connection.startswith('sqlite'):
            db_sa_base.get_engine().dispose()

    def setUp(self):
        super(DbTestCase, self).setUp()

        self.__heavy_init()

        self.ctx = get_context()

        auth_context.set_ctx(self.ctx)

        self.addCleanup(auth_context.set_ctx, None)
        self.addCleanup(self._clean_db)

    def is_db_session_open(self):
        return db_sa_base._get_thread_local_session() is not None
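
# A minimal sketch of how the DB test modules below build on DbTestCase
# (illustrative; MyDbTest and the dict literal are assumptions): heavy_init()
# runs once per test class, setUp() installs a fresh security context, and
# _clean_db() wipes everything a test created.
#
#     class MyDbTest(DbTestCase):
#         def test_create_execution(self):
#             wf_ex = db_api.create_workflow_execution({'state': 'IDLE'})
#
#             self.assertIsNotNone(wf_ex.id)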

# ============================================================================
# mistral-10.0.0.0b3/mistral/tests/unit/config.py
# ============================================================================

# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from oslo_config import cfg


def parse_args():
    # Look for .mistral.conf in the project directory by default.
    project_dir = '%s/../../..' % os.path.dirname(__file__)
    config_file = '%s/.mistral.conf' % os.path.realpath(project_dir)

    config_files = [config_file] if os.path.isfile(config_file) else None

    cfg.CONF(args=[], default_config_files=config_files)
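
# An illustrative sketch (assumed content, standard oslo.config INI format):
# dropping a .mistral.conf like the one below into the project root makes
# the unit tests run against a real database instead of the in-memory
# SQLite default.
#
#     [database]
#     connection = mysql+pymysql://mistral:secret@127.0.0.1/mistral_test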

# ============================================================================
# mistral-10.0.0.0b3/mistral/tests/unit/db/__init__.py  (empty file)
# ============================================================================

# ============================================================================
# mistral-10.0.0.0b3/mistral/tests/unit/db/v2/__init__.py  (empty file)
# ============================================================================

# ============================================================================
# mistral-10.0.0.0b3/mistral/tests/unit/db/v2/test_db_model.py
# ============================================================================

# Copyright 2017 - Nokia Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import datetime

from mistral.db.v2.sqlalchemy import api as db_api
from mistral.tests.unit import base as test_base
from mistral_lib import utils

WF_EXEC = {
    'id': 'c0f3be41-88b9-4c86-a669-83e77cd0a1b8',
    'spec': {},
    'params': {'task': 'my_task1'},
    'project_id': '',
    'scope': 'PUBLIC',
    'state': 'IDLE',
    'state_info': "Running...",
    'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0),
    'updated_at': None,
    'context': None,
    'task_execution_id': None,
    'description': None,
    'output': None,
    'accepted': False,
    'some_invalid_field': "foobar"
}


class DBModelTest(test_base.DbTestCase):
    def test_iterate_column_names(self):
        wf_ex = db_api.create_workflow_execution(WF_EXEC)

        self.assertIsNotNone(wf_ex)

        c_names = [c_name for c_name in wf_ex.iter_column_names()]

        expected = set(WF_EXEC.keys())

        expected.remove('some_invalid_field')

        self.assertEqual(expected, set(c_names))

    def test_iterate_columns(self):
        wf_ex = db_api.create_workflow_execution(WF_EXEC)

        self.assertIsNotNone(wf_ex)

        values = {c_name: c_val for c_name, c_val in wf_ex.iter_columns()}

        expected = copy.copy(WF_EXEC)

        del expected['some_invalid_field']

        self.assertDictEqual(expected, values)

    def test_to_dict(self):
        wf_ex = db_api.create_workflow_execution(WF_EXEC)

        self.assertIsNotNone(wf_ex)

        expected = copy.copy(WF_EXEC)

        del expected['some_invalid_field']

        actual = wf_ex.to_dict()

        # The method to_dict() returns dates as strings, so we have to
        # check them separately.
        self.assertEqual(
            utils.datetime_to_str(expected['created_at']),
            actual['created_at']
        )

        # Now check the rest of the columns.
        del expected['created_at']
        del actual['created_at']

        self.assertDictEqual(expected, actual)
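
# For clarity (illustrative only, not part of the original module): the
# normalization that test_to_dict() performs could equally be written as a
# one-off comparison, since to_dict() serializes datetime columns to strings:
#
#     assert wf_ex.to_dict()['created_at'] == \
#         utils.datetime_to_str(WF_EXEC['created_at'])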

# ============================================================================
# mistral-10.0.0.0b3/mistral/tests/unit/db/v2/test_locking.py
# ============================================================================

# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import eventlet
from oslo_config import cfg
import random
import testtools

from mistral import context as auth_context
from mistral.db.sqlalchemy import sqlite_lock
from mistral.db.v2.sqlalchemy import api as db_api
from mistral.db.v2.sqlalchemy import models as db_models
from mistral.tests.unit import base as test_base

WF_EXEC = {
    'name': '1',
    'spec': {},
    'start_params': {},
    'state': 'RUNNING',
    'state_info': "Running...",
    'created_at': None,
    'updated_at': None,
    'context': None,
    'task_id': None,
    'trust_id': None
}


@testtools.skipIf(
    'sqlite' not in cfg.CONF.database.connection,
    'Not using SQLite for DB backend.')
class SQLiteLocksTest(test_base.DbTestCase):
    def setUp(self):
        super(SQLiteLocksTest, self).setUp()

        cfg.CONF.set_default('auth_enable', True, group='pecan')

        self.addCleanup(
            cfg.CONF.set_default,
            'auth_enable',
            False,
            group='pecan'
        )

    def _random_sleep(self):
        eventlet.sleep(random.Random().randint(0, 10) * 0.001)

    def _run_acquire_release_sqlite_lock(self, obj_id, session):
        self._random_sleep()

        sqlite_lock.acquire_lock(obj_id, session)

        self._random_sleep()

        sqlite_lock.release_locks(session)

    def test_acquire_release_sqlite_lock(self):
        threads = []

        id = "object_id"
        number = 500

        for i in range(1, number):
            threads.append(
                eventlet.spawn(self._run_acquire_release_sqlite_lock, id, i)
            )

        [t.wait() for t in threads]
        [t.kill() for t in threads]

        self.assertEqual(1, len(sqlite_lock.get_locks()))

        sqlite_lock.cleanup()

        self.assertEqual(0, len(sqlite_lock.get_locks()))

    def _run_correct_locking(self, wf_ex):
        # Set context info for the thread.
        auth_context.set_ctx(test_base.get_context())

        self._random_sleep()

        with db_api.transaction():
            # Lock workflow execution and get the most up-to-date object.
            wf_ex = db_api.acquire_lock(db_models.WorkflowExecution, wf_ex.id)

            # Refresh the object.
            db_api.get_workflow_execution(wf_ex.id)

            wf_ex.name = str(int(wf_ex.name) + 1)

            return wf_ex.name

    def test_correct_locking(self):
        wf_ex = db_api.create_workflow_execution(WF_EXEC)

        threads = []

        number = 500

        for i in range(1, number):
            threads.append(
                eventlet.spawn(self._run_correct_locking, wf_ex)
            )

        [t.wait() for t in threads]
        [t.kill() for t in threads]

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        print("Correct locking test gave object name: %s" % wf_ex.name)

        self.assertEqual(str(number), wf_ex.name)
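
# The invariant exercised above, sketched for clarity (illustrative only):
# each competing green thread must acquire the row lock and re-read the row
# inside the same transaction before mutating it; otherwise increments based
# on a stale copy would be lost and the final name would fall short of 500.
#
#     with db_api.transaction():
#         wf_ex = db_api.acquire_lock(db_models.WorkflowExecution, wf_ex.id)
#         # ... read-modify-write on wf_ex is now race-free ...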

# ============================================================================
# mistral-10.0.0.0b3/mistral/tests/unit/db/v2/test_sqlalchemy_db_api.py
# ============================================================================

# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO(rakhmerov): Add checks for timestamps.

import copy
import datetime
import time

from oslo_config import cfg

from mistral import context as auth_context
from mistral.db.v2.sqlalchemy import api as db_api
from mistral.db.v2.sqlalchemy import models as db_models
from mistral import exceptions as exc
from mistral.services import security
from mistral.tests.unit import base as test_base
from mistral.utils import filter_utils
from mistral_lib import utils

DEFAULT_CTX = test_base.get_context()
USER_CTX = test_base.get_context(default=False)
ADM_CTX = test_base.get_context(default=False, admin=True)

WORKBOOKS = [
    {
        'name': 'my_workbook1',
        'namespace': 'test',
        'definition': 'empty',
        'spec': {},
        'tags': ['mc'],
        'scope': 'public',
        'updated_at': None,
        'project_id': '1233',
        'trust_id': '1234',
        'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0)
    },
    {
        'name': 'my_workbook2',
        'namespace': 'test',
        'description': 'my description',
        'definition': 'empty',
        'spec': {},
        'tags': ['mc', 'hammer'],
        'scope': 'private',
        'updated_at': None,
        'project_id': '1233',
        'trust_id': '12345',
        'created_at': datetime.datetime(2016, 12, 1, 15, 1, 0)
    },
    {
        'name': 'my_workbook3',
        'namespace': '',
        'description': 'my description',
        'definition': 'empty',
        'spec': {},
        'tags': ['nonamespace'],
        'scope': 'private',
        'updated_at': None,
        'project_id': '1233',
        'trust_id': '12345',
        'created_at': datetime.datetime(2018, 7, 1, 15, 1, 0)
    }
]


class SQLAlchemyTest(test_base.DbTestCase):
    def setUp(self):
        super(SQLAlchemyTest, self).setUp()

        cfg.CONF.set_default('auth_enable', True, group='pecan')
        self.addCleanup(cfg.CONF.set_default, 'auth_enable', False,
                        group='pecan')


class WorkbookTest(SQLAlchemyTest):
    def test_create_and_get_and_load_workbook(self):
        created = db_api.create_workbook(WORKBOOKS[0])

        fetched = db_api.get_workbook(created['name'], created['namespace'])

        self.assertEqual(created, fetched)

        fetched = db_api.load_workbook(created.name, created.namespace)

        self.assertEqual(created, fetched)

        self.assertIsNone(db_api.load_workbook("not-existing-wb"))

    def test_create_and_get_and_load_workbook_with_default_namespace(self):
        created = db_api.create_workbook(WORKBOOKS[2])

        fetched = db_api.get_workbook(created['name'])

        self.assertEqual(created, fetched)

        fetched = db_api.load_workbook(created.name)

        self.assertEqual(created, fetched)

    def test_get_workbook_with_fields(self):
        with db_api.transaction():
            created = db_api.create_workbook(WORKBOOKS[0])

            fetched = db_api.get_workbook(
                created['name'],
                namespace=created['namespace'],
                fields=(db_models.Workbook.scope,)
            )

            self.assertNotEqual(created, fetched)
            self.assertIsInstance(fetched, tuple)
            self.assertEqual(1, len(fetched))
            self.assertEqual(created.scope, fetched[0])

    def test_create_workbook_duplicate_without_auth(self):
        cfg.CONF.set_default('auth_enable', False, group='pecan')

        db_api.create_workbook(WORKBOOKS[0])

        self.assertRaisesWithMessage(
            exc.DBDuplicateEntryError,
            "Duplicate entry for WorkbookDefinition "
            "['name', 'namespace', 'project_id']:"
            " my_workbook1, test, ",
            db_api.create_workbook,
            WORKBOOKS[0]
        )

    def test_update_workbook(self):
        created = db_api.create_workbook(WORKBOOKS[0])

        self.assertIsNone(created.updated_at)

        updated = db_api.update_workbook(
            created.name,
            {
                'definition': 'my new definition',
                'namespace': 'test'
            }
        )

        self.assertEqual('my new definition', updated.definition)

        fetched = db_api.get_workbook(
            created['name'],
            namespace=created['namespace']
        )

        self.assertEqual(updated, fetched)
        self.assertIsNotNone(fetched.updated_at)

    def test_create_or_update_workbook(self):
        name = WORKBOOKS[0]['name']
        namespace = WORKBOOKS[0]['namespace']

        self.assertIsNone(db_api.load_workbook(name, namespace=namespace))

        created = db_api.create_or_update_workbook(
            name,
            WORKBOOKS[0]
        )

        self.assertIsNotNone(created)
        self.assertIsNotNone(created.name)

        updated = db_api.create_or_update_workbook(
            created.name,
            {
                'definition': 'my new definition',
                'namespace': 'test'
            }
        )

        self.assertEqual('my new definition', updated.definition)
        self.assertEqual(
            'my new definition',
            db_api.load_workbook(updated.name, updated.namespace).definition
        )

        fetched = db_api.get_workbook(created.name, created.namespace)

        self.assertEqual(updated, fetched)

    def test_get_workbooks(self):
        created0 = db_api.create_workbook(WORKBOOKS[0])
        created1 = db_api.create_workbook(WORKBOOKS[1])

        fetched = db_api.get_workbooks()

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_filter_workbooks_by_equal_value(self):
        db_api.create_workbook(WORKBOOKS[0])
        created = db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'name',
            created.name,
            'eq'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created, fetched[0])

    def test_filter_workbooks_by_not_equal_value(self):
        created0 = db_api.create_workbook(WORKBOOKS[0])
        created1 = db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'name',
            created0.name,
            'neq'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

    def test_filter_workbooks_by_greater_than_value(self):
        created0 = db_api.create_workbook(WORKBOOKS[0])
        created1 = db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created0['created_at'],
            'gt'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

    def test_filter_workbooks_by_greater_than_equal_value(self):
        created0 = db_api.create_workbook(WORKBOOKS[0])
        created1 = db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created0['created_at'],
            'gte'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])
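
    # For clarity (the exact dict shape is an assumption inferred from usage
    # here): filter_utils.create_or_update_filter() accumulates per-column
    # criteria into a kwargs dict understood by the db_api.get_*() calls, and
    # passing the previous result back in ANDs several criteria together:
    #
    #     f = filter_utils.create_or_update_filter('name', 'my_workbook2',
    #                                              'eq')
    #     f = filter_utils.create_or_update_filter('scope', 'private', 'eq',
    #                                              f)
    #     db_api.get_workbooks(**f)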

    def test_filter_workbooks_by_less_than_value(self):
        created0 = db_api.create_workbook(WORKBOOKS[0])
        created1 = db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created1['created_at'],
            'lt'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created0, fetched[0])

    def test_filter_workbooks_by_less_than_equal_value(self):
        created0 = db_api.create_workbook(WORKBOOKS[0])
        created1 = db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created1['created_at'],
            'lte'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_filter_workbooks_by_values_in_list(self):
        created0 = db_api.create_workbook(WORKBOOKS[0])
        db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at']],
            'in'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created0, fetched[0])

    def test_filter_workbooks_by_values_notin_list(self):
        created0 = db_api.create_workbook(WORKBOOKS[0])
        created1 = db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at']],
            'nin'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

    def test_filter_workbooks_by_multiple_columns(self):
        created0 = db_api.create_workbook(WORKBOOKS[0])
        created1 = db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at'], created1['created_at']],
            'in'
        )

        _filter = filter_utils.create_or_update_filter(
            'name',
            'my_workbook2',
            'eq',
            _filter
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

    def test_filter_workbooks_by_single_tags(self):
        db_api.create_workbook(WORKBOOKS[0])
        db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'tags',
            "mc",
            'eq'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(2, len(fetched))

    def test_filter_workbooks_by_multiple_tags(self):
        db_api.create_workbook(WORKBOOKS[0])
        created1 = db_api.create_workbook(WORKBOOKS[1])

        _filter = filter_utils.create_or_update_filter(
            'tags',
            "mc,hammer",
            'eq'
        )

        fetched = db_api.get_workbooks(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

    def test_delete_workbook(self):
        created = db_api.create_workbook(WORKBOOKS[0])

        fetched = db_api.get_workbook(created.name, created.namespace)

        self.assertEqual(created, fetched)

        db_api.delete_workbook(created.name, created.namespace)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_workbook,
            created.name,
            created.namespace
        )
    def test_workbooks_in_two_projects(self):
        created = db_api.create_workbook(WORKBOOKS[1])

        fetched = db_api.get_workbooks()

        self.assertEqual(1, len(fetched))
        self.assertEqual(created, fetched[0])

        # Create a new user.
        auth_context.set_ctx(USER_CTX)

        created = db_api.create_workbook(WORKBOOKS[1])

        fetched = db_api.get_workbooks()

        self.assertEqual(1, len(fetched))
        self.assertEqual(created, fetched[0])

    def test_workbook_private(self):
        # Create a workbook (scope=private) under one project,
        # then make sure it's NOT visible to other projects.
        created1 = db_api.create_workbook(WORKBOOKS[1])

        fetched = db_api.get_workbooks()

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

        # Create a new user.
        auth_context.set_ctx(USER_CTX)

        fetched = db_api.get_workbooks()

        self.assertEqual(0, len(fetched))

    def test_workbook_public(self):
        # Create a workbook (scope=public) under one project,
        # then make sure it's visible to other projects.
        created0 = db_api.create_workbook(WORKBOOKS[0])

        fetched = db_api.get_workbooks()

        self.assertEqual(1, len(fetched))
        self.assertEqual(created0, fetched[0])

        # Assert that the project_id stored is actually the context's
        # project_id not the one given.
        self.assertEqual(created0.project_id, auth_context.ctx().project_id)
        self.assertNotEqual(WORKBOOKS[0]['project_id'],
                            auth_context.ctx().project_id)

        # Create a new user.
        auth_context.set_ctx(USER_CTX)

        fetched = db_api.get_workbooks()

        self.assertEqual(1, len(fetched))
        self.assertEqual(created0, fetched[0])
        self.assertEqual('public', created0.scope)

    def test_workbook_repr(self):
        s = db_api.create_workbook(WORKBOOKS[0]).__repr__()

        self.assertIn('Workbook ', s)
        self.assertIn("'name': 'my_workbook1'", s)


WF_DEFINITIONS = [
    {
        'name': 'my_wf1',
        'definition': 'empty',
        'spec': {},
        'tags': ['mc'],
        'scope': 'public',
        'project_id': '1233',
        'trust_id': '1234',
        'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0),
        'namespace': ''
    },
    {
        'name': 'my_wf2',
        'definition': 'empty',
        'spec': {},
        'tags': ['mc'],
        'scope': 'private',
        'project_id': '1233',
        'trust_id': '12345',
        'created_at': datetime.datetime(2016, 12, 1, 15, 1, 0),
        'namespace': ''
    },
    {
        'name': 'my_wf3',
        'definition': 'empty',
        'spec': {},
        'tags': ['mc'],
        'scope': 'private',
        'project_id': '1233',
        'trust_id': '12345',
        'created_at': datetime.datetime(2016, 12, 1, 15, 1, 0),
        'namespace': 'mynamespace'
    },
    {
        'name': 'my_wf1_with_namespace',
        'definition': 'empty',
        'spec': {},
        'tags': ['mc'],
        'scope': 'public',
        'project_id': '1233',
        'trust_id': '1234',
        'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0),
        'namespace': 'abc'
    },
    {
        'name': 'my_wf1_with_namespace',
        'definition': 'empty',
        'spec': {},
        'tags': ['mc'],
        'scope': 'public',
        'project_id': '1233',
        'trust_id': '1234',
        'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0),
        'namespace': 'def'
    },
]

CRON_TRIGGER = {
    'name': 'trigger1',
    'pattern': '* * * * *',
    'workflow_name': 'my_wf1',
    'workflow_id': None,
    'workflow_input': {},
    'next_execution_time':
        utils.utc_now_sec() + datetime.timedelta(days=1),
    'remaining_executions': 42,
    'scope': 'private',
    'project_id': ''
}


class WorkflowDefinitionTest(SQLAlchemyTest):
    def test_create_and_get_and_load_workflow_definition(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        fetched = db_api.get_workflow_definition(created.name)

        self.assertEqual(created, fetched)

        fetched = db_api.load_workflow_definition(created.name)

        self.assertEqual(created, fetched)

        self.assertIsNone(db_api.load_workflow_definition("not-existing-wf"))

    def test_get_workflow_definition_with_fields(self):
        with db_api.transaction():
            created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

            fetched = db_api.get_workflow_definition(
                created.name,
                fields=(db_models.WorkflowDefinition.scope,)
            )

            self.assertNotEqual(created, fetched)
            self.assertIsInstance(fetched, tuple)
            self.assertEqual(1, len(fetched))
            self.assertEqual(created.scope, fetched[0])
    def test_get_workflow_definition_with_uuid(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        fetched = db_api.get_workflow_definition(created.id)

        self.assertEqual(created, fetched)

    def test_get_workflow_definition_by_admin(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        # Switch to admin project.
        auth_context.set_ctx(test_base.get_context(default=False, admin=True))

        fetched = db_api.get_workflow_definition(created.id)

        self.assertEqual(created, fetched)

    def test_filter_workflow_definitions_by_equal_value(self):
        db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        _filter = filter_utils.create_or_update_filter(
            'name',
            created.name,
            'eq'
        )

        fetched = db_api.get_workflow_definitions(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created, fetched[0])

    def test_filter_workflow_definition_by_not_equal_value(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        _filter = filter_utils.create_or_update_filter(
            'name',
            created0.name,
            'neq'
        )

        fetched = db_api.get_workflow_definitions(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

    def test_filter_workflow_definition_by_greater_than_value(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created0['created_at'],
            'gt'
        )

        fetched = db_api.get_workflow_definitions(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

    def test_filter_workflow_definition_by_greater_than_equal_value(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created0['created_at'],
            'gte'
        )

        fetched = db_api.get_workflow_definitions(**_filter)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_filter_workflow_definition_by_less_than_value(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created1['created_at'],
            'lt'
        )

        fetched = db_api.get_workflow_definitions(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created0, fetched[0])

    def test_filter_workflow_definition_by_less_than_equal_value(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created1['created_at'],
            'lte'
        )

        fetched = db_api.get_workflow_definitions(**_filter)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_filter_workflow_definition_by_values_in_list(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        db_api.create_workflow_definition(WF_DEFINITIONS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at']],
            'in'
        )

        fetched = db_api.get_workflow_definitions(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created0, fetched[0])
    def test_filter_workflow_definition_by_values_notin_list(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at']],
            'nin'
        )

        fetched = db_api.get_workflow_definitions(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

    def test_filter_workflow_definition_by_multiple_columns(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at'], created1['created_at']],
            'in'
        )

        _filter = filter_utils.create_or_update_filter(
            'name',
            'my_wf2',
            'eq',
            _filter
        )

        fetched = db_api.get_workflow_definitions(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

    def test_create_workflow_definition_duplicate_without_auth(self):
        cfg.CONF.set_default('auth_enable', False, group='pecan')

        db_api.create_workflow_definition(WF_DEFINITIONS[0])

        self.assertRaisesWithMessage(
            exc.DBDuplicateEntryError,
            "Duplicate entry for WorkflowDefinition ['name', 'namespace',"
            " 'project_id']: my_wf1, , ",
            db_api.create_workflow_definition,
            WF_DEFINITIONS[0]
        )

    def test_create_workflow_definition_duplicate_namespace_without_auth(
            self):
        cfg.CONF.set_default('auth_enable', False, group='pecan')

        db_api.create_workflow_definition(WF_DEFINITIONS[2])

        self.assertRaisesWithMessage(
            exc.DBDuplicateEntryError,
            "Duplicate entry for WorkflowDefinition ['name', 'namespace',"
            " 'project_id']: my_wf3, mynamespace, ",
            db_api.create_workflow_definition,
            WF_DEFINITIONS[2]
        )

    def test_create_same_workflow_definition_in_different_namespace(self):
        name = WF_DEFINITIONS[3]['name']
        namespace1 = WF_DEFINITIONS[3]['namespace']
        namespace2 = WF_DEFINITIONS[4]['namespace']

        self.assertIsNone(db_api.load_workflow_definition(name, namespace1))
        self.assertIsNone(db_api.load_workflow_definition(name, namespace2))

        created1 = db_api.create_workflow_definition(
            WF_DEFINITIONS[3]
        )

        created2 = db_api.create_workflow_definition(
            WF_DEFINITIONS[4]
        )

        self.assertIsNotNone(created1)
        self.assertIsNotNone(created2)
        self.assertIsNotNone(created1.name)
        self.assertIsNotNone(created2.name)
        self.assertIsNotNone(created1.namespace)
        self.assertIsNotNone(created2.namespace)

        fetched1 = db_api.get_workflow_definition(created1.name,
                                                  created1.namespace)

        fetched2 = db_api.get_workflow_definition(created2.name,
                                                  created2.namespace)

        self.assertEqual(created1, fetched1)
        self.assertEqual(created2, fetched2)

    def test_update_workflow_definition(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        self.assertIsNone(created.updated_at)

        # Update workflow using workflow name as identifier.
        updated = db_api.update_workflow_definition(
            created['name'],
            {'definition': 'my new definition', 'scope': 'private'}
        )

        self.assertEqual('my new definition', updated.definition)

        fetched = db_api.get_workflow_definition(created.name)

        self.assertEqual(updated, fetched)
        self.assertIsNotNone(fetched.updated_at)

        # Update workflow using workflow uuid as identifier.
        updated = db_api.update_workflow_definition(
            created['id'],
            {
                'name': 'updated_name',
                'definition': 'my new definition',
                'scope': 'private'
            }
        )

        self.assertEqual('updated_name', updated.name)
        self.assertEqual('my new definition', updated.definition)

        fetched = db_api.get_workflow_definition(created['id'])

        self.assertEqual(updated, fetched)
        self.assertIsNotNone(fetched.updated_at)
    def test_update_other_project_workflow_definition(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        # Switch to another project.
        auth_context.set_ctx(USER_CTX)

        self.assertRaises(
            exc.NotAllowedException,
            db_api.update_workflow_definition,
            created.name,
            {'definition': 'my new definition', 'scope': 'private'}
        )

    def test_update_other_project_workflow_by_admin(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        # Switch to admin.
        auth_context.set_ctx(ADM_CTX)

        updated = db_api.update_workflow_definition(
            created['id'],
            {
                'definition': 'my new definition',
                'scope': 'public',
            }
        )

        self.assertEqual('my new definition', updated.definition)

        # Switch back.
        auth_context.set_ctx(DEFAULT_CTX)

        fetched = db_api.get_workflow_definition(created['id'])

        self.assertEqual(updated, fetched)

    def test_update_system_workflow_by_admin(self):
        system_workflow = copy.deepcopy(WF_DEFINITIONS[0])
        system_workflow['is_system'] = True

        created = db_api.create_workflow_definition(system_workflow)

        # Switch to admin.
        auth_context.set_ctx(ADM_CTX)

        updated = db_api.update_workflow_definition(
            created['id'],
            {
                'definition': 'my new definition',
                'scope': 'public'
            }
        )

        self.assertEqual('my new definition', updated.definition)

    def test_create_or_update_workflow_definition(self):
        name = WF_DEFINITIONS[0]['name']

        self.assertIsNone(db_api.load_workflow_definition(name))

        created = db_api.create_or_update_workflow_definition(
            name,
            WF_DEFINITIONS[0]
        )

        self.assertIsNotNone(created)
        self.assertIsNotNone(created.name)

        updated = db_api.create_or_update_workflow_definition(
            created.name,
            {'definition': 'my new definition', 'scope': 'private'}
        )

        self.assertEqual('my new definition', updated.definition)
        self.assertEqual(
            'my new definition',
            db_api.load_workflow_definition(updated.name).definition
        )

        fetched = db_api.get_workflow_definition(created.name)

        self.assertEqual(updated, fetched)

    def test_update_wf_scope_cron_trigger_associated_in_diff_tenant(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        # Create a new user.
        auth_context.set_ctx(USER_CTX)

        cron_trigger = copy.copy(CRON_TRIGGER)
        cron_trigger['workflow_id'] = created.id

        db_api.create_cron_trigger(cron_trigger)

        auth_context.set_ctx(DEFAULT_CTX)

        self.assertRaises(
            exc.NotAllowedException,
            db_api.update_workflow_definition,
            created['name'],
            {'scope': 'private'}
        )

    def test_update_wf_scope_event_trigger_associated_in_diff_tenant(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        # Switch to another user.
        auth_context.set_ctx(USER_CTX)

        event_trigger = copy.copy(EVENT_TRIGGERS[0])
        event_trigger.update({'workflow_id': created.id})

        db_api.create_event_trigger(event_trigger)

        # Switch back.
        auth_context.set_ctx(DEFAULT_CTX)

        self.assertRaises(
            exc.NotAllowedException,
            db_api.update_workflow_definition,
            created.id,
            {'scope': 'private'}
        )
    def test_update_wf_scope_event_trigger_associated_in_same_tenant(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        event_trigger = copy.copy(EVENT_TRIGGERS[0])
        event_trigger.update({'workflow_id': created.id})

        db_api.create_event_trigger(event_trigger)

        updated = db_api.update_workflow_definition(
            created.id,
            {'scope': 'private'}
        )

        self.assertEqual('private', updated.scope)

    def test_update_wf_scope_cron_trigger_associated_in_same_tenant(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        cron_trigger = copy.copy(CRON_TRIGGER)
        cron_trigger.update({'workflow_id': created.id})

        db_api.create_cron_trigger(cron_trigger)

        updated = db_api.update_workflow_definition(
            created['name'],
            {'scope': 'private'}
        )

        self.assertEqual('private', updated.scope)

    def test_get_workflow_definitions(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        fetched0 = db_api.load_workflow_definition(created0.name)
        fetched1 = db_api.load_workflow_definition(created1.name)

        self.assertEqual(security.get_project_id(), fetched0.project_id)
        self.assertEqual(security.get_project_id(), fetched1.project_id)

        fetched = db_api.get_workflow_definitions()

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_delete_workflow_definition(self):
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        fetched0 = db_api.get_workflow_definition(created0.name)
        fetched1 = db_api.get_workflow_definition(created1.id)

        self.assertEqual(created0, fetched0)
        self.assertEqual(created1, fetched1)

        for identifier in [created0.name, created1.id]:
            db_api.delete_workflow_definition(identifier)

            self.assertRaises(
                exc.DBEntityNotFoundError,
                db_api.get_workflow_definition,
                identifier
            )

    def test_delete_workflow_definition_has_event_trigger(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        event_trigger = copy.copy(EVENT_TRIGGERS[0])
        event_trigger['workflow_id'] = created.id

        trigger = db_api.create_event_trigger(event_trigger)

        self.assertEqual(trigger.workflow_id, created.id)

        self.assertRaises(
            exc.DBError,
            db_api.delete_workflow_definition,
            created.id
        )

    def test_delete_other_project_workflow_definition(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        # Switch to another project.
        auth_context.set_ctx(USER_CTX)

        self.assertRaises(
            exc.NotAllowedException,
            db_api.delete_workflow_definition,
            created.name
        )

    def test_delete_other_project_workflow_definition_by_admin(self):
        created = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        # Switch to admin.
        auth_context.set_ctx(ADM_CTX)

        db_api.delete_workflow_definition(created['id'])

        # Switch back.
        auth_context.set_ctx(DEFAULT_CTX)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_workflow_definition,
            created['id']
        )

    def test_workflow_definition_private(self):
        # Create a workflow (scope=private) under one project,
        # then make sure it's NOT visible to other projects.
        created1 = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        fetched = db_api.get_workflow_definitions()

        self.assertEqual(1, len(fetched))
        self.assertEqual(created1, fetched[0])

        # Create a new user.
        auth_context.set_ctx(USER_CTX)

        fetched = db_api.get_workflow_definitions()

        self.assertEqual(0, len(fetched))
    def test_workflow_definition_public(self):
        # Create a workflow (scope=public) under one project,
        # then make sure it's visible to other projects.
        created0 = db_api.create_workflow_definition(WF_DEFINITIONS[0])

        fetched = db_api.get_workflow_definitions()

        self.assertEqual(1, len(fetched))
        self.assertEqual(created0, fetched[0])

        # Assert that the project_id stored is actually the context's
        # project_id not the one given.
        self.assertEqual(created0.project_id, auth_context.ctx().project_id)
        self.assertNotEqual(
            WF_DEFINITIONS[0]['project_id'],
            auth_context.ctx().project_id
        )

        # Create a new user.
        auth_context.set_ctx(USER_CTX)

        fetched = db_api.get_workflow_definitions()

        self.assertEqual(1, len(fetched))
        self.assertEqual(created0, fetched[0])
        self.assertEqual('public', fetched[0].scope)

    def test_workflow_definition_repr(self):
        s = db_api.create_workflow_definition(WF_DEFINITIONS[0]).__repr__()

        self.assertIn('WorkflowDefinition ', s)
        self.assertIn("'name': 'my_wf1'", s)


ACTION_DEFINITIONS = [
    {
        'name': 'action1',
        'description': 'Action #1',
        'is_system': True,
        'action_class': 'mypackage.my_module.Action1',
        'attributes': None,
        'project_id': '',
        'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0),
        'namespace': ''
    },
    {
        'name': 'action2',
        'description': 'Action #2',
        'is_system': True,
        'action_class': 'mypackage.my_module.Action2',
        'attributes': None,
        'project_id': '',
        'created_at': datetime.datetime(2016, 12, 1, 15, 1, 0),
        'namespace': ''
    },
    {
        'name': 'action3',
        'description': 'Action #3',
        'is_system': False,
        'tags': ['mc', 'abc'],
        'action_class': 'mypackage.my_module.Action3',
        'attributes': None,
        'project_id': '',
        'created_at': datetime.datetime(2016, 12, 1, 15, 2, 0),
        'namespace': ''
    },
]


class ActionDefinitionTest(SQLAlchemyTest):
    def setUp(self):
        super(ActionDefinitionTest, self).setUp()

        db_api.delete_action_definitions()

    def test_create_and_get_and_load_action_definition(self):
        created = db_api.create_action_definition(ACTION_DEFINITIONS[0])

        fetched = db_api.get_action_definition(created.name)

        self.assertEqual(created, fetched)

        fetched = db_api.load_action_definition(created.name)

        self.assertEqual(created, fetched)

        self.assertIsNone(db_api.load_action_definition("not-existing-id"))

    def test_get_action_definition_with_fields(self):
        with db_api.transaction():
            created = db_api.create_action_definition(ACTION_DEFINITIONS[0])

            fetched = db_api.get_action_definition(
                created.name,
                fields=(db_models.ActionDefinition.scope,)
            )

            self.assertNotEqual(created, fetched)
            self.assertIsInstance(fetched, tuple)
            self.assertEqual(1, len(fetched))
            self.assertEqual(created.scope, fetched[0])

    def test_get_action_definition_with_uuid(self):
        created = db_api.create_action_definition(ACTION_DEFINITIONS[0])

        fetched = db_api.get_action_definition(created.id)

        self.assertEqual(created, fetched)

    def test_create_action_definition_duplicate_without_auth(self):
        cfg.CONF.set_default('auth_enable', False, group='pecan')

        db_api.create_action_definition(ACTION_DEFINITIONS[0])

        self.assertRaisesWithMessage(
            exc.DBDuplicateEntryError,
            "Duplicate entry for Action ['name', 'namespace', 'project_id']:"
            " action1, , ",
            db_api.create_action_definition,
            ACTION_DEFINITIONS[0]
        )

    def test_filter_action_definitions_by_equal_value(self):
        db_api.create_action_definition(ACTION_DEFINITIONS[0])
        db_api.create_action_definition(ACTION_DEFINITIONS[1])
        created2 = db_api.create_action_definition(ACTION_DEFINITIONS[2])

        _filter = filter_utils.create_or_update_filter(
            'is_system',
            False,
            'eq'
        )

        fetched = db_api.get_action_definitions(**_filter)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created2, fetched[0])
    def test_filter_action_definitions_by_not_equal_value(self):
        created0 = db_api.create_action_definition(ACTION_DEFINITIONS[0])
        created1 = db_api.create_action_definition(ACTION_DEFINITIONS[1])
        db_api.create_action_definition(ACTION_DEFINITIONS[2])

        _filter = filter_utils.create_or_update_filter(
            'is_system',
            False,
            'neq'
        )

        fetched = db_api.get_action_definitions(**_filter)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_filter_action_definitions_by_greater_than_value(self):
        created0 = db_api.create_action_definition(ACTION_DEFINITIONS[0])
        created1 = db_api.create_action_definition(ACTION_DEFINITIONS[1])
        created2 = db_api.create_action_definition(ACTION_DEFINITIONS[2])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created0['created_at'],
            'gt'
        )

        fetched = db_api.get_action_definitions(**_filter)

        self.assertEqual(2, len(fetched))
        self.assertIn(created1, fetched)
        self.assertIn(created2, fetched)

    def test_filter_action_definitions_by_greater_than_equal_value(self):
        created0 = db_api.create_action_definition(ACTION_DEFINITIONS[0])
        created1 = db_api.create_action_definition(ACTION_DEFINITIONS[1])
        created2 = db_api.create_action_definition(ACTION_DEFINITIONS[2])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created0['created_at'],
            'gte'
        )

        fetched = db_api.get_action_definitions(**_filter)

        self.assertEqual(3, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])
        self._assert_single_item(fetched, name=created2['name'])

    def test_filter_action_definitions_by_less_than_value(self):
        created0 = db_api.create_action_definition(ACTION_DEFINITIONS[0])
        created1 = db_api.create_action_definition(ACTION_DEFINITIONS[1])
        created2 = db_api.create_action_definition(ACTION_DEFINITIONS[2])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created2['created_at'],
            'lt'
        )

        fetched = db_api.get_action_definitions(**_filter)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_filter_action_definitions_by_less_than_equal_value(self):
        created0 = db_api.create_action_definition(ACTION_DEFINITIONS[0])
        created1 = db_api.create_action_definition(ACTION_DEFINITIONS[1])
        created2 = db_api.create_action_definition(ACTION_DEFINITIONS[2])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created2['created_at'],
            'lte'
        )

        fetched = db_api.get_action_definitions(**_filter)

        self.assertEqual(3, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])
        self._assert_single_item(fetched, name=created2['name'])

    def test_filter_action_definitions_by_values_in_list(self):
        created0 = db_api.create_action_definition(ACTION_DEFINITIONS[0])
        created1 = db_api.create_action_definition(ACTION_DEFINITIONS[1])
        db_api.create_action_definition(ACTION_DEFINITIONS[2])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at'], created1['created_at']],
            'in'
        )

        fetched = db_api.get_action_definitions(**_filter)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])
    def test_filter_action_definitions_by_values_notin_list(self):
        created0 = db_api.create_action_definition(ACTION_DEFINITIONS[0])
        created1 = db_api.create_action_definition(ACTION_DEFINITIONS[1])
        created2 = db_api.create_action_definition(ACTION_DEFINITIONS[2])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at'], created1['created_at']],
            'nin'
        )

        fetched = db_api.get_action_definitions(**_filter)

        self.assertEqual(1, len(fetched))
        self._assert_single_item(fetched, name=created2['name'])

    def test_filter_action_definitions_by_multiple_columns(self):
        created0 = db_api.create_action_definition(ACTION_DEFINITIONS[0])
        created1 = db_api.create_action_definition(ACTION_DEFINITIONS[1])
        db_api.create_action_definition(ACTION_DEFINITIONS[2])

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at'], created1['created_at']],
            'in'
        )

        _filter = filter_utils.create_or_update_filter(
            'is_system',
            True,
            'neq',
            _filter
        )

        fetched = db_api.get_action_definitions(**_filter)

        self.assertEqual(0, len(fetched))

    def test_filter_action_definitions_by_has_filter(self):
        db_api.create_action_definition(ACTION_DEFINITIONS[0])
        db_api.create_action_definition(ACTION_DEFINITIONS[1])
        created3 = db_api.create_action_definition(ACTION_DEFINITIONS[2])

        f = filter_utils.create_or_update_filter('name', "3", 'has')

        fetched = db_api.get_action_definitions(**f)

        self.assertEqual(1, len(fetched))
        self.assertEqual(created3, fetched[0])

        f = filter_utils.create_or_update_filter('name', "action", 'has')

        fetched = db_api.get_action_definitions(**f)

        self.assertEqual(3, len(fetched))

    def test_update_action_definition_with_name(self):
        created = db_api.create_action_definition(ACTION_DEFINITIONS[0])

        self.assertIsNone(created.updated_at)

        updated = db_api.update_action_definition(
            created.name,
            {'description': 'my new desc'}
        )

        self.assertEqual('my new desc', updated.description)

        fetched = db_api.get_action_definition(created.name)

        self.assertEqual(updated, fetched)
        self.assertIsNotNone(fetched.updated_at)

    def test_update_action_definition_with_uuid(self):
        created = db_api.create_action_definition(ACTION_DEFINITIONS[0])

        self.assertIsNone(created.updated_at)

        updated = db_api.update_action_definition(
            created.id,
            {'description': 'my new desc'}
        )

        self.assertEqual('my new desc', updated.description)

        fetched = db_api.get_action_definition(created.id)

        self.assertEqual(updated, fetched)

    def test_create_or_update_action_definition(self):
        name = 'not-existing-id'

        self.assertIsNone(db_api.load_action_definition(name))

        created = db_api.create_or_update_action_definition(
            name,
            ACTION_DEFINITIONS[0]
        )

        self.assertIsNotNone(created)
        self.assertIsNotNone(created.name)

        updated = db_api.create_or_update_action_definition(
            created.name,
            {'description': 'my new desc'}
        )

        self.assertEqual('my new desc', updated.description)
        self.assertEqual(
            'my new desc',
            db_api.load_action_definition(updated.name).description
        )

        fetched = db_api.get_action_definition(created.name)

        self.assertEqual(updated, fetched)

    def test_get_action_definitions(self):
        created0 = db_api.create_action_definition(ACTION_DEFINITIONS[0])
        created1 = db_api.create_action_definition(ACTION_DEFINITIONS[1])

        fetched = db_api.get_action_definitions(is_system=True)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_delete_action_definition_with_name(self):
        created = db_api.create_action_definition(ACTION_DEFINITIONS[0])
    def test_delete_action_definition_with_name(self):
        created = db_api.create_action_definition(ACTION_DEFINITIONS[0])

        fetched = db_api.get_action_definition(created.name)

        self.assertEqual(created, fetched)

        db_api.delete_action_definition(created.name)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_action_definition,
            created.name
        )

    def test_delete_action_definition_with_uuid(self):
        created = db_api.create_action_definition(ACTION_DEFINITIONS[0])

        fetched = db_api.get_action_definition(created.id)

        self.assertEqual(created, fetched)

        db_api.delete_action_definition(created.id)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_action_definition,
            created.id
        )

    def test_action_definition_repr(self):
        s = db_api.create_action_definition(ACTION_DEFINITIONS[0]).__repr__()

        self.assertIn('ActionDefinition ', s)
        self.assertIn("'description': 'Action #1'", s)
        self.assertIn("'name': 'action1'", s)


ACTION_EXECS = [
    {
        'spec': None,
        'state': 'IDLE',
        'state_info': "Running...",
        'created_at': None,
        'updated_at': None,
        'task_id': None,
        'tags': [],
        'accepted': True,
        'output': {"result": "value"}
    },
    {
        'spec': None,
        'state': 'ERROR',
        'state_info': "Failed due to some reason...",
        'created_at': None,
        'updated_at': None,
        'task_id': None,
        'tags': ['deployment'],
        'accepted': False,
        'output': {"result": "value"}
    }
]


class ActionExecutionTest(SQLAlchemyTest):
    def test_create_and_get_and_load_action_execution(self):
        with db_api.transaction():
            created = db_api.create_action_execution(ACTION_EXECS[0])

            fetched = db_api.get_action_execution(created.id)

            self.assertEqual(created, fetched)

            fetched = db_api.load_action_execution(created.id)

            self.assertEqual(created, fetched)

            self.assertIsNone(db_api.load_action_execution("not-existing-id"))

    def test_get_action_execution_with_fields(self):
        with db_api.transaction():
            created = db_api.create_action_execution(ACTION_EXECS[0])

            fetched = db_api.get_action_execution(
                created.id,
                fields=(db_models.ActionExecution.name,)
            )

            self.assertNotEqual(created, fetched)
            self.assertIsInstance(fetched, tuple)
            self.assertEqual(1, len(fetched))
            self.assertEqual(created.name, fetched[0])

    def test_update_action_execution(self):
        with db_api.transaction():
            created = db_api.create_action_execution(ACTION_EXECS[0])

            self.assertIsNone(created.updated_at)

            updated = db_api.update_action_execution(
                created.id,
                {'state': 'RUNNING', 'state_info': "Running..."}
            )

            self.assertEqual('RUNNING', updated.state)
            self.assertEqual(
                'RUNNING',
                db_api.load_action_execution(updated.id).state
            )

            fetched = db_api.get_action_execution(created.id)

            self.assertEqual(updated, fetched)
            self.assertIsNotNone(fetched.updated_at)

    def test_create_or_update_action_execution(self):
        id = 'not-existing-id'

        self.assertIsNone(db_api.load_action_execution(id))

        created = db_api.create_or_update_action_execution(id, ACTION_EXECS[0])

        self.assertIsNotNone(created)
        self.assertIsNotNone(created.id)

        with db_api.transaction():
            updated = db_api.create_or_update_action_execution(
                created.id,
                {'state': 'RUNNING'}
            )

            self.assertEqual('RUNNING', updated.state)
            self.assertEqual(
                'RUNNING',
                db_api.load_action_execution(updated.id).state
            )

            fetched = db_api.get_action_execution(created.id)

            self.assertEqual(updated, fetched)
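
    # The heartbeat test below assumes that
    # update_action_execution_heartbeat() bumps last_heartbeat to the current
    # time; the sleeps are only there to let the clock advance far enough for
    # the old and new values to differ.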
    def test_update_action_execution_heartbeat(self):
        with db_api.transaction():
            created = db_api.create_action_execution(ACTION_EXECS[0])
            created_last_heartbeat = created.last_heartbeat

            fetched = db_api.get_action_execution(created.id)
            fetched_last_heartbeat = fetched.last_heartbeat

            time.sleep(1)

            self.assertEqual(created_last_heartbeat, fetched_last_heartbeat)

            time.sleep(1)

            db_api.update_action_execution_heartbeat(created.id)

            fetched = db_api.get_action_execution(created.id)
            fetched_last_heartbeat = fetched.last_heartbeat

            self.assertIsNot(created_last_heartbeat, fetched_last_heartbeat)

    def test_get_action_executions(self):
        with db_api.transaction():
            created0 = db_api.create_action_execution(WF_EXECS[0])

            db_api.create_action_execution(ACTION_EXECS[1])

            fetched = db_api.get_action_executions(
                state=WF_EXECS[0]['state']
            )

            self.assertEqual(1, len(fetched))
            self.assertEqual(created0, fetched[0])

    def test_delete_action_execution(self):
        with db_api.transaction():
            created = db_api.create_action_execution(ACTION_EXECS[0])

            fetched = db_api.get_action_execution(created.id)

            self.assertEqual(created, fetched)

            db_api.delete_action_execution(created.id)

            self.assertRaises(
                exc.DBEntityNotFoundError,
                db_api.get_action_execution,
                created.id
            )

    def test_delete_other_tenant_action_execution(self):
        created = db_api.create_action_execution(ACTION_EXECS[0])

        # Create a new user.
        auth_context.set_ctx(USER_CTX)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.delete_action_execution,
            created.id
        )

    def test_trim_status_info(self):
        created = db_api.create_action_execution(ACTION_EXECS[0])

        self.assertIsNone(created.updated_at)

        updated = db_api.update_action_execution(
            created.id,
            {'state': 'FAILED', 'state_info': ".." * 65536}
        )

        self.assertEqual('FAILED', updated.state)

        state_info = db_api.load_action_execution(updated.id).state_info

        self.assertEqual(65503, len(state_info))

    def test_action_execution_repr(self):
        s = db_api.create_action_execution(ACTION_EXECS[0]).__repr__()

        self.assertIn('ActionExecution ', s)
        self.assertIn("'state': 'IDLE'", s)
        self.assertIn("'state_info': 'Running...'", s)
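
# Workflow execution fixtures for the tests below. The fixed created_at
# timestamps, one minute apart, give the gt/gte/lt/lte filter tests a
# deterministic ordering to assert against.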
WF_EXECS = [
    {
        'spec': {},
        'start_params': {'task': 'my_task1'},
        'state': 'IDLE',
        'state_info': "Running...",
        'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0),
        'updated_at': None,
        'context': None,
        'task_id': None,
        'trust_id': None,
        'description': None,
        'output': None
    },
    {
        'spec': {},
        'start_params': {'task': 'my_task1'},
        'state': 'RUNNING',
        'state_info': "Running...",
        'created_at': datetime.datetime(2016, 12, 1, 15, 1, 0),
        'updated_at': None,
        'context': {'image_id': '123123'},
        'task_id': None,
        'trust_id': None,
        'description': None,
        'output': None
    }
]


class WorkflowExecutionTest(SQLAlchemyTest):
    def test_create_and_get_and_load_workflow_execution(self):
        with db_api.transaction():
            created = db_api.create_workflow_execution(WF_EXECS[0])

            fetched = db_api.get_workflow_execution(created.id)

            self.assertEqual(created, fetched)

            fetched = db_api.load_workflow_execution(created.id)

            self.assertEqual(created, fetched)

            self.assertIsNone(
                db_api.load_workflow_execution("not-existing-id")
            )

    def test_get_workflow_execution_with_fields(self):
        with db_api.transaction():
            created = db_api.create_workflow_execution(WF_EXECS[0])

            fetched = db_api.get_workflow_execution(
                created.id,
                fields=(db_models.WorkflowExecution.state,)
            )

            self.assertNotEqual(created, fetched)
            self.assertIsInstance(fetched, tuple)
            self.assertEqual(1, len(fetched))
            self.assertEqual(created.state, fetched[0])

    def test_update_workflow_execution(self):
        with db_api.transaction():
            created = db_api.create_workflow_execution(WF_EXECS[0])

            self.assertIsNone(created.updated_at)

            updated = db_api.update_workflow_execution(
                created.id,
                {'state': 'RUNNING', 'state_info': "Running..."}
            )

            self.assertEqual('RUNNING', updated.state)
            self.assertEqual(
                'RUNNING',
                db_api.load_workflow_execution(updated.id).state
            )

            fetched = db_api.get_workflow_execution(created.id)

            self.assertEqual(updated, fetched)
            self.assertIsNotNone(fetched.updated_at)

    def test_update_workflow_execution_by_admin(self):
        with db_api.transaction():
            created = db_api.create_workflow_execution(WF_EXECS[0])

            auth_context.set_ctx(ADM_CTX)

            updated = db_api.update_workflow_execution(
                created.id,
                {'state': 'RUNNING', 'state_info': "Running..."}
            )

            auth_context.set_ctx(DEFAULT_CTX)

            self.assertEqual('RUNNING', updated.state)
            self.assertEqual(
                'RUNNING',
                db_api.load_workflow_execution(updated.id).state
            )

            fetched = db_api.get_workflow_execution(created.id)

            self.assertEqual(updated, fetched)
            self.assertIsNotNone(fetched.updated_at)

    def test_update_workflow_execution_by_others_fail(self):
        with db_api.transaction():
            created = db_api.create_workflow_execution(WF_EXECS[0])

            auth_context.set_ctx(USER_CTX)

            self.assertRaises(
                exc.DBEntityNotFoundError,
                db_api.update_workflow_execution,
                created.id,
                {'state': 'RUNNING', 'state_info': "Running..."}
            )

    def test_create_or_update_workflow_execution(self):
        id = 'not-existing-id'

        self.assertIsNone(db_api.load_workflow_execution(id))

        with db_api.transaction():
            created = db_api.create_or_update_workflow_execution(
                id,
                WF_EXECS[0]
            )

            self.assertIsNotNone(created)
            self.assertIsNotNone(created.id)

            updated = db_api.create_or_update_workflow_execution(
                created.id,
                {'state': 'RUNNING'}
            )

            self.assertEqual('RUNNING', updated.state)
            self.assertEqual(
                'RUNNING',
                db_api.load_workflow_execution(updated.id).state
            )

            fetched = db_api.get_workflow_execution(created.id)

            self.assertEqual(updated, fetched)

    def test_get_workflow_executions(self):
        with db_api.transaction():
            created0 = db_api.create_workflow_execution(WF_EXECS[0])

            db_api.create_workflow_execution(WF_EXECS[1])

            fetched = db_api.get_workflow_executions(
                state=WF_EXECS[0]['state']
            )

            self.assertEqual(1, len(fetched))
            self.assertEqual(created0, fetched[0])

    def test_filter_workflow_execution_by_equal_value(self):
        with db_api.transaction():
            db_api.create_workflow_execution(WF_EXECS[0])

            created = db_api.create_workflow_execution(WF_EXECS[1])

            _filter = filter_utils.create_or_update_filter(
                'id',
                created.id,
                'eq'
            )

            fetched = db_api.get_workflow_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created, fetched[0])

    def test_filter_workflow_execution_by_not_equal_value(self):
        with db_api.transaction():
            created0 = db_api.create_workflow_execution(WF_EXECS[0])
            created1 = db_api.create_workflow_execution(WF_EXECS[1])

            _filter = filter_utils.create_or_update_filter(
                'id',
                created0.id,
                'neq'
            )

            fetched = db_api.get_workflow_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created1, fetched[0])

    def test_filter_workflow_execution_by_greater_than_value(self):
        with db_api.transaction():
            created0 = db_api.create_workflow_execution(WF_EXECS[0])
            created1 = db_api.create_workflow_execution(WF_EXECS[1])

            _filter = filter_utils.create_or_update_filter(
                'created_at',
                created0['created_at'],
                'gt'
            )

            fetched = db_api.get_workflow_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created1, fetched[0])

    def test_filter_workflow_execution_by_greater_than_equal_value(self):
        with db_api.transaction():
            created0 = db_api.create_workflow_execution(WF_EXECS[0])
            created1 = db_api.create_workflow_execution(WF_EXECS[1])

            _filter = filter_utils.create_or_update_filter(
                'created_at',
                created0['created_at'],
                'gte'
            )

            fetched = db_api.get_workflow_executions(**_filter)

            self.assertEqual(2, len(fetched))
            self._assert_single_item(fetched, state=created0['state'])
            self._assert_single_item(fetched, state=created1['state'])
    def test_filter_workflow_execution_by_less_than_value(self):
        with db_api.transaction():
            created0 = db_api.create_workflow_execution(WF_EXECS[0])
            created1 = db_api.create_workflow_execution(WF_EXECS[1])

            _filter = filter_utils.create_or_update_filter(
                'created_at',
                created1['created_at'],
                'lt'
            )

            fetched = db_api.get_workflow_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created0, fetched[0])

    def test_filter_workflow_execution_by_less_than_equal_value(self):
        with db_api.transaction():
            created0 = db_api.create_workflow_execution(WF_EXECS[0])
            created1 = db_api.create_workflow_execution(WF_EXECS[1])

            _filter = filter_utils.create_or_update_filter(
                'created_at',
                created1['created_at'],
                'lte'
            )

            fetched = db_api.get_workflow_executions(**_filter)

            self.assertEqual(2, len(fetched))
            self._assert_single_item(fetched, state=created0['state'])
            self._assert_single_item(fetched, state=created1['state'])

    def test_filter_workflow_execution_by_values_in_list(self):
        with db_api.transaction():
            created0 = db_api.create_workflow_execution(WF_EXECS[0])

            db_api.create_workflow_execution(WF_EXECS[1])

            _filter = filter_utils.create_or_update_filter(
                'created_at',
                [created0['created_at']],
                'in'
            )

            fetched = db_api.get_workflow_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created0, fetched[0])

    def test_filter_workflow_execution_by_values_notin_list(self):
        with db_api.transaction():
            created0 = db_api.create_workflow_execution(WF_EXECS[0])
            created1 = db_api.create_workflow_execution(WF_EXECS[1])

            _filter = filter_utils.create_or_update_filter(
                'created_at',
                [created0['created_at']],
                'nin'
            )

            fetched = db_api.get_workflow_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created1, fetched[0])

    def test_filter_workflow_execution_by_multiple_columns(self):
        with db_api.transaction():
            created0 = db_api.create_workflow_execution(WF_EXECS[0])
            created1 = db_api.create_workflow_execution(WF_EXECS[1])

            _filter = filter_utils.create_or_update_filter(
                'created_at',
                [created0['created_at'], created1['created_at']],
                'in'
            )
            _filter = filter_utils.create_or_update_filter(
                'id',
                created1.id,
                'eq',
                _filter
            )

            fetched = db_api.get_workflow_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created1, fetched[0])

    def test_delete_workflow_execution(self):
        with db_api.transaction():
            created = db_api.create_workflow_execution(WF_EXECS[0])

            fetched = db_api.get_workflow_execution(created.id)

            self.assertEqual(created, fetched)

            db_api.delete_workflow_execution(created.id)

            self.assertRaises(
                exc.DBEntityNotFoundError,
                db_api.get_workflow_execution,
                created.id
            )

    def test_delete_workflow_execution_by_admin(self):
        with db_api.transaction():
            created = db_api.create_workflow_execution(WF_EXECS[0])

            fetched = db_api.get_workflow_execution(created.id)

            self.assertEqual(created, fetched)

            auth_context.set_ctx(ADM_CTX)

            db_api.delete_workflow_execution(created.id)

            auth_context.set_ctx(DEFAULT_CTX)

            self.assertRaises(
                exc.DBEntityNotFoundError,
                db_api.get_workflow_execution,
                created.id
            )

    def test_delete_workflow_execution_by_other_fail(self):
        created = db_api.create_workflow_execution(WF_EXECS[0])

        auth_context.set_ctx(USER_CTX)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.delete_workflow_execution,
            created.id
        )
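
    # The trim test below implies that the DB layer truncates state_info
    # values that would overflow the underlying column: 2 * 65536 characters
    # go in, 65503 come back, which suggests a 65535-byte column with some
    # headroom reserved for a truncation marker (an assumption based purely
    # on these assertions).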
    def test_trim_status_info(self):
        created = db_api.create_workflow_execution(WF_EXECS[0])

        self.assertIsNone(created.updated_at)

        updated = db_api.update_workflow_execution(
            created.id,
            {'state': 'FAILED', 'state_info': ".." * 65536}
        )

        self.assertEqual('FAILED', updated.state)

        state_info = db_api.load_workflow_execution(updated.id).state_info

        self.assertEqual(65503, len(state_info))

    def test_task_executions(self):
        # Add an associated object into collection.
        with db_api.transaction():
            wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

            self.assertEqual(0, len(wf_ex.task_executions))

            wf_ex.task_executions.append(
                db_models.TaskExecution(**TASK_EXECS[0])
            )

        # Make sure task execution has been saved.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertIsNotNone(wf_ex)
            self.assertEqual(1, len(wf_ex.task_executions))

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(TASK_EXECS[0]['name'], task_ex.name)

        self.assertEqual(1, len(db_api.get_workflow_executions()))
        self.assertEqual(1, len(db_api.get_task_executions()))

        # Remove task execution from collection.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            del wf_ex.task_executions[:]

        # Make sure task execution has been removed.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertEqual(0, len(wf_ex.task_executions))
            self.assertIsNone(db_api.load_task_execution(task_ex.id))

    def test_workflow_execution_repr(self):
        s = db_api.create_workflow_execution(WF_EXECS[0]).__repr__()

        self.assertIn('Execution ', s)
        self.assertIn("'context': None", s)
        self.assertIn("'state': 'IDLE'", s)


TASK_EXECS = [
    {
        'workflow_execution_id': '1',
        'workflow_name': 'my_wb.my_wf',
        'name': 'my_task1',
        'spec': None,
        'action_spec': None,
        'state': 'IDLE',
        'tags': ['deployment'],
        'in_context': None,
        'runtime_context': None,
        'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0),
        'updated_at': None
    },
    {
        'workflow_execution_id': '1',
        'workflow_name': 'my_wb.my_wf',
        'name': 'my_task2',
        'spec': None,
        'action_spec': None,
        'state': 'IDLE',
        'tags': ['deployment'],
        'in_context': {'image_id': '123123'},
        'runtime_context': None,
        'created_at': datetime.datetime(2016, 12, 1, 15, 1, 0),
        'updated_at': None
    },
]


class TaskExecutionTest(SQLAlchemyTest):
    def test_create_and_get_and_load_task_execution(self):
        with db_api.transaction():
            wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

            values = copy.deepcopy(TASK_EXECS[0])
            values.update({'workflow_execution_id': wf_ex.id})

            created = db_api.create_task_execution(values)

            fetched = db_api.get_task_execution(created.id)

            self.assertEqual(created, fetched)

            self.assertNotIsInstance(fetched.workflow_execution, list)

            fetched = db_api.load_task_execution(created.id)

            self.assertEqual(created, fetched)

            self.assertIsNone(db_api.load_task_execution("not-existing-id"))

    def test_get_task_execution_with_fields(self):
        with db_api.transaction():
            wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

            values = copy.deepcopy(TASK_EXECS[0])
            values.update({'workflow_execution_id': wf_ex.id})

            created = db_api.create_task_execution(values)

            fetched = db_api.get_task_execution(
                created.id,
                fields=(db_models.TaskExecution.name,)
            )

            self.assertNotEqual(created, fetched)
            self.assertIsInstance(fetched, tuple)
            self.assertEqual(1, len(fetched))
            self.assertEqual(created.name, fetched[0])
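
    # The next test relies on SQLAlchemy relationship collections: appending
    # ActionExecution objects to task.action_executions inside an open
    # transaction is enough to persist them on commit, and deleting the
    # collection slice removes the rows again (presumably via cascade rules
    # on the relationship).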
    def test_action_executions(self):
        # Store one task with two invocations.
        with db_api.transaction():
            wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

            values = copy.deepcopy(TASK_EXECS[0])
            values.update({'workflow_execution_id': wf_ex.id})

            task = db_api.create_task_execution(values)

            self.assertEqual(0, len(task.action_executions))
            self.assertEqual(0, len(task.workflow_executions))

            a_ex1 = db_models.ActionExecution()
            a_ex2 = db_models.ActionExecution()

            task.action_executions.append(a_ex1)
            task.action_executions.append(a_ex2)

            self.assertEqual(2, len(task.action_executions))
            self.assertEqual(0, len(task.workflow_executions))

        # Make sure associated objects were saved.
        with db_api.transaction():
            task = db_api.get_task_execution(task.id)

            self.assertEqual(2, len(task.action_executions))

            self.assertNotIsInstance(
                task.action_executions[0].task_execution,
                list
            )

        # Remove associated objects from collection.
        with db_api.transaction():
            task = db_api.get_task_execution(task.id)

            del task.action_executions[:]

        # Make sure associated objects were deleted.
        with db_api.transaction():
            task = db_api.get_task_execution(task.id)

            self.assertEqual(0, len(task.action_executions))

    def test_update_task_execution(self):
        wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

        values = copy.deepcopy(TASK_EXECS[0])
        values.update({'workflow_execution_id': wf_ex.id})

        created = db_api.create_task_execution(values)

        self.assertIsNone(created.updated_at)

        with db_api.transaction():
            updated = db_api.update_task_execution(
                created.id,
                {'workflow_name': 'new_wf'}
            )

            self.assertEqual('new_wf', updated.workflow_name)

            fetched = db_api.get_task_execution(created.id)

            self.assertEqual(updated, fetched)
            self.assertIsNotNone(fetched.updated_at)

    def test_create_or_update_task_execution(self):
        id = 'not-existing-id'

        self.assertIsNone(db_api.load_task_execution(id))

        wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

        values = copy.deepcopy(TASK_EXECS[0])
        values.update({'workflow_execution_id': wf_ex.id})

        created = db_api.create_or_update_task_execution(id, values)

        self.assertIsNotNone(created)
        self.assertIsNotNone(created.id)

        with db_api.transaction():
            updated = db_api.create_or_update_task_execution(
                created.id,
                {'state': 'RUNNING'}
            )

            self.assertEqual('RUNNING', updated.state)
            self.assertEqual(
                'RUNNING',
                db_api.load_task_execution(updated.id).state
            )

            fetched = db_api.get_task_execution(created.id)

            self.assertEqual(updated, fetched)

    def test_get_task_executions(self):
        wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

        values = copy.deepcopy(TASK_EXECS[0])
        values.update({'workflow_execution_id': wf_ex.id})

        created0 = db_api.create_task_execution(values)

        values = copy.deepcopy(TASK_EXECS[1])
        values.update({'workflow_execution_id': wf_ex.id})

        created1 = db_api.create_task_execution(values)

        fetched = db_api.get_task_executions(
            workflow_name=TASK_EXECS[0]['workflow_name']
        )

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_filter_task_execution_by_equal_value(self):
        created, _ = self._create_task_executions()

        _filter = filter_utils.create_or_update_filter(
            'name',
            created.name,
            'eq'
        )

        with db_api.transaction():
            fetched = db_api.get_task_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created, fetched[0])

    def test_filter_task_execution_by_not_equal_value(self):
        created0, created1 = self._create_task_executions()

        _filter = filter_utils.create_or_update_filter(
            'name',
            created0.name,
            'neq'
        )

        with db_api.transaction():
            fetched = db_api.get_task_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created1, fetched[0])
    def test_filter_task_execution_by_greater_than_value(self):
        created0, created1 = self._create_task_executions()

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created0['created_at'],
            'gt'
        )

        with db_api.transaction():
            fetched = db_api.get_task_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created1, fetched[0])

    def test_filter_task_execution_by_greater_than_equal_value(self):
        created0, created1 = self._create_task_executions()

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created0['created_at'],
            'gte'
        )

        fetched = db_api.get_task_executions(**_filter)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_filter_task_execution_by_less_than_value(self):
        created0, created1 = self._create_task_executions()

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created1['created_at'],
            'lt'
        )

        with db_api.transaction():
            fetched = db_api.get_task_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created0, fetched[0])

    def test_filter_task_execution_by_less_than_equal_value(self):
        created0, created1 = self._create_task_executions()

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            created1['created_at'],
            'lte'
        )

        fetched = db_api.get_task_executions(**_filter)

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_filter_task_execution_by_values_in_list(self):
        created, _ = self._create_task_executions()

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created['created_at']],
            'in'
        )

        with db_api.transaction():
            fetched = db_api.get_task_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created, fetched[0])

    def test_filter_task_execution_by_values_not_in_list(self):
        created0, created1 = self._create_task_executions()

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at']],
            'nin'
        )

        with db_api.transaction():
            fetched = db_api.get_task_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created1, fetched[0])

    def test_filter_task_execution_by_multiple_columns(self):
        created0, created1 = self._create_task_executions()

        _filter = filter_utils.create_or_update_filter(
            'created_at',
            [created0['created_at'], created1['created_at']],
            'in'
        )
        _filter = filter_utils.create_or_update_filter(
            'name',
            created1.name,
            'eq',
            _filter
        )

        with db_api.transaction():
            fetched = db_api.get_task_executions(**_filter)

            self.assertEqual(1, len(fetched))
            self.assertEqual(created1, fetched[0])

    def test_delete_task_execution(self):
        wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

        values = copy.deepcopy(TASK_EXECS[0])
        values.update({'workflow_execution_id': wf_ex.id})

        with db_api.transaction():
            created = db_api.create_task_execution(values)

            fetched = db_api.get_task_execution(created.id)

            self.assertEqual(created, fetched)

            db_api.delete_task_execution(created.id)

            self.assertRaises(
                exc.DBEntityNotFoundError,
                db_api.get_task_execution,
                created.id
            )
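
    # The test below implies that get_incomplete_task_executions() treats
    # terminal states such as SUCCESS as "complete": one RUNNING and one
    # SUCCESS task are created, and only the RUNNING one is expected back.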
    def test_get_incomplete_task_executions(self):
        wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

        values = copy.deepcopy(TASK_EXECS[0])
        values.update({'workflow_execution_id': wf_ex.id})
        values['state'] = 'RUNNING'

        with db_api.transaction():
            task_ex1 = db_api.create_task_execution(values)

            task_execs = db_api.get_incomplete_task_executions(
                workflow_execution_id=wf_ex.id
            )

            self.assertEqual(1, len(task_execs))
            self.assertEqual(task_ex1, task_execs[0])
            self.assertEqual(
                1,
                db_api.get_incomplete_task_executions_count(
                    workflow_execution_id=wf_ex.id
                )
            )

            # Add one more task.
            values = copy.deepcopy(TASK_EXECS[1])
            values.update({'workflow_execution_id': wf_ex.id})
            values['state'] = 'SUCCESS'

            db_api.create_task_execution(values)

            # There should still be just one incomplete task.
            task_execs = db_api.get_incomplete_task_executions(
                workflow_execution_id=wf_ex.id
            )

            self.assertEqual(1, len(task_execs))
            self.assertEqual(task_ex1, task_execs[0])
            self.assertEqual(
                1,
                db_api.get_incomplete_task_executions_count(
                    workflow_execution_id=wf_ex.id
                )
            )

    def test_task_execution_repr(self):
        wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

        values = copy.deepcopy(TASK_EXECS[0])
        values.update({'workflow_execution_id': wf_ex.id})

        s = db_api.create_task_execution(values).__repr__()

        self.assertIn('TaskExecution ', s)
        self.assertIn("'state': 'IDLE'", s)
        self.assertIn("'name': 'my_task1'", s)

    def _create_task_executions(self):
        wf_ex = db_api.create_workflow_execution(WF_EXECS[0])

        values = copy.deepcopy(TASK_EXECS[0])
        values.update({'workflow_execution_id': wf_ex.id})

        created0 = db_api.create_task_execution(values)

        values = copy.deepcopy(TASK_EXECS[1])
        values.update({'workflow_execution_id': wf_ex.id})

        created1 = db_api.create_task_execution(values)

        return created0, created1


CRON_TRIGGERS = [
    {
        'id': '11111111-1111-1111-1111-111111111111',
        'name': 'trigger1',
        'pattern': '* * * * *',
        'workflow_name': 'my_wf',
        'workflow_id': None,
        'workflow_input': {},
        'next_execution_time':
            utils.utc_now_sec() + datetime.timedelta(days=1),
        'remaining_executions': 42,
        'scope': 'private',
        'project_id': ''
    },
    {
        'id': '22222222-2222-2222-2222-2222222c2222',
        'name': 'trigger2',
        'pattern': '* * * * *',
        'workflow_name': 'my_wf',
        'workflow_id': None,
        'workflow_input': {'param': 'val'},
        'next_execution_time':
            utils.utc_now_sec() + datetime.timedelta(days=1),
        'remaining_executions': 42,
        'scope': 'private',
        'project_id': ''
    },
]


class CronTriggerTest(SQLAlchemyTest):
    def setUp(self):
        super(CronTriggerTest, self).setUp()

        self.wf = db_api.create_workflow_definition({'name': 'my_wf'})

        for ct in CRON_TRIGGERS:
            ct['workflow_id'] = self.wf.id

    def test_create_and_get_and_load_cron_trigger(self):
        created = db_api.create_cron_trigger(CRON_TRIGGERS[0])

        fetched = db_api.get_cron_trigger(created.name)

        self.assertEqual(created, fetched)

        fetched = db_api.load_cron_trigger(created.name)

        self.assertEqual(created, fetched)

        self.assertIsNone(db_api.load_cron_trigger("not-existing-trigger"))

    def test_create_cron_trigger_duplicate_without_auth(self):
        cfg.CONF.set_default('auth_enable', False, group='pecan')

        db_api.create_cron_trigger(CRON_TRIGGERS[0])

        self.assertRaisesWithMessage(
            exc.DBDuplicateEntryError,
            "Duplicate entry for cron trigger ['name', 'project_id']:"
            " trigger1, ",
            db_api.create_cron_trigger,
            CRON_TRIGGERS[0]
        )
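
    # As exercised below, update_cron_trigger() returns an
    # (updated_object, row_count) pair, and the optional query_filter
    # argument narrows the UPDATE so callers can do compare-and-swap style
    # updates: when the filter matches nothing, row_count is 0.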
    def test_update_cron_trigger(self):
        created = db_api.create_cron_trigger(CRON_TRIGGERS[0])

        self.assertIsNone(created.updated_at)

        updated, updated_count = db_api.update_cron_trigger(
            created.name,
            {'pattern': '*/1 * * * *'}
        )

        self.assertEqual('*/1 * * * *', updated.pattern)
        self.assertEqual(1, updated_count)

        fetched = db_api.get_cron_trigger(created.name)

        self.assertEqual(updated, fetched)
        self.assertIsNotNone(fetched.updated_at)

        # Test update_cron_trigger and query_filter with results.
        updated, updated_count = db_api.update_cron_trigger(
            created.name,
            {'pattern': '*/1 * * * *'},
            query_filter={'name': created.name}
        )

        self.assertEqual(updated, fetched)
        self.assertEqual(1, updated_count)

        # Test update_cron_trigger and query_filter without results.
        updated, updated_count = db_api.update_cron_trigger(
            created.name,
            {'pattern': '*/1 * * * *'},
            query_filter={'name': 'not-existing-id'}
        )

        # The filter matched no rows, so nothing was updated.
        self.assertEqual(0, updated_count)

    def test_update_cron_trigger_by_id(self):
        created = db_api.create_cron_trigger(CRON_TRIGGERS[0])

        self.assertIsNone(created.updated_at)

        updated, updated_count = db_api.update_cron_trigger(
            created.id,
            {'pattern': '*/1 * * * *'}
        )

        self.assertEqual('*/1 * * * *', updated.pattern)
        self.assertEqual(1, updated_count)

    def test_create_or_update_cron_trigger(self):
        name = 'not-existing-id'

        self.assertIsNone(db_api.load_cron_trigger(name))

        created = db_api.create_or_update_cron_trigger(name, CRON_TRIGGERS[0])

        self.assertIsNotNone(created)
        self.assertIsNotNone(created.name)

        updated = db_api.create_or_update_cron_trigger(
            created.name,
            {'pattern': '*/1 * * * *'}
        )

        self.assertEqual('*/1 * * * *', updated.pattern)

        fetched = db_api.get_cron_trigger(created.name)

        self.assertEqual(updated, fetched)

    def test_get_cron_triggers(self):
        created0 = db_api.create_cron_trigger(CRON_TRIGGERS[0])
        created1 = db_api.create_cron_trigger(CRON_TRIGGERS[1])

        fetched = db_api.get_cron_triggers(pattern='* * * * *')

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_get_cron_trigger(self):
        cron_trigger = db_api.create_cron_trigger(CRON_TRIGGERS[0])

        # Get by id is ok.
        fetched = db_api.get_cron_trigger(cron_trigger.id)

        self.assertEqual(cron_trigger, fetched)

        # Get by name is ok.
        fetched = db_api.get_cron_trigger(cron_trigger.name)

        self.assertEqual(cron_trigger, fetched)

    def test_get_cron_trigger_not_found(self):
        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_cron_trigger,
            'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
        )

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_cron_trigger,
            'not-exists-cron-trigger',
        )

    def test_get_cron_trigger_by_id(self):
        cron_trigger_1 = db_api.create_cron_trigger(CRON_TRIGGERS[0])
        cron_trigger_2 = db_api.create_cron_trigger(CRON_TRIGGERS[1])

        fetched = db_api.get_cron_trigger_by_id(cron_trigger_1.id)

        self.assertEqual(cron_trigger_1, fetched)

        fetched = db_api.get_cron_trigger_by_id(cron_trigger_2.id)

        self.assertEqual(cron_trigger_2, fetched)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_cron_trigger_by_id,
            'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
        )
    def test_get_cron_triggers_other_tenant(self):
        created0 = db_api.create_cron_trigger(CRON_TRIGGERS[0])

        # Switch to another tenant.
        auth_context.set_ctx(USER_CTX)

        fetched = db_api.get_cron_triggers(
            insecure=True,
            pattern='* * * * *',
            project_id=security.DEFAULT_PROJECT_ID
        )

        self.assertEqual(1, len(fetched))
        self.assertEqual(created0, fetched[0])

    def test_delete_cron_trigger(self):
        created = db_api.create_cron_trigger(CRON_TRIGGERS[0])

        fetched = db_api.get_cron_trigger(created.name)

        self.assertEqual(created, fetched)

        rowcount = db_api.delete_cron_trigger(created.name)

        self.assertEqual(1, rowcount)
        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_cron_trigger,
            created.name
        )

    def test_delete_cron_trigger_by_id(self):
        created = db_api.create_cron_trigger(CRON_TRIGGERS[0])

        fetched = db_api.get_cron_trigger(created.name)

        self.assertEqual(created, fetched)

        rowcount = db_api.delete_cron_trigger(created.id)

        self.assertEqual(1, rowcount)
        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_cron_trigger,
            created.id
        )

    def test_cron_trigger_repr(self):
        s = db_api.create_cron_trigger(CRON_TRIGGERS[0]).__repr__()

        self.assertIn('CronTrigger ', s)
        self.assertIn("'pattern': '* * * * *'", s)
        self.assertIn("'name': 'trigger1'", s)
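
# Scheduled job fixtures: each one describes a deferred call (func_name plus
# func_args) to run after `run_after` seconds; the second entry additionally
# goes through a target factory function. ScheduledJobTest.setUp() purges all
# jobs first so the count assertions below start from a clean slate.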
SCHEDULED_JOBS = [
    {
        'run_after': 30,
        'func_name': 'test_module.test_func',
        'func_args': {
            'server': 'localhost',
            'database': 'test',
            'timeout': 600,
            'verbose': True
        },
        'execute_at': datetime.datetime(2019, 7, 6, 15, 1, 0)
    },
    {
        'run_after': 50,
        'target_factory_func_name': 'test_target_factory_func',
        'func_name': 'test_func',
        'func_args': {
            'server': 'localhost',
            'database': 'test',
            'timeout': 600,
            'verbose': True
        },
        'execute_at': datetime.datetime(2019, 7, 6, 20, 30, 0)
    },
]


class ScheduledJobTest(SQLAlchemyTest):
    def setUp(self):
        super(ScheduledJobTest, self).setUp()

        db_api.delete_scheduled_jobs()

    def test_create_and_get_scheduled_job(self):
        created = db_api.create_scheduled_job(SCHEDULED_JOBS[0])

        fetched = db_api.get_scheduled_job(created.id)

        self.assertEqual(created, fetched)

    def test_create_scheduled_job_duplicate_without_auth(self):
        cfg.CONF.set_default('auth_enable', False, group='pecan')

        db_api.create_scheduled_job(SCHEDULED_JOBS[0])
        db_api.create_scheduled_job(SCHEDULED_JOBS[0])

    def test_update_scheduled_job(self):
        created = db_api.create_scheduled_job(SCHEDULED_JOBS[0])

        self.assertIsNone(created.updated_at)

        updated = db_api.update_scheduled_job(
            created.id,
            {'captured_at': datetime.datetime(2019, 7, 6, 20, 30, 0)}
        )

        self.assertEqual(
            datetime.datetime(2019, 7, 6, 20, 30, 0),
            updated[0].captured_at
        )

        fetched = db_api.get_scheduled_job(created.id)

        self.assertEqual(updated[0], fetched)
        self.assertIsNotNone(fetched.updated_at)

    def test_get_scheduled_jobs(self):
        created0 = db_api.create_scheduled_job(SCHEDULED_JOBS[0])
        created1 = db_api.create_scheduled_job(SCHEDULED_JOBS[1])

        fetched = db_api.get_scheduled_jobs()

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, func_name=created0['func_name'])
        self._assert_single_item(fetched, func_name=created1['func_name'])

    def test_delete_scheduled_job(self):
        created = db_api.create_scheduled_job(SCHEDULED_JOBS[0])

        fetched = db_api.get_scheduled_job(created.id)

        self.assertEqual(created, fetched)

        db_api.delete_scheduled_job(created.id)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_scheduled_job,
            created.id
        )

    def test_get_scheduled_jobs_count(self):
        res = db_api.get_scheduled_jobs_count()
        self.assertEqual(0, res)

        created0 = db_api.create_scheduled_job(SCHEDULED_JOBS[0])
        created1 = db_api.create_scheduled_job(SCHEDULED_JOBS[1])

        res = db_api.get_scheduled_jobs_count()
        self.assertEqual(2, res)

        db_api.delete_scheduled_job(created0.id)

        res = db_api.get_scheduled_jobs_count()
        self.assertEqual(1, res)

        db_api.delete_scheduled_job(created1.id)

        res = db_api.get_scheduled_jobs_count()
        self.assertEqual(0, res)


ENVIRONMENTS = [
    {
        'name': 'env1',
        'description': 'Test Environment #1',
        'scope': 'private',
        'variables': {
            'server': 'localhost',
            'database': 'test',
            'timeout': 600,
            'verbose': True
        }
    },
    {
        'name': 'env2',
        'description': 'Test Environment #2',
        'scope': 'public',
        'variables': {
            'server': '127.0.0.1',
            'database': 'temp',
            'timeout': 300,
            'verbose': False
        }
    }
]


class EnvironmentTest(SQLAlchemyTest):
    def setUp(self):
        super(EnvironmentTest, self).setUp()

        db_api.delete_environments()

    def test_create_and_get_and_load_environment(self):
        created = db_api.create_environment(ENVIRONMENTS[0])

        fetched = db_api.get_environment(created.name)

        self.assertEqual(created, fetched)

        fetched = db_api.load_environment(created.name)

        self.assertEqual(created, fetched)

        self.assertIsNone(db_api.load_environment("not-existing-id"))

    def test_create_environment_duplicate_without_auth(self):
        cfg.CONF.set_default('auth_enable', False, group='pecan')

        db_api.create_environment(ENVIRONMENTS[0])

        self.assertRaisesWithMessage(
            exc.DBDuplicateEntryError,
            "Duplicate entry for Environment ['name', 'project_id']: "
            "env1, None",
            db_api.create_environment,
            ENVIRONMENTS[0]
        )

    def test_update_environment(self):
        created = db_api.create_environment(ENVIRONMENTS[0])

        self.assertIsNone(created.updated_at)

        updated = db_api.update_environment(
            created.name,
            {'description': 'my new desc'}
        )

        self.assertEqual('my new desc', updated.description)

        fetched = db_api.get_environment(created.name)

        self.assertEqual(updated, fetched)
        self.assertIsNotNone(fetched.updated_at)

    def test_create_or_update_environment(self):
        name = 'not-existing-id'

        self.assertIsNone(db_api.load_environment(name))

        created = db_api.create_or_update_environment(name, ENVIRONMENTS[0])

        self.assertIsNotNone(created)
        self.assertIsNotNone(created.name)

        updated = db_api.create_or_update_environment(
            created.name,
            {'description': 'my new desc'}
        )

        self.assertEqual('my new desc', updated.description)
        self.assertEqual(
            'my new desc',
            db_api.load_environment(updated.name).description
        )

        fetched = db_api.get_environment(created.name)

        self.assertEqual(updated, fetched)

    def test_get_environments(self):
        created0 = db_api.create_environment(ENVIRONMENTS[0])
        created1 = db_api.create_environment(ENVIRONMENTS[1])

        fetched = db_api.get_environments()

        self.assertEqual(2, len(fetched))
        self._assert_single_item(fetched, name=created0['name'])
        self._assert_single_item(fetched, name=created1['name'])

    def test_delete_environment(self):
        created = db_api.create_environment(ENVIRONMENTS[0])

        fetched = db_api.get_environment(created.name)

        self.assertEqual(created, fetched)

        db_api.delete_environment(created.name)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_environment,
            created.name
        )

    def test_delete_environments(self):
        created0 = db_api.create_environment(ENVIRONMENTS[0])
        created1 = db_api.create_environment(ENVIRONMENTS[1])

        db_api.delete_environments(
            name={'in': [created0.name, created1.name]}
        )

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_environment,
            created0.id
        )

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_environment,
            created1.id
        )

    def test_environment_repr(self):
        s = db_api.create_environment(ENVIRONMENTS[0]).__repr__()

        self.assertIn('Environment ', s)
        self.assertIn("'description': 'Test Environment #1'", s)
        self.assertIn("'name': 'env1'", s)
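
# TXTest exercises both of Mistral's transaction APIs: the explicit
# start_tx()/commit_tx()/rollback_tx()/end_tx() sequence and the
# db_api.transaction() context manager. The canonical explicit pattern, as
# used by the tests below, is:
#
#     db_api.start_tx()
#     try:
#         ...                 # DB work against the tx-scoped session
#         db_api.commit_tx()  # or db_api.rollback_tx()
#     finally:
#         db_api.end_tx()     # always closes the session
#
# The context manager form commits on normal exit and rolls back on
# exception, which test_rollback_transaction() relies on.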
class TXTest(SQLAlchemyTest):
    def test_rollback(self):
        db_api.start_tx()

        try:
            created = db_api.create_workbook(WORKBOOKS[0])
            fetched = db_api.get_workbook(created.name, namespace='test')

            self.assertEqual(created, fetched)
            self.assertTrue(self.is_db_session_open())

            db_api.rollback_tx()
        finally:
            db_api.end_tx()

        self.assertFalse(self.is_db_session_open())
        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_workbook,
            created['id']
        )
        self.assertFalse(self.is_db_session_open())

    def test_commit(self):
        db_api.start_tx()

        try:
            created = db_api.create_workbook(WORKBOOKS[0])
            fetched = db_api.get_workbook(created.name, namespace='test')

            self.assertEqual(created, fetched)
            self.assertTrue(self.is_db_session_open())

            db_api.commit_tx()
        finally:
            db_api.end_tx()

        self.assertFalse(self.is_db_session_open())

        fetched = db_api.get_workbook(created.name, namespace='test')

        self.assertEqual(created, fetched)
        self.assertFalse(self.is_db_session_open())

    def test_commit_transaction(self):
        with db_api.transaction():
            created = db_api.create_workbook(WORKBOOKS[0])
            fetched = db_api.get_workbook(created.name, namespace='test')

            self.assertEqual(created, fetched)
            self.assertTrue(self.is_db_session_open())

        self.assertFalse(self.is_db_session_open())

        fetched = db_api.get_workbook(created.name, namespace='test')

        self.assertEqual(created, fetched)
        self.assertFalse(self.is_db_session_open())

    def test_rollback_multiple_objects(self):
        db_api.start_tx()

        try:
            created = db_api.create_workflow_execution(WF_EXECS[0])
            fetched = db_api.get_workflow_execution(created['id'])

            self.assertEqual(created, fetched)

            created_wb = db_api.create_workbook(WORKBOOKS[0])
            fetched_wb = db_api.get_workbook(
                created_wb.name,
                namespace=created_wb.namespace
            )

            self.assertEqual(created_wb, fetched_wb)
            self.assertTrue(self.is_db_session_open())

            db_api.rollback_tx()
        finally:
            db_api.end_tx()

        self.assertFalse(self.is_db_session_open())
        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_workflow_execution,
            created.id
        )
        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_workbook,
            created_wb.name
        )
        self.assertFalse(self.is_db_session_open())

    def test_rollback_transaction(self):
        try:
            with db_api.transaction():
                created = db_api.create_workbook(WORKBOOKS[0])
                fetched = db_api.get_workbook(
                    created.name,
                    namespace=created.namespace
                )

                self.assertEqual(created, fetched)
                self.assertTrue(self.is_db_session_open())

                db_api.create_workbook(WORKBOOKS[0])
        except exc.DBDuplicateEntryError:
            pass

        self.assertFalse(self.is_db_session_open())
        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_workbook,
            created.name
        )

    def test_commit_multiple_objects(self):
        db_api.start_tx()

        try:
            created = db_api.create_workflow_execution(WF_EXECS[0])
            fetched = db_api.get_workflow_execution(created.id)

            self.assertEqual(created, fetched)

            created_wb = db_api.create_workbook(WORKBOOKS[0])
            fetched_wb = db_api.get_workbook(
                created_wb.name,
                namespace=created_wb.namespace
            )

            self.assertEqual(created_wb, fetched_wb)
            self.assertTrue(self.is_db_session_open())

            db_api.commit_tx()
        finally:
            db_api.end_tx()

        self.assertFalse(self.is_db_session_open())

        with db_api.transaction():
            fetched = db_api.get_workflow_execution(created.id)

            self.assertEqual(created, fetched)

            fetched_wb = db_api.get_workbook(
                created_wb.name,
                namespace=created_wb.namespace
            )

            self.assertEqual(created_wb, fetched_wb)

        self.assertFalse(self.is_db_session_open())
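
# Resource member fixtures: both records share the same workflow (the
# resource) owned by the current project; member_id identifies the tenant the
# resource is shared with, and status stays 'pending' until that member
# accepts the share.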
RESOURCE_MEMBERS = [
    {
        'resource_id': '123e4567-e89b-12d3-a456-426655440000',
        'resource_type': 'workflow',
        'project_id': security.get_project_id(),
        'member_id': USER_CTX.project_id,
        'status': 'pending',
    },
    {
        'resource_id': '123e4567-e89b-12d3-a456-426655440000',
        'resource_type': 'workflow',
        'project_id': security.get_project_id(),
        'member_id': '111',
        'status': 'pending',
    },
]


class ResourceMemberTest(SQLAlchemyTest):
    def test_create_and_get_resource_member(self):
        created_1 = db_api.create_resource_member(RESOURCE_MEMBERS[0])
        created_2 = db_api.create_resource_member(RESOURCE_MEMBERS[1])

        fetched = db_api.get_resource_member(
            '123e4567-e89b-12d3-a456-426655440000',
            'workflow',
            USER_CTX.project_id
        )

        self.assertEqual(created_1, fetched)

        # Switch to another tenant.
        auth_context.set_ctx(USER_CTX)

        fetched = db_api.get_resource_member(
            '123e4567-e89b-12d3-a456-426655440000',
            'workflow',
            USER_CTX.project_id
        )

        self.assertEqual(created_1, fetched)

        # Tenant A can not see membership of resource shared to Tenant B.
        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_resource_member,
            '123e4567-e89b-12d3-a456-426655440000',
            'workflow',
            created_2.member_id
        )

    def test_create_resource_member_duplicate(self):
        db_api.create_resource_member(RESOURCE_MEMBERS[0])

        self.assertRaisesWithMessage(
            exc.DBDuplicateEntryError,
            "Duplicate entry for ResourceMember ['resource_id',"
            " 'resource_type', 'member_id']:"
            " 123e4567-e89b-12d3-a456-426655440000, workflow, 99-88-33",
            db_api.create_resource_member,
            RESOURCE_MEMBERS[0]
        )

    def test_get_resource_members_by_owner(self):
        for res_member in RESOURCE_MEMBERS:
            db_api.create_resource_member(res_member)

        fetched = db_api.get_resource_members(
            '123e4567-e89b-12d3-a456-426655440000',
            'workflow',
        )

        self.assertEqual(2, len(fetched))

    def test_get_resource_members_not_owner(self):
        created = db_api.create_resource_member(RESOURCE_MEMBERS[0])

        db_api.create_resource_member(RESOURCE_MEMBERS[1])

        # Switch to another tenant.
        auth_context.set_ctx(USER_CTX)

        fetched = db_api.get_resource_members(
            created.resource_id,
            'workflow',
        )

        self.assertEqual(1, len(fetched))
        self.assertEqual(created, fetched[0])

    def test_update_resource_member_by_member(self):
        created = db_api.create_resource_member(RESOURCE_MEMBERS[0])

        # Switch to another tenant.
        auth_context.set_ctx(USER_CTX)

        updated = db_api.update_resource_member(
            created.resource_id,
            'workflow',
            USER_CTX.project_id,
            {'status': 'accepted'}
        )

        self.assertEqual(created.id, updated.id)
        self.assertEqual('accepted', updated.status)

    def test_update_resource_member_by_owner(self):
        created = db_api.create_resource_member(RESOURCE_MEMBERS[0])

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.update_resource_member,
            created.resource_id,
            'workflow',
            USER_CTX.project_id,
            {'status': 'accepted'}
        )

    def test_delete_resource_member(self):
        created = db_api.create_resource_member(RESOURCE_MEMBERS[0])

        db_api.delete_resource_member(
            created.resource_id,
            'workflow',
            USER_CTX.project_id,
        )

        fetched = db_api.get_resource_members(
            created.resource_id,
            'workflow',
        )

        self.assertEqual(0, len(fetched))
    def test_delete_resource_member_not_owner(self):
        created = db_api.create_resource_member(RESOURCE_MEMBERS[0])

        # Switch to another tenant.
        auth_context.set_ctx(USER_CTX)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.delete_resource_member,
            created.resource_id,
            'workflow',
            USER_CTX.project_id,
        )

    def test_delete_resource_member_already_deleted(self):
        created = db_api.create_resource_member(RESOURCE_MEMBERS[0])

        db_api.delete_resource_member(
            created.resource_id,
            'workflow',
            USER_CTX.project_id,
        )

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.delete_resource_member,
            created.resource_id,
            'workflow',
            USER_CTX.project_id,
        )

    def test_delete_nonexistent_resource_member(self):
        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.delete_resource_member,
            'nonexistent_resource',
            'workflow',
            'nonexistent_member',
        )


class WorkflowSharingTest(SQLAlchemyTest):
    def test_get_shared_workflow(self):
        wf = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        # Switch to another tenant.
        auth_context.set_ctx(USER_CTX)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_workflow_definition,
            wf.id
        )

        # Switch to original tenant, share workflow to another tenant.
        auth_context.set_ctx(DEFAULT_CTX)

        workflow_sharing = {
            'resource_id': wf.id,
            'resource_type': 'workflow',
            'project_id': security.get_project_id(),
            'member_id': USER_CTX.project_id,
            'status': 'pending',
        }

        db_api.create_resource_member(workflow_sharing)

        # Switch to another tenant, accept the sharing, get workflows.
        auth_context.set_ctx(USER_CTX)

        db_api.update_resource_member(
            wf.id,
            'workflow',
            USER_CTX.project_id,
            {'status': 'accepted'}
        )

        fetched = db_api.get_workflow_definition(wf.id)

        self.assertEqual(wf, fetched)

    def test_owner_delete_shared_workflow(self):
        wf = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        workflow_sharing = {
            'resource_id': wf.id,
            'resource_type': 'workflow',
            'project_id': security.get_project_id(),
            'member_id': USER_CTX.project_id,
            'status': 'pending',
        }

        db_api.create_resource_member(workflow_sharing)

        # Switch to another tenant, accept the sharing.
        auth_context.set_ctx(USER_CTX)

        db_api.update_resource_member(
            wf.id,
            'workflow',
            USER_CTX.project_id,
            {'status': 'accepted'}
        )

        fetched = db_api.get_workflow_definition(wf.id)

        self.assertEqual(wf, fetched)

        # Switch to original tenant, delete the workflow.
        auth_context.set_ctx(DEFAULT_CTX)

        db_api.delete_workflow_definition(wf.id)

        # Switch to another tenant, can not see that workflow.
        auth_context.set_ctx(USER_CTX)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_workflow_definition,
            wf.id
        )
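
    # A cron trigger holds a foreign key to its workflow definition, so
    # deleting a workflow that a trigger still references is expected to fail
    # with a DB-level error, which the next test asserts.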
    def test_owner_delete_shared_workflow_has_crontrigger(self):
        wf = db_api.create_workflow_definition(WF_DEFINITIONS[1])

        workflow_sharing = {
            'resource_id': wf.id,
            'resource_type': 'workflow',
            'project_id': security.get_project_id(),
            'member_id': USER_CTX.project_id,
            'status': 'pending',
        }

        db_api.create_resource_member(workflow_sharing)

        # Switch to another tenant, accept the sharing.
        auth_context.set_ctx(USER_CTX)

        db_api.update_resource_member(
            wf.id,
            'workflow',
            USER_CTX.project_id,
            {'status': 'accepted'}
        )

        # Create cron trigger using the shared workflow.
        CRON_TRIGGERS[0]['workflow_id'] = wf.id

        db_api.create_cron_trigger(CRON_TRIGGERS[0])

        # Switch to original tenant, try to delete the workflow.
        auth_context.set_ctx(DEFAULT_CTX)

        self.assertRaises(
            exc.DBError,
            db_api.delete_workflow_definition,
            wf.id
        )


EVENT_TRIGGERS = [
    {
        'name': 'trigger1',
        'workflow_id': '',
        'workflow_input': {},
        'workflow_params': {},
        'exchange': 'openstack',
        'topic': 'notification',
        'event': 'compute.create_instance',
    },
    {
        'name': 'trigger2',
        'workflow_id': '',
        'workflow_input': {},
        'workflow_params': {},
        'exchange': 'openstack',
        'topic': 'notification',
        'event': 'compute.delete_instance',
    },
]


class EventTriggerTest(SQLAlchemyTest):
    def setUp(self):
        super(EventTriggerTest, self).setUp()

        self.wf = db_api.create_workflow_definition({'name': 'my_wf'})

        for et in EVENT_TRIGGERS:
            et['workflow_id'] = self.wf.id

    def test_create_and_get_event_trigger(self):
        created = db_api.create_event_trigger(EVENT_TRIGGERS[0])

        fetched = db_api.get_event_trigger(created.id)

        self.assertEqual(created, fetched)

    def test_create_event_trigger_duplicate(self):
        db_api.create_event_trigger(EVENT_TRIGGERS[0])

        self.assertRaisesWithMessageContaining(
            exc.DBDuplicateEntryError,
            "Duplicate entry for EventTrigger ['exchange', 'topic', 'event',"
            " 'workflow_id', 'project_id']: openstack, notification,"
            " compute.create_instance,",
            db_api.create_event_trigger,
            EVENT_TRIGGERS[0]
        )

    def test_get_event_triggers_not_insecure(self):
        for t in EVENT_TRIGGERS:
            db_api.create_event_trigger(t)

        fetched = db_api.get_event_triggers()

        self.assertEqual(2, len(fetched))

    def test_get_event_triggers_insecure(self):
        db_api.create_event_trigger(EVENT_TRIGGERS[0])

        # Switch to another tenant.
        auth_context.set_ctx(USER_CTX)

        db_api.create_event_trigger(EVENT_TRIGGERS[1])

        fetched = db_api.get_event_triggers()

        self.assertEqual(1, len(fetched))

        fetched = db_api.get_event_triggers(insecure=True)

        self.assertEqual(2, len(fetched))

    def test_get_event_triggers_specific_fields_insecure(self):
        fetched = db_api.get_event_triggers(
            fields=['name', 'workflow_id'],
            insecure=True
        )

        self.assertEqual(0, len(fetched))

    def test_update_event_trigger(self):
        created = db_api.create_event_trigger(EVENT_TRIGGERS[0])

        # Need a new existing workflow for updating event trigger because of
        # foreign constraint.
        new_wf = db_api.create_workflow_definition({'name': 'my_wf1'})

        db_api.update_event_trigger(
            created.id,
            {'workflow_id': new_wf.id}
        )

        updated = db_api.get_event_trigger(created.id)

        self.assertEqual(new_wf.id, updated.workflow_id)

    def test_delete_event_triggers(self):
        created = db_api.create_event_trigger(EVENT_TRIGGERS[0])

        db_api.delete_event_trigger(created.id)

        self.assertRaises(
            exc.DBEntityNotFoundError,
            db_api.get_event_trigger,
            created.id
        )


class LockTest(SQLAlchemyTest):
    def test_create_lock(self):
        # This test just ensures that the DB model is OK.
        # It doesn't test the real intention of this model though.
        db_api.create_named_lock('lock1')

        locks = db_api.get_named_locks()

        self.assertEqual(1, len(locks))
        self.assertEqual('lock1', locks[0].name)

        db_api.delete_named_lock('invalid_lock_id')

        locks = db_api.get_named_locks()

        self.assertEqual(1, len(locks))

        db_api.delete_named_lock(locks[0].id)

        locks = db_api.get_named_locks()

        self.assertEqual(0, len(locks))
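
    # Judging by the tests here, named_lock() creates the lock record on
    # entering the 'with' block and deletes it on exit, giving a simple
    # DB-backed mutex keyed by name.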
    def test_with_named_lock(self):
        name = 'lock1'

        with db_api.named_lock(name):
            # Make sure that within 'with' section the lock record exists.
            self.assertEqual(1, len(db_api.get_named_locks()))

        # Make sure that outside 'with' section the lock record does not
        # exist.
        self.assertEqual(0, len(db_api.get_named_locks()))

    def test_internal_get_direct_subworkflows(self):
        def wex(wex_id, tex_id=None):
            db_api.create_workflow_execution(
                {'id': wex_id, 'name': wex_id, 'task_execution_id': tex_id}
            )

        def tex(tex_id, wex_id):
            db_api.create_task_execution(
                {'id': tex_id, 'name': tex_id, 'workflow_execution_id': wex_id}
            )

        def assert_subworkflows(expected):
            self.assertEqual(
                set(expected),
                set(db_api._get_all_direct_subworkflows('root'))
            )

        wex('root')

        tex('t1', 'root')
        wex('sub1', 't1')

        assert_subworkflows(['sub1'])

        tex('t2', 'root')
        wex('sub2', 't1')

        assert_subworkflows(['sub1', 'sub2'])

        tex('sub1t1', 'sub1')
        wex('sub1sub1', 'sub1t1')

        assert_subworkflows(['sub1', 'sub2'])


# File: mistral-10.0.0.0b3/mistral/tests/unit/db/v2/test_sqlite_transactions.py

# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import eventlet
from eventlet import semaphore
from oslo_config import cfg
import testtools

from mistral.db.v2.sqlalchemy import api as db_api
from mistral.tests.unit import base as test_base


WF_EXEC = {
    'name': '1',
    'spec': {},
    'start_params': {},
    'state': 'RUNNING',
    'state_info': "Running...",
    'created_at': None,
    'updated_at': None,
    'context': None,
    'task_id': None,
    'trust_id': None
}


@testtools.skipIf(
    'sqlite' not in cfg.CONF.database.connection,
    'SQLite is not used for the database backend.')
class SQLiteTransactionsTest(test_base.DbTestCase):
    """The purpose of this test is to research transactions of SQLite."""

    def setUp(self):
        super(SQLiteTransactionsTest, self).setUp()

        cfg.CONF.set_default('auth_enable', True, group='pecan')

        self.addCleanup(
            cfg.CONF.set_default,
            'auth_enable',
            False,
            group='pecan'
        )

    def test_dirty_reads(self):
        sem1 = semaphore.Semaphore(0)
        sem2 = semaphore.Semaphore(0)

        def _run_tx1():
            with db_api.transaction():
                wf_ex = db_api.create_workflow_execution(WF_EXEC)

                # Release TX2 so it can read data.
                sem2.release()

                print("Created: %s" % wf_ex)
                print("Holding TX1...")

                sem1.acquire()

                print("TX1 completed.")

        def _run_tx2():
            with db_api.transaction():
                print("Holding TX2...")

                sem2.acquire()

                wf_execs = db_api.get_workflow_executions()

                print("Read: %s" % wf_execs)

                self.assertEqual(1, len(wf_execs))

                # Release TX1 so it can complete.
                sem1.release()

                print("TX2 completed.")

        t1 = eventlet.spawn(_run_tx1)
        t2 = eventlet.spawn(_run_tx2)

        t1.wait()
        t2.wait()

        t1.kill()
        t2.kill()
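
# Note on the test above: TX2 successfully reads the workflow execution that
# TX1 created but has not yet committed, i.e. with the SQLite backend used
# here the two "transactions" can observe each other's uncommitted data
# (dirty reads), which is exactly what the test sets out to demonstrate.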
# File: mistral-10.0.0.0b3/mistral/tests/unit/db/v2/test_transactions.py

# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg

from mistral.db.v2 import api as db_api
from mistral.tests.unit import base as test_base


WF_EXECS = [
    {
        'name': '1',
        'spec': {},
        'start_params': {},
        'state': 'RUNNING',
        'state_info': "Running...",
        'created_at': None,
        'updated_at': None,
        'context': None,
        'task_id': None,
        'trust_id': None
    },
    {
        'name': '1',
        'spec': {},
        'start_params': {},
        'state': 'RUNNING',
        'state_info': "Running...",
        'created_at': None,
        'updated_at': None,
        'context': None,
        'task_id': None,
        'trust_id': None
    }
]


class TransactionsTest(test_base.DbTestCase):
    def setUp(self):
        super(TransactionsTest, self).setUp()

        cfg.CONF.set_default('auth_enable', True, group='pecan')

        self.addCleanup(
            cfg.CONF.set_default,
            'auth_enable',
            False,
            group='pecan'
        )

    def test_read_only_transactions(self):
        with db_api.transaction():
            db_api.create_workflow_execution(WF_EXECS[0])

            wf_execs = db_api.get_workflow_executions()

            self.assertEqual(1, len(wf_execs))

        wf_execs = db_api.get_workflow_executions()

        self.assertEqual(1, len(wf_execs))

        with db_api.transaction(read_only=True):
            db_api.create_workflow_execution(WF_EXECS[1])

            wf_execs = db_api.get_workflow_executions()

            self.assertEqual(2, len(wf_execs))

        wf_execs = db_api.get_workflow_executions()

        self.assertEqual(1, len(wf_execs))
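
# Note on the test above: a transaction opened with read_only=True is rolled
# back at the end of the block, so the workflow execution created inside it
# is visible within the block (two executions) but never persisted (one
# execution afterwards).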
import eventlet from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import service from mistral.db.v2 import api as db_api from mistral.engine import engine_server from mistral.executors import base as exe from mistral.executors import executor_server from mistral.notifiers import notification_server as notif_server from mistral.rpc import base as rpc_base from mistral.rpc import clients as rpc_clients from mistral.tests.unit import base from mistral.workflow import states LOG = logging.getLogger(__name__) # Default delay and timeout in seconds for await_xxx() functions. DEFAULT_DELAY = 1 DEFAULT_TIMEOUT = 30 def launch_service(s): launcher = service.ServiceLauncher(cfg.CONF) launcher.launch_service(s) launcher.wait() class EngineTestCase(base.DbTestCase): def setUp(self): super(EngineTestCase, self).setUp() # Get transport here to let oslo.messaging setup default config # before changing the rpc_backend to the fake driver; otherwise, # oslo.messaging will throw exception. messaging.get_transport(cfg.CONF) # Set the transport to 'fake' for Engine tests. cfg.CONF.set_default('transport_url', 'fake:/') # Drop all RPC objects (transport, clients). rpc_base.cleanup() rpc_clients.cleanup() exe.cleanup() self.threads = [] # Start remote executor. if cfg.CONF.executor.type == 'remote': LOG.info("Starting remote executor threads...") self.executor_client = rpc_clients.get_executor_client() exe_svc = executor_server.get_oslo_service(setup_profiler=False) self.executor = exe_svc.executor self.threads.append(eventlet.spawn(launch_service, exe_svc)) self.addCleanup(exe_svc.stop, True) # Start remote notifier. if cfg.CONF.notifier.type == 'remote': LOG.info("Starting remote notifier threads...") self.notifier_client = rpc_clients.get_notifier_client() notif_svc = notif_server.get_oslo_service(setup_profiler=False) self.notifier = notif_svc.notifier self.threads.append(eventlet.spawn(launch_service, notif_svc)) self.addCleanup(notif_svc.stop, True) # Start engine. LOG.info("Starting engine threads...") self.engine_client = rpc_clients.get_engine_client() eng_svc = engine_server.get_oslo_service(setup_profiler=False) self.engine = eng_svc.engine self.threads.append(eventlet.spawn(launch_service, eng_svc)) self.addCleanup(eng_svc.stop, True) self.addOnException(self.print_executions) self.addCleanup(self.kill_threads) # Make sure that both services fully started, otherwise # the test may run too early. 
if cfg.CONF.executor.type == 'remote': exe_svc.wait_started() if cfg.CONF.notifier.type == 'remote': notif_svc.wait_started() eng_svc.wait_started() def kill_threads(self): LOG.info("Finishing engine and executor threads...") for thread in self.threads: thread.kill() @staticmethod def print_executions(exc_info=None): if exc_info: print("\nEngine test case exception occurred: %s" % exc_info[1]) print("Exception type: %s" % exc_info[0]) print("\nPrinting workflow executions...") with db_api.transaction(): wf_execs = db_api.get_workflow_executions() for w in wf_execs: print( "\n%s (%s) [state=%s, state_info=%s, output=%s]" % (w.name, w.id, w.state, w.state_info, w.output) ) for t in w.task_executions: print( "\t%s [id=%s, state=%s, state_info=%s, processed=%s," " published=%s, runtime_context=%s]" % (t.name, t.id, t.state, t.state_info, t.processed, t.published, t.runtime_context) ) child_execs = t.executions for a in child_execs: print( "\t\t%s [id=%s, state=%s, state_info=%s," " accepted=%s, output=%s]" % (a.name, a.id, a.state, a.state_info, a.accepted, a.output) ) print("\nPrinting standalone action executions...") child_execs = db_api.get_action_executions(task_execution_id=None) for a in child_execs: print( "\t\t%s [id=%s, state=%s, state_info=%s, accepted=%s," " output=%s]" % (a.name, a.id, a.state, a.state_info, a.accepted, a.output) ) # Various methods for action execution objects. def is_action_in_state(self, ex_id, state): return db_api.get_action_execution(ex_id).state == state def await_action_state(self, ex_id, state, delay=DEFAULT_DELAY, timeout=DEFAULT_TIMEOUT): self._await( lambda: self.is_action_in_state(ex_id, state), delay, timeout ) def is_action_success(self, ex_id): return self.is_action_in_state(ex_id, states.SUCCESS) def is_action_error(self, ex_id): return self.is_action_in_state(ex_id, states.ERROR) def await_action_success(self, ex_id, delay=DEFAULT_DELAY, timeout=DEFAULT_TIMEOUT): self.await_action_state(ex_id, states.SUCCESS, delay, timeout) def await_action_error(self, ex_id, delay=DEFAULT_DELAY, timeout=DEFAULT_TIMEOUT): self.await_action_state(ex_id, states.ERROR, delay, timeout) # Various methods for task execution objects. 
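# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Mistral sources): every await_xxx()
# helper in this class delegates to a _await() poller that is defined in the
# shared test base class and is not shown in this section. A minimal
# stand-in with the same call shape could look like this; the exact timeout
# semantics are an assumption based on how the helpers are used.
# ---------------------------------------------------------------------------
import time


def _await(predicate, delay=1, timeout=30,
           fail_message="Condition not reached",
           fail_message_formatter=lambda m: m):
    """Poll predicate() every `delay` seconds until it returns True."""
    deadline = time.time() + timeout

    while not predicate():
        if time.time() > deadline:
            raise AssertionError(fail_message_formatter(fail_message))

        time.sleep(delay)
# ---------------------------------------------------------------------------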
    def is_task_in_state(self, ex_id, state):
        return db_api.get_task_execution(ex_id).state == state

    def await_task_state(self, ex_id, state, delay=DEFAULT_DELAY,
                         timeout=DEFAULT_TIMEOUT):
        self._await(
            lambda: self.is_task_in_state(ex_id, state),
            delay,
            timeout
        )

    def is_task_success(self, task_ex_id):
        return self.is_task_in_state(task_ex_id, states.SUCCESS)

    def is_task_error(self, task_ex_id):
        return self.is_task_in_state(task_ex_id, states.ERROR)

    def is_task_delayed(self, task_ex_id):
        return self.is_task_in_state(task_ex_id, states.RUNNING_DELAYED)

    def is_task_processed(self, task_ex_id):
        return db_api.get_task_execution(task_ex_id).processed

    def await_task_running(self, ex_id, delay=DEFAULT_DELAY,
                           timeout=DEFAULT_TIMEOUT):
        self.await_task_state(ex_id, states.RUNNING, delay, timeout)

    def await_task_success(self, ex_id, delay=DEFAULT_DELAY,
                           timeout=DEFAULT_TIMEOUT):
        self.await_task_state(ex_id, states.SUCCESS, delay, timeout)

    def await_task_error(self, ex_id, delay=DEFAULT_DELAY,
                         timeout=DEFAULT_TIMEOUT):
        self.await_task_state(ex_id, states.ERROR, delay, timeout)

    def await_task_cancelled(self, ex_id, delay=DEFAULT_DELAY,
                             timeout=DEFAULT_TIMEOUT):
        self.await_task_state(ex_id, states.CANCELLED, delay, timeout)

    def await_task_paused(self, ex_id, delay=DEFAULT_DELAY,
                          timeout=DEFAULT_TIMEOUT):
        self.await_task_state(ex_id, states.PAUSED, delay, timeout)

    def await_task_delayed(self, ex_id, delay=DEFAULT_DELAY,
                           timeout=DEFAULT_TIMEOUT):
        self.await_task_state(ex_id, states.RUNNING_DELAYED, delay, timeout)

    def await_task_processed(self, ex_id, delay=DEFAULT_DELAY,
                             timeout=DEFAULT_TIMEOUT):
        self._await(lambda: self.is_task_processed(ex_id), delay, timeout)

    # Various methods for workflow execution objects.

    def is_workflow_in_state(self, ex_id, state):
        return db_api.get_workflow_execution(ex_id).state == state

    def await_workflow_state(self, ex_id, state, delay=DEFAULT_DELAY,
                             timeout=DEFAULT_TIMEOUT):
        self._await(
            lambda: self.is_workflow_in_state(ex_id, state),
            delay,
            timeout,
            fail_message="Execution {ex_id} must have reached state {state} "
                         "but it is in {current}",
            fail_message_formatter=lambda m: m.format(
                ex_id=ex_id,
                state=state,
                current=db_api.get_workflow_execution(ex_id).state
            )
        )

    def await_workflow_running(self, ex_id, delay=DEFAULT_DELAY,
                               timeout=DEFAULT_TIMEOUT):
        self.await_workflow_state(ex_id, states.RUNNING, delay, timeout)

    def await_workflow_success(self, ex_id, delay=DEFAULT_DELAY,
                               timeout=DEFAULT_TIMEOUT):
        self.await_workflow_state(ex_id, states.SUCCESS, delay, timeout)

    def await_workflow_error(self, ex_id, delay=DEFAULT_DELAY,
                             timeout=DEFAULT_TIMEOUT):
        self.await_workflow_state(ex_id, states.ERROR, delay, timeout)

    def await_workflow_paused(self, ex_id, delay=DEFAULT_DELAY,
                              timeout=DEFAULT_TIMEOUT):
        self.await_workflow_state(ex_id, states.PAUSED, delay, timeout)

    def await_workflow_cancelled(self, ex_id, delay=DEFAULT_DELAY,
                                 timeout=DEFAULT_TIMEOUT):
        self.await_workflow_state(ex_id, states.CANCELLED, delay, timeout)

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/mistral/tests/unit/engine/test_action_caching.py0000644000175000017500000001001700000000000025515 0ustar00coreycorey00000000000000
# Copyright 2017 - Nokia Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

import cachetools
from oslo_config import cfg

from mistral.db.v2 import api as db_api
from mistral.services import actions as action_service
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')


class LookupUtilsTest(base.EngineTestCase):
    ACTION = """---
    version: '2.0'

    action1:
      base: std.echo output='Hi'
      output:
        result: $
    """

    WF_TEXT = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: action1
          on-success: join_task

        task2:
          action: action1
          on-success: join_task

        join_task:
          join: all
          on-success: task4

        task4:
          action: action1
          pause-before: true
    """

    def test_action_definition_cache_ttl(self):
        namespace = 'test_namespace'

        wf_service.create_workflows(self.WF_TEXT, namespace=namespace)

        # Create an action.
        db_actions = action_service.create_actions(
            self.ACTION,
            namespace=namespace
        )

        self.assertEqual(1, len(db_actions))
        self._assert_single_item(
            db_actions,
            name='action1',
            namespace=namespace
        )

        # Explicitly mark the action to be deleted after the test execution.
        self.addCleanup(
            db_api.delete_action_definitions,
            name='action1',
            namespace=namespace
        )

        # Reinitialise the cache with reduced action_definition_cache_time
        # to make sure the test environment is under control.
        new_cache = cachetools.TTLCache(
            maxsize=1000,
            ttl=50  # 50 seconds
        )

        cache_patch = mock.patch.object(
            db_api,
            '_ACTION_DEF_CACHE',
            new_cache
        )
        cache_patch.start()
        self.addCleanup(cache_patch.stop)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf', wf_namespace=namespace)

        self.await_workflow_paused(wf_ex.id)

        # Check that 'action1', 'std.echo' and 'std.noop' are cached,
        # both with and without the namespace.
        self.assertEqual(5, len(db_api._ACTION_DEF_CACHE))
        self.assertIn('action1:test_namespace', db_api._ACTION_DEF_CACHE)
        self.assertIn('std.noop:test_namespace', db_api._ACTION_DEF_CACHE)
        self.assertIn('std.echo:test_namespace', db_api._ACTION_DEF_CACHE)
        self.assertIn('std.noop', db_api._ACTION_DEF_CACHE)
        self.assertIn('std.echo', db_api._ACTION_DEF_CACHE)

        # Simulate cache expiry
        new_cache.clear()

        # Wait some time until cache expires
        self._await(
            lambda: len(db_api._ACTION_DEF_CACHE) == 0,
            fail_message="Action definition cache was not cleared"
        )

        self.assertEqual(0, len(db_api._ACTION_DEF_CACHE))

        self.engine.resume_workflow(wf_ex.id)

        self.await_workflow_success(wf_ex.id)

        # Check all actions are cached again.
        self.assertEqual(3, len(db_api._ACTION_DEF_CACHE))
        self.assertIn('action1:test_namespace', db_api._ACTION_DEF_CACHE)
        self.assertIn('std.echo', db_api._ACTION_DEF_CACHE)
        self.assertIn('std.echo:test_namespace', db_api._ACTION_DEF_CACHE)

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/mistral/tests/unit/engine/test_action_context.py0000644000175000017500000000443400000000000025613 0ustar00coreycorey00000000000000
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
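# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Mistral sources): the TTL test above
# swaps db_api._ACTION_DEF_CACHE for a cachetools.TTLCache so that expiry
# can be controlled. The lookup pattern being exercised presumably resembles
# the memoised fetch below; load_from_db is a hypothetical placeholder for
# the real database query.
# ---------------------------------------------------------------------------
import cachetools

_ACTION_DEF_CACHE = cachetools.TTLCache(
    maxsize=1000,
    ttl=60  # seconds; entries silently disappear once the TTL elapses
)


def get_action_definition(name, namespace='',
                          load_from_db=lambda key: object()):
    # Namespaced definitions are cached under 'name:namespace', global ones
    # under the bare name, matching the keys asserted in the test above.
    key = '%s:%s' % (name, namespace) if namespace else name

    try:
        return _ACTION_DEF_CACHE[key]
    except KeyError:
        action_def = load_from_db(key)

        _ACTION_DEF_CACHE[key] = action_def

        return action_def
# ---------------------------------------------------------------------------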
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit import base as test_base from mistral.tests.unit.engine import base from mistral_lib import actions as actions_base WF = """ --- version: '2.0' wf: tasks: task1: action: my_action """ class MyAction(actions_base.Action): def run(self, context): pass class ActionContextTest(base.EngineTestCase): def setUp(self): super(ActionContextTest, self).setUp() test_base.register_action_class('my_action', MyAction) @mock.patch.object(MyAction, 'run', return_value=None) def test_context(self, mocked_run): wf_service.create_workflows(WF) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) self.assertEqual(1, len(mocked_run.call_args_list)) action_context = mocked_run.call_args[0][0] exec_context = action_context.execution with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(exec_context.workflow_execution_id, wf_ex.id) tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='task1') a_ex = task1.action_executions[0] self.assertEqual(exec_context.task_id, task1.id) self.assertEqual(exec_context.workflow_name, wf_ex.name) callback_url = "/v2/action_executions/{}".format(a_ex.id) self.assertEqual(exec_context.callback_url, callback_url) self.assertEqual(exec_context.action_execution_id, a_ex.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_action_defaults.py0000644000175000017500000001552500000000000025741 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg import requests from mistral.actions import std_actions from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit import base as test_base from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. 
cfg.CONF.set_default('auth_enable', False, group='pecan') ENV = { '__actions': { 'std.http': { 'auth': 'librarian:password123', 'timeout': 30, } } } EXPECTED_ENV_AUTH = ('librarian', 'password123') WORKFLOW1 = """ --- version: "2.0" wf1: tasks: task1: action: std.http url="https://api.library.org/books" publish: result: <% $ %> """ WORKFLOW2 = """ --- version: "2.0" wf2: tasks: task1: action: std.http url="https://api.library.org/books" timeout=60 publish: result: <% $ %> """ WORKFLOW1_WITH_ITEMS = """ --- version: "2.0" wf1_with_items: input: - links tasks: task1: with-items: link in <% $.links %> action: std.http url=<% $.link %> publish: result: <% $ %> """ WORKFLOW2_WITH_ITEMS = """ --- version: "2.0" wf2_with_items: input: - links tasks: task1: with-items: link in <% $.links %> action: std.http url=<% $.link %> timeout=60 publish: result: <% $ %> """ class ActionDefaultTest(base.EngineTestCase): @mock.patch.object( requests, 'request', mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK'))) @mock.patch.object( std_actions.HTTPAction, 'is_sync', mock.MagicMock(return_value=True)) def test_action_defaults_from_env(self): wf_service.create_workflows(WORKFLOW1) wf_ex = self.engine.start_workflow('wf1', env=ENV) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.SUCCESS, wf_ex.state) self._assert_single_item(wf_ex.task_executions, name='task1') requests.request.assert_called_with( 'GET', 'https://api.library.org/books', params=None, data=None, json=None, headers=None, cookies=None, allow_redirects=None, proxies=None, verify=None, auth=EXPECTED_ENV_AUTH, timeout=ENV['__actions']['std.http']['timeout'] ) @mock.patch.object( requests, 'request', mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK'))) @mock.patch.object( std_actions.HTTPAction, 'is_sync', mock.MagicMock(return_value=True)) def test_action_defaults_from_env_not_applied(self): wf_service.create_workflows(WORKFLOW2) wf_ex = self.engine.start_workflow('wf2', env=ENV) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.SUCCESS, wf_ex.state) self._assert_single_item(wf_ex.task_executions, name='task1') requests.request.assert_called_with( 'GET', 'https://api.library.org/books', params=None, data=None, json=None, headers=None, cookies=None, allow_redirects=None, proxies=None, verify=None, auth=EXPECTED_ENV_AUTH, timeout=60 ) @mock.patch.object( requests, 'request', mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK'))) @mock.patch.object( std_actions.HTTPAction, 'is_sync', mock.MagicMock(return_value=True)) def test_with_items_action_defaults_from_env(self): wf_service.create_workflows(WORKFLOW1_WITH_ITEMS) wf_input = { 'links': [ 'https://api.library.org/books', 'https://api.library.org/authors' ] } wf_ex = self.engine.start_workflow( 'wf1_with_items', wf_input=wf_input, env=ENV ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.SUCCESS, wf_ex.state) self._assert_single_item(wf_ex.task_executions, name='task1') calls = [mock.call('GET', url, params=None, data=None, json=None, headers=None, cookies=None, allow_redirects=None, proxies=None, auth=EXPECTED_ENV_AUTH, verify=None, timeout=ENV['__actions']['std.http']['timeout']) for url in wf_input['links']] requests.request.assert_has_calls(calls, any_order=True) @mock.patch.object( 
requests, 'request', mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK'))) @mock.patch.object( std_actions.HTTPAction, 'is_sync', mock.MagicMock(return_value=True)) def test_with_items_action_defaults_from_env_not_applied(self): wf_service.create_workflows(WORKFLOW2_WITH_ITEMS) wf_input = { 'links': [ 'https://api.library.org/books', 'https://api.library.org/authors' ] } wf_ex = self.engine.start_workflow( 'wf2_with_items', wf_input=wf_input, env=ENV ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.SUCCESS, wf_ex.state) self._assert_single_item(wf_ex.task_executions, name='task1') calls = [mock.call('GET', url, params=None, data=None, json=None, headers=None, cookies=None, allow_redirects=None, proxies=None, auth=EXPECTED_ENV_AUTH, verify=None, timeout=60) for url in wf_input['links']] requests.request.assert_has_calls(calls, any_order=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_action_heartbeat_checker.py0000644000175000017500000000755100000000000027555 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cachetools import mock from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.rpc import clients as rpc_clients from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class ActionHeartbeatCheckerTest(base.EngineTestCase): def setUp(self): # We need to override configuration values before starting engine. self.override_config('check_interval', 1, 'action_heartbeat') self.override_config('max_missed_heartbeats', 1, 'action_heartbeat') self.override_config('first_heartbeat_timeout', 0, 'action_heartbeat') super(ActionHeartbeatCheckerTest, self).setUp() # Make sure actions are not sent to an executor. @mock.patch.object( rpc_clients.ExecutorClient, 'run_action', mock.MagicMock() ) def test_fail_action_with_missing_heartbeats(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') # The workflow should fail because the action of "task1" should be # failed automatically by the action execution heartbeat checker. self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) t_execs = wf_ex.task_executions t_ex = self._assert_single_item( t_execs, name='task1', state=states.ERROR ) a_execs = db_api.get_action_executions(task_execution_id=t_ex.id) self._assert_single_item( a_execs, name='std.noop', state=states.ERROR ) # Make sure actions are not sent to an executor. 
@mock.patch.object( rpc_clients.ExecutorClient, 'run_action', mock.MagicMock() ) @mock.patch.object( cachetools.LRUCache, '__getitem__', mock.MagicMock(side_effect=KeyError) ) def test_fail_action_with_missing_heartbeats_wf_spec_not_cached(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') # The workflow should fail because the action of "task1" should be # failed automatically by the action execution heartbeat checker. self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) t_execs = wf_ex.task_executions t_ex = self._assert_single_item( t_execs, name='task1', state=states.ERROR ) a_execs = db_api.get_action_executions(task_execution_id=t_ex.id) self._assert_single_item( a_execs, name='std.noop', state=states.ERROR ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_action_heartbeat_sender.py0000644000175000017500000001077200000000000027430 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.rpc import clients as rpc_clients from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class ActionHeartbeatSenderBaseTest(base.EngineTestCase): def setUp(self): # We need to set all required configuration values before starting # an engine and an executor. self.get_configuration() super(ActionHeartbeatSenderBaseTest, self).setUp() def get_configuration(self): # We need to override configuration values before starting engine. # Subclasses can override this method and add/change their own # config options. self.override_config('check_interval', 1, 'action_heartbeat') self.override_config('max_missed_heartbeats', 1, 'action_heartbeat') self.override_config('first_heartbeat_timeout', 0, 'action_heartbeat') def _do_long_action_success_test(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.sleep seconds=4 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) t_execs = wf_ex.task_executions t_ex = self._assert_single_item( t_execs, name='task1', state=states.SUCCESS ) a_execs = db_api.get_action_executions(task_execution_id=t_ex.id) self._assert_single_item( a_execs, name='std.sleep', state=states.SUCCESS ) # Disable the ability to send action heartbeats. 
@mock.patch.object( rpc_clients.EngineClient, 'process_action_heartbeats', mock.MagicMock() ) def _do_long_action_failure_test_with_disabled_sender(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.sleep seconds=4 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) t_execs = wf_ex.task_executions t_ex = self._assert_single_item( t_execs, name='task1', state=states.ERROR ) a_execs = db_api.get_action_executions(task_execution_id=t_ex.id) self._assert_single_item( a_execs, name='std.sleep', state=states.ERROR ) class ActionHeartbeatSenderLocalExecutorTest(ActionHeartbeatSenderBaseTest): def get_configuration(self): super(ActionHeartbeatSenderLocalExecutorTest, self).get_configuration() self.override_config('type', 'local', 'executor') def test_long_action_success(self): self._do_long_action_success_test() def test_long_action_failure_with_disabled_sender(self): self._do_long_action_failure_test_with_disabled_sender() class ActionHeartbeatSenderRemoteExecutorTest(ActionHeartbeatSenderBaseTest): def get_configuration(self): super( ActionHeartbeatSenderRemoteExecutorTest, self ).get_configuration() self.override_config('type', 'remote', 'executor') def test_long_action_success(self): self._do_long_action_success_test() def test_long_action_failure_with_disabled_sender(self): self._do_long_action_failure_test_with_disabled_sender() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_adhoc_actions.py0000644000175000017500000002455600000000000025377 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.services import workbooks as wb_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. 
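# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Mistral sources): the heartbeat tests
# above tighten the action_heartbeat options before the engine starts. The
# same overrides can be expressed directly with oslo.config; the option
# registration and default values below are assumptions made only to keep
# the demo self-contained (Mistral registers these options itself).
# ---------------------------------------------------------------------------
from oslo_config import cfg as cfg_demo

conf = cfg_demo.ConfigOpts()

conf.register_opts(
    [
        cfg_demo.IntOpt('check_interval', default=20),
        cfg_demo.IntOpt('max_missed_heartbeats', default=15),
        cfg_demo.IntOpt('first_heartbeat_timeout', default=3600),
    ],
    group='action_heartbeat'
)

# The equivalent of self.override_config(...) in the tests above.
conf.set_override('check_interval', 1, group='action_heartbeat')
conf.set_override('max_missed_heartbeats', 1, group='action_heartbeat')
conf.set_override('first_heartbeat_timeout', 0, group='action_heartbeat')

assert conf.action_heartbeat.check_interval == 1
# ---------------------------------------------------------------------------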
cfg.CONF.set_default('auth_enable', False, group='pecan') WB = """ --- version: '2.0' name: my_wb actions: concat_twice: base: std.echo base-input: output: "<% $.s1 %>+<% $.s2 %>" input: - s1: "a" - s2 output: "<% $ %> and <% $ %>" test_env: base: std.echo base-input: output: '{{ env().foo }}' nested_concat: base: my_wb.concat_twice base-input: s2: '{{ _.n2 }}' input: - n2: 'b' output: nested_concat: '{{ _ }}' missing_base: base: wrong input: - some_input nested_missing_base: base: missing_base input: - some_input workflows: wf1: type: direct input: - str1 - str2 output: workflow_result: <% $.result %> # Access to execution context variables concat_task_result: <% task(concat).result %> # Same but via task name tasks: concat: action: concat_twice s1=<% $.str1 %> s2=<% $.str2 %> publish: result: <% task(concat).result %> wf2: type: direct input: - str1 - str2 output: workflow_result: <% $.result %> # Access to execution context variables concat_task_result: <% task(concat).result %> # Same but via task name tasks: concat: action: concat_twice s2=<% $.str2 %> publish: result: <% task(concat).result %> wf3: type: direct input: - str1 - str2 tasks: concat: action: concat_twice wf4: type: direct input: - str1 output: workflow_result: '{{ _.printenv_result }}' tasks: printenv: action: test_env publish: printenv_result: '{{ task().result }}' wf5: type: direct output: workflow_result: '{{ _.nested_result }}' tasks: nested_test: action: nested_concat publish: nested_result: '{{ task().result }}' wf6: type: direct output: workflow_result: '{{ _.missing_result }}' tasks: missing_action: action: missing_base on-complete: - next_action next_action: publish: missing_result: 'Finished' wf7: type: direct output: workflow_result: '{{ _.missing_result }}' tasks: nested_missing_action: action: nested_missing_base on-complete: - next_action next_action: publish: missing_result: 'Finished' """ class AdhocActionsTest(base.EngineTestCase): def setUp(self): super(AdhocActionsTest, self).setUp() wb_service.create_workbook_v2(WB) def test_run_workflow_with_adhoc_action(self): wf_ex = self.engine.start_workflow( 'my_wb.wf1', wf_input={'str1': 'a', 'str2': 'b'} ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual( { 'workflow_result': 'a+b and a+b', 'concat_task_result': 'a+b and a+b' }, wf_ex.output ) def test_run_adhoc_action_without_input_value(self): wf_ex = self.engine.start_workflow( 'my_wb.wf2', wf_input={'str1': 'a', 'str2': 'b'} ) self.await_workflow_success(wf_ex.id) expected_output = { 'workflow_result': 'a+b and a+b', 'concat_task_result': 'a+b and a+b' } with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual(expected_output, wf_ex.output) def test_run_adhoc_action_without_sufficient_input_value(self): wf_ex = self.engine.start_workflow( 'my_wb.wf3', wf_input={'str1': 'a', 'str2': 'b'} ) self.assertIn("Invalid input", wf_ex.state_info) self.assertEqual(states.ERROR, wf_ex.state) def test_run_adhoc_action_with_env(self): wf_ex = self.engine.start_workflow( 'my_wb.wf4', wf_input={'str1': 'a'}, env={'foo': 'bar'} ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual( { 'workflow_result': 'bar' }, wf_ex.output ) def test_run_nested_adhoc_with_output(self): wf_ex = self.engine.start_workflow('my_wb.wf5') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = 
db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual( { 'workflow_result': {'nested_concat': 'a+b and a+b'} }, wf_ex.output ) def test_missing_adhoc_action_definition(self): wf_ex = self.engine.start_workflow('my_wb.wf6') self.await_workflow_error(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='missing_action') self.assertEqual(states.ERROR, task1.state) def test_nested_missing_adhoc_action_definition(self): wf_ex = self.engine.start_workflow('my_wb.wf7') self.await_workflow_error(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task1 = self._assert_single_item( tasks, name='nested_missing_action' ) self.assertEqual(states.ERROR, task1.state) def test_adhoc_async_action(self): wb_text = """--- version: '2.0' name: my_wb1 actions: my_action: input: - my_param base: std.async_noop workflows: my_wf: tasks: task1: action: my_action my_param="asdfasdf" """ wb_service.create_workbook_v2(wb_text) wf_ex = self.engine.start_workflow('my_wb1.my_wf') self.await_workflow_running(wf_ex.id) def test_adhoc_action_definition_with_namespace(self): namespace1 = 'ad-hoc_test' namespace2 = 'ad-hoc_test2' wb_text = """--- version: '2.0' name: my_wb1 actions: test_env: base: std.echo base-input: output: '{{ env().foo }}' # TODO(rakhmerov): It won't work. workflows: wf: input: - str1 output: workflow_result: '{{ _.printenv_result }}' tasks: printenv: action: test_env publish: printenv_result: '{{ task().result }}' """ wb_service.create_workbook_v2(wb_text, namespace=namespace1) wb_service.create_workbook_v2(wb_text, namespace=namespace2) with db_api.transaction(): action_defs = db_api.get_action_definitions( name='my_wb1.test_env' ) self.assertEqual(2, len(action_defs)) action_defs = db_api.get_action_definitions( name='my_wb1.test_env', namespace=namespace1 ) self.assertEqual(1, len(action_defs)) action_defs = db_api.get_action_definitions( name='my_wb1.test_env', namespace=namespace2 ) self.assertEqual(1, len(action_defs)) self.assertRaises( exc.DBEntityNotFoundError, db_api.get_action_definition, name='my_wb1.test_env' ) def test_adhoc_action_execution_with_namespace(self): namespace = 'ad-hoc_test' wb_service.create_workbook_v2(WB, namespace=namespace) wf_ex = self.engine.start_workflow( 'my_wb.wf4', wf_input={'str1': 'a'}, env={'foo': 'bar'}, wf_namespace=namespace ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): action_execs = db_api.get_action_executions( name='std.echo', workflow_namespace=namespace ) self.assertEqual(1, len(action_execs)) context = action_execs[0].runtime_context self.assertEqual( 'my_wb.test_env', context.get('adhoc_action_name') ) self.assertEqual(namespace, action_execs[0].workflow_namespace) def test_adhoc_action_runtime_context_name(self): wf_ex = self.engine.start_workflow( 'my_wb.wf4', wf_input={'str1': 'a'}, env={'foo': 'bar'} ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): action_execs = db_api.get_action_executions(name='std.echo') self.assertEqual(1, len(action_execs)) action_name = action_execs[0].runtime_context.get( 'adhoc_action_name' ) self.assertEqual('my_wb.test_env', action_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 
mistral-10.0.0.0b3/mistral/tests/unit/engine/test_commands.py0000644000175000017500000003270100000000000024371 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.services import workbooks as wb_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WORKBOOK1 = """ --- version: '2.0' name: my_wb workflows: wf: type: direct input: - my_var tasks: task1: action: std.echo output='1' on-complete: - fail: <% $.my_var = 1 %> - succeed: <% $.my_var = 2 %> - pause: <% $.my_var = 3 %> - task2 task2: action: std.echo output='2' """ class SimpleEngineCommandsTest(base.EngineTestCase): def setUp(self): super(SimpleEngineCommandsTest, self).setUp() wb_service.create_workbook_v2(WORKBOOK1) def test_fail(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 1}) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) def test_succeed(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 2}) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) def test_pause(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 3}) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) # Let's resume the workflow and wait till it succeeds. 
self.engine.resume_workflow(wf_ex.id) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(2, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) self._assert_single_item( task_execs, name='task2', state=states.SUCCESS ) WORKBOOK2 = """ --- version: '2.0' name: my_wb workflows: wf: type: direct input: - my_var task-defaults: on-complete: - fail: <% $.my_var = 1 %> - succeed: <% $.my_var = 2 %> - pause: <% $.my_var = 3 %> - task2: <% $.my_var = 4 %> # (Never happens in this test) tasks: task1: action: std.echo output='1' task2: action: std.echo output='2' """ class SimpleEngineWorkflowLevelCommandsTest(base.EngineTestCase): def setUp(self): super(SimpleEngineWorkflowLevelCommandsTest, self).setUp() wb_service.create_workbook_v2(WORKBOOK2) def test_fail(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 1}) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) def test_succeed(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 2}) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) def test_pause(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 3}) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) WORKBOOK3 = """ --- version: '2.0' name: my_wb workflows: fail_first_wf: type: direct tasks: task1: action: std.echo output='1' on-complete: - fail - task2 task2: action: std.echo output='2' fail_second_wf: type: direct tasks: task1: action: std.echo output='1' on-complete: - task2 - fail task2: action: std.echo output='2' succeed_first_wf: type: direct tasks: task1: action: std.echo output='1' on-complete: - succeed - task2 task2: action: std.echo output='2' succeed_second_wf: type: direct tasks: task1: action: std.echo output='1' on-complete: - task2 - succeed task2: action: std.http url='some.not.existing.url' """ class OrderEngineCommandsTest(base.EngineTestCase): def setUp(self): super(OrderEngineCommandsTest, self).setUp() wb_service.create_workbook_v2(WORKBOOK3) def test_fail_first(self): wf_ex = self.engine.start_workflow('my_wb.fail_first_wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) def test_fail_second(self): wf_ex = self.engine.start_workflow('my_wb.fail_second_wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(2, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) task2_db = self._assert_single_item(task_execs, name='task2') self.await_task_success(task2_db.id) 
self.await_workflow_error(wf_ex.id) def test_succeed_first(self): wf_ex = self.engine.start_workflow('my_wb.succeed_first_wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) def test_succeed_second(self): wf_ex = self.engine.start_workflow('my_wb.succeed_second_wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(2, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) task2_db = self._assert_single_item(task_execs, name='task2') self.await_task_error(task2_db.id) self.await_workflow_success(wf_ex.id) WORKBOOK4 = """ --- version: '2.0' name: my_wb workflows: wf: type: direct input: - my_var tasks: task1: action: std.echo output='1' on-complete: - fail(msg='my_var value is 1'): <% $.my_var = 1 %> - succeed(msg='my_var value is 2'): <% $.my_var = 2 %> - pause(msg='my_var value is 3'): <% $.my_var = 3 %> - task2 task2: action: std.echo output='2' """ class SimpleEngineCmdsWithMsgTest(base.EngineTestCase): def setUp(self): super(SimpleEngineCmdsWithMsgTest, self).setUp() wb_service.create_workbook_v2(WORKBOOK4) def test_fail(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 1}) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) self.assertEqual(states.ERROR, wf_ex.state) self.assertEqual('my_var value is 1', wf_ex.state_info) def test_succeed(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 2}) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual("my_var value is 2", wf_ex.state_info) def test_pause(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 3}) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual("my_var value is 3", wf_ex.state_info) WORKBOOK5 = """ --- version: '2.0' name: my_wb workflows: wf: type: direct input: - my_var task-defaults: on-complete: - fail(msg='my_var value is 1'): <% $.my_var = 1 %> - succeed(msg='my_var value is <% $.my_var %>'): <% $.my_var = 2 %> - pause(msg='my_var value is 3'): <% $.my_var = 3 %> - task2: <% $.my_var = 4 %> # (Never happens in this test) tasks: task1: action: std.echo output='1' task2: action: std.echo output='2' """ class SimpleEngineWorkflowLevelCmdsWithMsgTest(base.EngineTestCase): def setUp(self): super(SimpleEngineWorkflowLevelCmdsWithMsgTest, self).setUp() wb_service.create_workbook_v2(WORKBOOK5) def test_fail(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 1}) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = 
db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) self.assertEqual(states.ERROR, wf_ex.state) self.assertEqual("my_var value is 1", wf_ex.state_info) def test_succeed(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 2}) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual("my_var value is 2", wf_ex.state_info) def test_pause(self): wf_ex = self.engine.start_workflow('my_wb.wf', wf_input={'my_var': 3}) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual("my_var value is 3", wf_ex.state_info) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_cron_trigger.py0000644000175000017500000001453000000000000025254 0ustar00coreycorey00000000000000# Copyright 2015 Alcatel-Lucent, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import mock from oslo_config import cfg from mistral import context as auth_ctx from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.services import periodic from mistral.services import security from mistral.services import triggers from mistral.services import workflows from mistral.tests.unit.engine import base from mistral_lib import utils WORKFLOW_LIST = """ --- version: '2.0' my_wf: type: direct tasks: task1: action: std.echo output='Hi!' """ class ProcessCronTriggerTest(base.EngineTestCase): @mock.patch.object(security, 'create_trust', type('trust', (object,), {'id': 'my_trust_id'})) @mock.patch('mistral.rpc.clients.get_engine_client') def test_start_workflow(self, get_engine_client_mock): cfg.CONF.set_default('auth_enable', True, group='pecan') wf = workflows.create_workflows(WORKFLOW_LIST)[0] t = triggers.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), wf.name, {}, {}, '* * * * * */1', None, None, None ) self.assertEqual('my_trust_id', t.trust_id) cfg.CONF.set_default('auth_enable', False, group='pecan') next_trigger = triggers.get_next_cron_triggers()[0] next_execution_time_before = next_trigger.next_execution_time periodic.process_cron_triggers_v2(None, None) start_wf_mock = get_engine_client_mock.return_value.start_workflow start_wf_mock.assert_called_once() # Check actual parameters of the call. 
self.assertEqual( ('my_wf', '', None, {}), start_wf_mock.mock_calls[0][1] ) self.assertIn( t.id, start_wf_mock.mock_calls[0][2]['description'] ) self._await( lambda: triggers.get_next_cron_triggers(), fail_message="No triggers were found" ) next_triggers = triggers.get_next_cron_triggers() self.assertEqual(1, len(next_triggers)) next_trigger = next_triggers[0] next_execution_time_after = next_trigger.next_execution_time # Checking the workflow was executed, by # verifying that the next execution time changed. self.assertNotEqual( next_execution_time_before, next_execution_time_after ) def test_workflow_without_auth(self): cfg.CONF.set_default('auth_enable', False, group='pecan') wf = workflows.create_workflows(WORKFLOW_LIST)[0] triggers.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), wf.name, {}, {}, '* * * * * */1', None, None, None ) next_triggers = triggers.get_next_cron_triggers() self.assertEqual(1, len(next_triggers)) next_trigger = next_triggers[0] next_execution_time_before = next_trigger.next_execution_time ts_before = datetime.datetime.utcnow() periodic.process_cron_triggers_v2(None, None) self._await( lambda: triggers.get_next_cron_triggers(), fail_message="No triggers were found" ) next_triggers = triggers.get_next_cron_triggers() self.assertEqual(1, len(next_triggers)) next_trigger = next_triggers[0] next_execution_time_after = next_trigger.next_execution_time self.assertGreater( next_execution_time_after, ts_before ) self.assertNotEqual( next_execution_time_before, next_execution_time_after ) @mock.patch('mistral.services.triggers.validate_cron_trigger_input') def test_create_cron_trigger_with_pattern_and_first_time(self, validate_mock): cfg.CONF.set_default('auth_enable', False, group='pecan') wf = workflows.create_workflows(WORKFLOW_LIST)[0] # Make the first_time 1 sec later than current time, in order to make # it executed by next cron-trigger task. first_time = datetime.datetime.utcnow() + datetime.timedelta(0, 1) # Creates a cron-trigger with pattern and first time, ensure the # cron-trigger can be executed more than once, and cron-trigger will # not be deleted. trigger_name = 'trigger-%s' % utils.generate_unicode_uuid() cron_trigger = triggers.create_cron_trigger( trigger_name, wf.name, {}, {}, '*/1 * * * *', first_time, None, None ) interval = (cron_trigger.next_execution_time - first_time) self.assertLessEqual(interval.total_seconds(), 3.0) periodic.process_cron_triggers_v2(None, None) # After process_triggers context is set to None, need to reset it. auth_ctx.set_ctx(self.ctx) next_time = triggers.get_next_execution_time( cron_trigger.pattern, cron_trigger.next_execution_time ) cron_trigger_db = db_api.get_cron_trigger(trigger_name) self.assertIsNotNone(cron_trigger_db) interval = (cron_trigger_db.next_execution_time - next_time) self.assertLessEqual(interval.total_seconds(), 3.0) def test_validate_cron_trigger_input_first_time(self): cfg.CONF.set_default('auth_enable', False, group='pecan') first_time = datetime.datetime.utcnow() + datetime.timedelta(0, 1) self.assertRaises( exc.InvalidModelException, triggers.validate_cron_trigger_input, None, first_time, None ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_dataflow.py0000644000175000017500000012611300000000000024372 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. 
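# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Mistral sources): the cron trigger
# tests above revolve around next_execution_time moving forward once a
# trigger fires. Mistral computes such timestamps from a cron pattern; the
# croniter-based helper below shows the general idea and is an assumption,
# not the project's actual implementation.
# ---------------------------------------------------------------------------
import datetime

from croniter import croniter


def get_next_execution_time(pattern, start_time):
    """Return the first fire time strictly after start_time."""
    return croniter(pattern, start_time).get_next(datetime.datetime)


# With a once-a-minute pattern the next fire time is at most 60 seconds away.
now = datetime.datetime(2020, 4, 10, 12, 0, 30)
nxt = get_next_execution_time('*/1 * * * *', now)

assert (nxt - now).total_seconds() <= 60
# ---------------------------------------------------------------------------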
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral import expressions as expr from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit import base as test_base from mistral.tests.unit.engine import base as engine_test_base from mistral import utils from mistral.workflow import data_flow from mistral.workflow import states import sys # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class DataFlowEngineTest(engine_test_base.EngineTestCase): def test_linear_dataflow(self): wf_text = """--- version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Hi" publish: hi: <% task(task1).result %> on-success: - task2 task2: action: std.echo output="Morpheus" publish: to: <% task(task2).result %> on-success: - task3 task3: publish: result: "<% $.hi %>, <% $.to %>! Your <% env().from %>." """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf', env={'from': 'Neo'}) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') task3 = self._assert_single_item(tasks, name='task3') self.assertEqual(states.SUCCESS, task3.state) self.assertDictEqual({'hi': 'Hi'}, task1.published) self.assertDictEqual({'to': 'Morpheus'}, task2.published) self.assertDictEqual( {'result': 'Hi, Morpheus! Your Neo.'}, task3.published ) # Make sure that task inbound context doesn't contain workflow # execution info. self.assertNotIn('__execution', task1.in_context) def test_linear_with_branches_dataflow(self): wf_text = """--- version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Hi" publish: hi: <% task(task1).result %> progress: "completed task1" on-success: - notify - task2 task2: action: std.echo output="Morpheus" publish: to: <% task(task2).result %> progress: "completed task2" on-success: - notify - task3 task3: publish: result: "<% $.hi %>, <% $.to %>! Your <% env().from %>." progress: "completed task3" on-success: - notify notify: action: std.echo output=<% $.progress %> publish: progress: <% task(notify).result %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf', env={'from': 'Neo'}) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') task3 = self._assert_single_item(tasks, name='task3') notify_tasks = self._assert_multiple_items(tasks, 3, name='notify') notify_published_arr = [t.published['progress'] for t in notify_tasks] self.assertEqual(states.SUCCESS, task3.state) exp_published_arr = [ { 'hi': 'Hi', 'progress': 'completed task1' }, { 'to': 'Morpheus', 'progress': 'completed task2' }, { 'result': 'Hi, Morpheus! Your Neo.', 'progress': 'completed task3' } ] self.assertDictEqual(exp_published_arr[0], task1.published) self.assertDictEqual(exp_published_arr[1], task2.published) self.assertDictEqual(exp_published_arr[2], task3.published) self.assertIn( exp_published_arr[0]['progress'], notify_published_arr ) self.assertIn( exp_published_arr[1]['progress'], notify_published_arr ) self.assertIn( exp_published_arr[2]['progress'], notify_published_arr ) def test_parallel_tasks(self): wf_text = """--- version: '2.0' wf: type: direct tasks: task1: action: std.echo output=1 publish: var1: <% task(task1).result %> task2: action: std.echo output=2 publish: var2: <% task(task2).result %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf',) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output tasks = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(2, len(tasks)) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertEqual(states.SUCCESS, task1.state) self.assertEqual(states.SUCCESS, task2.state) self.assertDictEqual({'var1': 1}, task1.published) self.assertDictEqual({'var2': 2}, task2.published) self.assertEqual(1, wf_output['var1']) self.assertEqual(2, wf_output['var2']) def test_parallel_tasks_complex(self): wf_text = """--- version: '2.0' wf: type: direct tasks: task1: action: std.noop publish: var1: 1 on-complete: - task12 task12: action: std.noop publish: var12: 12 on-complete: - task13 - task14 task13: action: std.fail description: | Since this task fails we expect that 'var13' won't go into context. Only 'var14'. publish: var13: 13 on-error: - noop task14: publish: var14: 14 task2: publish: var2: 2 on-complete: - task21 task21: publish: var21: 21 """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output tasks = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(6, len(tasks)) task1 = self._assert_single_item(tasks, name='task1') task12 = self._assert_single_item(tasks, name='task12') task13 = self._assert_single_item(tasks, name='task13') task14 = self._assert_single_item(tasks, name='task14') task2 = self._assert_single_item(tasks, name='task2') task21 = self._assert_single_item(tasks, name='task21') self.assertEqual(states.SUCCESS, task1.state) self.assertEqual(states.SUCCESS, task12.state) self.assertEqual(states.ERROR, task13.state) self.assertEqual(states.SUCCESS, task14.state) self.assertEqual(states.SUCCESS, task2.state) self.assertEqual(states.SUCCESS, task21.state) self.assertDictEqual({'var1': 1}, task1.published) self.assertDictEqual({'var12': 12}, task12.published) self.assertDictEqual({'var14': 14}, task14.published) self.assertDictEqual({'var2': 2}, task2.published) self.assertDictEqual({'var21': 21}, task21.published) self.assertEqual(1, wf_output['var1']) self.assertEqual(12, wf_output['var12']) self.assertNotIn('var13', wf_output) self.assertEqual(14, wf_output['var14']) self.assertEqual(2, wf_output['var2']) self.assertEqual(21, wf_output['var21']) def test_sequential_tasks_publishing_same_var(self): wf_text = """--- version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Hi" publish: greeting: <% task(task1).result %> on-success: - task2 task2: action: std.echo output="Yo" publish: greeting: <% task(task2).result %> on-success: - task3 task3: action: std.echo output="Morpheus" publish: to: <% task(task3).result %> on-success: - task4 task4: publish: result: "<% $.greeting %>, <% $.to %>! <% env().from %>." """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf', env={'from': 'Neo'}) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') task3 = self._assert_single_item(tasks, name='task3') task4 = self._assert_single_item(tasks, name='task4') self.assertEqual(states.SUCCESS, task4.state) self.assertDictEqual({'greeting': 'Hi'}, task1.published) self.assertDictEqual({'greeting': 'Yo'}, task2.published) self.assertDictEqual({'to': 'Morpheus'}, task3.published) self.assertDictEqual( {'result': 'Yo, Morpheus! Neo.'}, task4.published ) def test_sequential_tasks_publishing_same_structured(self): wf_text = """--- version: '2.0' wf: type: direct tasks: task1: publish: greeting: {"a": "b"} on-success: - task2 task2: publish: greeting: {} on-success: - task3 task3: publish: result: <% $.greeting %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf', env={'from': 'Neo'}) self.await_workflow_success(wf_ex.id) # Note: We need to reread execution to access related tasks. 
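        # Each successive publish of 'greeting' replaces the previous
        # value, so task3 should observe the empty dict from task2.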
with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') task3 = self._assert_single_item(tasks, name='task3') self.assertEqual(states.SUCCESS, task3.state) self.assertDictEqual({'greeting': {'a': 'b'}}, task1.published) self.assertDictEqual({'greeting': {}}, task2.published) self.assertDictEqual({'result': {}}, task3.published) def test_linear_dataflow_implicit_publish(self): wf_text = """--- version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Hi" on-success: - task21 - task22 task21: action: std.echo output="Morpheus" on-success: - task4 task22: action: std.echo output="Neo" on-success: - task4 task4: join: all publish: result: > <% task(task1).result %>, <% task(task21).result %>! Your <% task(task22).result %>. """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task4 = self._assert_single_item(tasks, name='task4') self.assertDictEqual( {'result': 'Hi, Morpheus! Your Neo.\n'}, task4.published ) def test_destroy_result(self): wf_text = """--- version: '2.0' wf: type: direct tasks: task1: action: std.echo output=["Hi", "John Doe!"] publish: hi: <% task(task1).result %> keep-result: false """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='task1') result = data_flow.get_task_execution_result(task1) # Published vars are saved. self.assertDictEqual( {'hi': ["Hi", "John Doe!"]}, task1.published ) # But all result is cleared. self.assertIsNone(result) def test_empty_with_items(self): wf_text = """--- version: "2.0" wf1_with_items: type: direct tasks: task1: with-items: i in <% list() %> action: std.echo output= "Task 1.<% $.i %>" publish: result: <% task(task1).result %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf1_with_items') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task1 = self._assert_single_item( wf_ex.task_executions, name='task1' ) result = data_flow.get_task_execution_result(task1) self.assertListEqual([], result) def test_publish_on_error(self): wf_text = """--- version: '2.0' wf: type: direct output-on-error: out: <% $.hi %> tasks: task1: action: std.fail publish-on-error: hi: hello_from_error err: <% task(task1).result %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
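            # 'publish-on-error' stores variables on the failed task and
            # 'output-on-error' maps them into the workflow output; both
            # effects are asserted below.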
wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output tasks = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(states.ERROR, task1.state) self.assertEqual('hello_from_error', task1.published['hi']) self.assertIn( 'Fail action expected exception', task1.published['err'] ) self.assertEqual('hello_from_error', wf_output['out']) self.assertIn( 'Fail action expected exception', wf_output['result'] ) def test_publish_with_all(self): wf_text = """--- version: '2.0' wf: tasks: main-task: publish: res_x1: 111 on-complete: next: complete-task publish: branch: res_x3: 222 on-success: next: success-task publish: branch: res_x2: 222 success-task: action: std.noop publish: success_x2: <% $.res_x2 %> success_x1: <% $.res_x1 %> complete-task: action: std.noop publish: complete_x2: <% $.res_x3 %> complete_x1: <% $.res_x1 %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output tasks = wf_ex.task_executions main_task = self._assert_single_item(tasks, name='main-task') main_task_published_vars = main_task.get("published") expected_main_variables = {'res_x3', 'res_x2', 'res_x1'} self.assertEqual(set(main_task_published_vars.keys()), expected_main_variables) complete_task = self._assert_single_item(tasks, name='complete-task') complete_task_published_vars = complete_task.get("published") expected_complete_variables = {'complete_x2', 'complete_x1'} self.assertEqual(set(complete_task_published_vars.keys()), expected_complete_variables) success_task = self._assert_single_item(tasks, name='success-task') success_task_published_vars = success_task.get("published") expected_success_variables = {'success_x2', 'success_x1'} self.assertEqual(set(success_task_published_vars.keys()), expected_success_variables) all_expected_published_variables = expected_main_variables.union( expected_success_variables, expected_complete_variables ) self.assertEqual(set(wf_output), all_expected_published_variables) def test_publish_no_success(self): wf_text = """--- version: '2.0' wf: tasks: main-task: publish: res_x1: 111 on-complete: next: complete-task publish: branch: res_x3: 222 complete-task: action: std.noop publish: complete_x2: <% $.res_x3 %> complete_x1: <% $.res_x1 %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
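            # With no 'on-success' clause, only the regular publish and
            # the 'on-complete' branch publish can contribute variables
            # to the workflow output.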
wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output tasks = wf_ex.task_executions main_task = self._assert_single_item(tasks, name='main-task') main_task_published_vars = main_task.get("published") expected_main_variables = {'res_x3', 'res_x1'} self.assertEqual(set(main_task_published_vars.keys()), expected_main_variables) complete_task = self._assert_single_item(tasks, name='complete-task') complete_task_published_vars = complete_task.get("published") expected_complete_variables = {'complete_x2', 'complete_x1'} self.assertEqual(set(complete_task_published_vars.keys()), expected_complete_variables) all_expected_published_variables = expected_main_variables.union( expected_complete_variables) self.assertEqual(set(wf_output), all_expected_published_variables) def test_publish_no_complete(self): wf_text = """--- version: '2.0' wf: tasks: main-task: publish: res_x1: 111 on-success: next: success-task publish: branch: res_x2: 222 success-task: action: std.noop publish: success_x2: <% $.res_x2 %> success_x1: <% $.res_x1 %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions wf_output = wf_ex.output main_task = self._assert_single_item(tasks, name='main-task') main_task_published_vars = main_task.get("published") expected_main_variables = {'res_x2', 'res_x1'} self.assertEqual(set(main_task_published_vars.keys()), expected_main_variables) success_task = self._assert_single_item(tasks, name='success-task') success_task_published_vars = success_task.get("published") expected_success_variables = {'success_x2', 'success_x1'} self.assertEqual(set(success_task_published_vars.keys()), expected_success_variables) all_expected_published_variables = expected_main_variables.union( expected_success_variables) self.assertEqual(set(wf_output), all_expected_published_variables) def test_publish_no_regular_publish(self): wf_text = """--- version: '2.0' wf2: tasks: main-task: on-success: next: success-task publish: branch: res_x2: 222 success-task: action: std.noop publish: success_x2: <% $.res_x2 %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf2') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
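            # 'main-task' declares no regular publish at all, so
            # 'res_x2' can only originate from the 'on-success' branch
            # publish.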
wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions wf_output = wf_ex.output main_task = self._assert_single_item(tasks, name='main-task') main_task_published_vars = main_task.get("published") expected_main_variables = {'res_x2'} self.assertEqual(set(main_task_published_vars.keys()), expected_main_variables) success_task = self._assert_single_item(tasks, name='success-task') success_task_published_vars = success_task.get("published") expected_success_variables = {'success_x2'} self.assertEqual(set(success_task_published_vars.keys()), expected_success_variables) all_expected_published_variables = expected_main_variables.union( expected_success_variables) self.assertEqual(set(wf_output), all_expected_published_variables) def test_output_on_error_wb_yaql_failed(self): wb_text = """--- version: '2.0' name: wb workflows: wf1: type: direct output-on-error: message: <% $.message %> tasks: task1: workflow: wf2 publish-on-error: message: <% task(task1).result.message %> wf2: type: direct output-on-error: message: <% $.not_existing_variable %> tasks: task1: action: std.fail publish-on-error: message: <% task(task1).result %> """ wb_service.create_workbook_v2(wb_text) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('Failed to evaluate expression in output-on-error!', wf_ex.state_info) self.assertIn('$.message', wf_ex.state_info) task1 = self._assert_single_item(tasks, name='task1') self.assertIn('task(task1).result.message', task1.state_info) def test_size_of_output_by_execution_field_size_limit_kb(self): wf_text = """ version: '2.0' wf: type: direct output-on-error: custom_error: The action in the task does not exists tasks: task1: action: wrong.task """ # Note: The number 1121 below added as value for field size # limit is because the output of workflow error comes as # workflow error string + custom error message and total length # might be greater than 1121. It varies depending on the length # of the custom message. This is a random number value used for # test case only. cfg.CONF.set_default( 'execution_field_size_limit_kb', 1121, group='engine' ) kilobytes = cfg.CONF.engine.execution_field_size_limit_kb bytes_per_char = sys.getsizeof('s') - sys.getsizeof('') total_output_length = int(kilobytes * 1024 / bytes_per_char) wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf', '', None) self.await_workflow_error(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output self.assertLess( len(str(wf_output.get('custom_error'))), total_output_length ) def test_override_json_input(self): wf_text = """--- version: 2.0 wf: input: - a: aa: aa bb: bb tasks: task1: action: std.noop publish: published_a: <% $.a %> """ wf_service.create_workflows(wf_text) wf_input = { 'a': { 'cc': 'cc', 'dd': 'dd' } } # Start workflow. wf_ex = self.engine.start_workflow('wf', wf_input=wf_input) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
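            # The caller-supplied dict must completely replace the JSON
            # default declared for input 'a' rather than be merged with
            # it, which is what the assertion below checks.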
wf_ex = db_api.get_workflow_execution(wf_ex.id) task1 = wf_ex.task_executions[0] self.assertDictEqual(wf_input['a'], task1.published['published_a']) def test_branch_publishing_success(self): wf_text = """--- version: 2.0 wf: tasks: task1: action: std.noop on-success: publish: branch: my_var: my branch value next: task2 task2: action: std.echo output=<% $.my_var %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='task1') self._assert_single_item(tasks, name='task2') self.assertDictEqual({"my_var": "my branch value"}, task1.published) def test_global_publishing_success_access_via_root_context_(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output="Hi" on-success: publish: global: my_var: <% task().result %> next: - task2 task2: action: std.echo output=<% $.my_var %> publish: result: <% task().result %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertDictEqual({'result': 'Hi'}, task2.published) def test_global_publishing_error_access_via_root_context(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.fail on-success: publish: global: my_var: "We got success" next: - task2 on-error: publish: global: my_var: "We got an error" next: - task2 task2: action: std.echo output=<% $.my_var %> publish: result: <% task().result %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertDictEqual({'result': 'We got an error'}, task2.published) def test_global_publishing_success_access_via_function(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.noop on-success: publish: branch: my_var: Branch local value global: my_var: Global value next: - task2 task2: action: std.noop publish: local: <% $.my_var %> global: <% global(my_var) %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
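            # 'my_var' was published to both scopes: $.my_var resolves
            # to the branch-local value while global(my_var) reads the
            # globally published one.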
wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertDictEqual( { 'local': 'Branch local value', 'global': 'Global value' }, task2.published ) def test_global_publishing_error_access_via_function(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.fail on-error: publish: branch: my_var: Branch local value global: my_var: Global value next: - task2 task2: action: std.noop publish: local: <% $.my_var %> global: <% global(my_var) %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertDictEqual( { 'local': 'Branch local value', 'global': 'Global value' }, task2.published ) def test_get_published_global(self): wf_text = """--- version: '2.0' wf: vars: var1: 1 var2: 2 tasks: task1: action: std.noop on-success: publish: global: global_var1: Global value 1 global_var2: Global value 2 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) published_global = ( data_flow.get_workflow_execution_published_global(wf_ex) ) self.assertDictEqual( { 'global_var1': 'Global value 1', 'global_var2': 'Global value 2' }, published_global ) def test_linear_data_with_input_expressions(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo input: output: key1: value1 key2: value2 publish: res1: <% task(task1).result %> on-success: - task2 task2: action: std.echo output=<% $.res1.key2 %> publish: res2: <% task().result %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
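            # 'res1' captures the whole dict produced by std.echo, which
            # lets task2 address nested keys such as $.res1.key2.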
wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task1 = self._assert_single_item( tasks, name='task1', state=states.SUCCESS ) task2 = self._assert_single_item( tasks, name='task2', state=states.SUCCESS ) self.assertDictEqual( { 'key1': 'value1', 'key2': 'value2' }, task1.published['res1'] ) self.assertEqual('value2', task2.published['res2']) class DataFlowTest(test_base.BaseTest): def test_get_task_execution_result(self): task_ex = models.TaskExecution( name='task1', spec={ "version": '2.0', 'name': 'task1', 'with-items': 'var in [1]', 'type': 'direct', 'action': 'my_action' }, runtime_context={ 'with_items': {'count': 1} } ) task_ex.action_executions = [models.ActionExecution( name='my_action', output={'result': 1}, accepted=True, runtime_context={'index': 0} )] self.assertEqual([1], data_flow.get_task_execution_result(task_ex)) task_ex.action_executions.append(models.ActionExecution( name='my_action', output={'result': 1}, accepted=True, runtime_context={'index': 0} )) task_ex.action_executions.append(models.ActionExecution( name='my_action', output={'result': 1}, accepted=False, runtime_context={'index': 0} )) self.assertEqual( [1, 1], data_flow.get_task_execution_result(task_ex) ) def test_context_view(self): ctx = data_flow.ContextView( { 'k1': 'v1', 'k11': 'v11', 'k3': 'v3' }, { 'k2': 'v2', 'k21': 'v21', 'k3': 'v32' } ) self.assertIsInstance(ctx, dict) self.assertEqual(5, len(ctx)) self.assertIn('k1', ctx) self.assertIn('k11', ctx) self.assertIn('k3', ctx) self.assertIn('k2', ctx) self.assertIn('k21', ctx) self.assertEqual('v1', ctx['k1']) self.assertEqual('v1', ctx.get('k1')) self.assertEqual('v11', ctx['k11']) self.assertEqual('v11', ctx.get('k11')) self.assertEqual('v3', ctx['k3']) self.assertEqual('v2', ctx['k2']) self.assertEqual('v2', ctx.get('k2')) self.assertEqual('v21', ctx['k21']) self.assertEqual('v21', ctx.get('k21')) self.assertIsNone(ctx.get('Not existing key')) self.assertRaises(exc.MistralError, ctx.update) self.assertRaises(exc.MistralError, ctx.clear) self.assertRaises(exc.MistralError, ctx.pop, 'k1') self.assertRaises(exc.MistralError, ctx.popitem) self.assertRaises(exc.MistralError, ctx.__setitem__, 'k5', 'v5') self.assertRaises(exc.MistralError, ctx.__delitem__, 'k2') self.assertEqual('v1', expr.evaluate('<% $.k1 %>', ctx)) self.assertEqual('v2', expr.evaluate('<% $.k2 %>', ctx)) self.assertEqual('v3', expr.evaluate('<% $.k3 %>', ctx)) # Now change the order of dictionaries and make sure to have # a different for key 'k3'. 
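        # ContextView gives precedence to the dictionary passed first,
        # so with the order swapped below 'k3' resolves to 'v32'.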
ctx = data_flow.ContextView( { 'k2': 'v2', 'k21': 'v21', 'k3': 'v32' }, { 'k1': 'v1', 'k11': 'v11', 'k3': 'v3' } ) self.assertEqual('v32', expr.evaluate('<% $.k3 %>', ctx)) def test_context_view_eval_root_with_yaql(self): ctx = data_flow.ContextView( {'k1': 'v1'}, {'k2': 'v2'} ) res = expr.evaluate('<% $ %>', ctx) self.assertIsNotNone(res) self.assertIsInstance(res, dict) self.assertEqual(2, len(res)) def test_context_view_eval_keys(self): ctx = data_flow.ContextView( {'k1': 'v1'}, {'k2': 'v2'} ) res = expr.evaluate('<% $.keys() %>', ctx) self.assertIsNotNone(res) self.assertIsInstance(res, list) self.assertEqual(2, len(res)) self.assertIn('k1', res) self.assertIn('k2', res) def test_context_view_eval_values(self): ctx = data_flow.ContextView( {'k1': 'v1'}, {'k2': 'v2'} ) res = expr.evaluate('<% $.values() %>', ctx) self.assertIsNotNone(res) self.assertIsInstance(res, list) self.assertEqual(2, len(res)) self.assertIn('v1', res) self.assertIn('v2', res) def test_context_view_repr(self): ctx = data_flow.ContextView( {'k1': 'v1'}, {'k2': 'v2'}, {3: 3} ) str_repr = str(ctx) self.assertIsNotNone(str_repr) self.assertFalse(str_repr == "{}") self.assertEqual("{'k1': 'v1', 'k2': 'v2', 3: 3}", str_repr) ctx = data_flow.ContextView() self.assertEqual('{}', str(ctx)) def test_context_view_as_root_json(self): ctx = data_flow.ContextView( {'k1': 'v1'}, {'k2': 'v2'}, ) json_str = utils.to_json_str(ctx) self.assertIsNotNone(json_str) self.assertNotEqual('{}', json_str) # We can't use regular dict comparison because key order # is not defined. self.assertIn('"k1": "v1"', json_str) self.assertIn('"k2": "v2"', json_str) def test_context_view_as_nested_json(self): ctx = data_flow.ContextView( {'k1': 'v1'}, {'k2': 'v2'}, ) d = {'root': ctx} json_str = utils.to_json_str(d) self.assertIsNotNone(json_str) self.assertNotEqual('{"root": {}}', json_str) # We can't use regular dict comparison because key order # is not defined. self.assertIn('"k1": "v1"', json_str) self.assertIn('"k1": "v1"', json_str) self.assertIn('"root"', json_str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_default_engine.py0000644000175000017500000005075600000000000025553 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
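# An illustrative sketch (an addition, not part of the original Mistral
# sources): the ContextView tests above rely on "first mapping wins"
# lookup semantics, and the standard library's collections.ChainMap
# provides the same read-through layering, making it a handy mental
# model for data_flow.ContextView.
from collections import ChainMap

_ctx_sketch = ChainMap({'k1': 'v1', 'k3': 'v3'}, {'k2': 'v2', 'k3': 'v32'})

assert _ctx_sketch['k3'] == 'v3'  # the first mapping takes precedence
assert sorted(_ctx_sketch) == ['k1', 'k2', 'k3']  # keys merge across layers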
import datetime import mock from oslo_config import cfg from oslo_messaging.rpc import client as rpc_client from oslo_utils import uuidutils from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral.engine import default_engine as d_eng from mistral import exceptions as exc from mistral.executors import base as exe from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit import base from mistral.tests.unit.engine import base as eng_test_base from mistral.workflow import states from mistral_lib import actions as ml_actions # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WORKBOOK = """ --- version: '2.0' name: wb workflows: wf: type: reverse input: - param1: value1 - param2 tasks: task1: action: std.echo output=<% $.param1 %> publish: var: <% task(task1).result %> task2: action: std.echo output=<% $.param2 %> requires: [task1] """ DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f' ENVIRONMENT = { 'id': uuidutils.generate_uuid(), 'name': 'test', 'description': 'my test settings', 'variables': { 'key1': 'abc', 'key2': 123 }, 'scope': 'private', 'created_at': str(datetime.datetime.utcnow()), 'updated_at': str(datetime.datetime.utcnow()) } ENVIRONMENT_DB = models.Environment( id=ENVIRONMENT['id'], name=ENVIRONMENT['name'], description=ENVIRONMENT['description'], variables=ENVIRONMENT['variables'], scope=ENVIRONMENT['scope'], created_at=datetime.datetime.strptime(ENVIRONMENT['created_at'], DATETIME_FORMAT), updated_at=datetime.datetime.strptime(ENVIRONMENT['updated_at'], DATETIME_FORMAT) ) MOCK_ENVIRONMENT = mock.MagicMock(return_value=ENVIRONMENT_DB) MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError()) @mock.patch.object(exe, 'get_executor', mock.Mock()) class DefaultEngineTest(base.DbTestCase): def setUp(self): super(DefaultEngineTest, self).setUp() wb_service.create_workbook_v2(WORKBOOK) # Note: For purposes of this test we can easily use # simple magic mocks for engine and executor clients self.engine = d_eng.DefaultEngine() def test_start_workflow(self): wf_input = {'param1': 'Hey', 'param2': 'Hi'} # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf', wf_input=wf_input, description='my execution', task_name='task2' ) self.assertIsNotNone(wf_ex) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual('my execution', wf_ex.description) self.assertIn('__execution', wf_ex.context) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_ex = task_execs[0] self.assertEqual('wb.wf', task_ex.workflow_name) self.assertEqual('task1', task_ex.name) self.assertEqual(states.RUNNING, task_ex.state) self.assertIsNotNone(task_ex.spec) self.assertDictEqual({}, task_ex.runtime_context) # Data Flow properties. action_execs = db_api.get_action_executions( task_execution_id=task_ex.id ) self.assertEqual(1, len(action_execs)) task_action_ex = action_execs[0] self.assertIsNotNone(task_action_ex) self.assertDictEqual({'output': 'Hey'}, task_action_ex.input) def test_start_workflow_with_ex_id(self): wf_input = {'param1': 'Hey1', 'param2': 'Hi1'} the_ex_id = 'theId' # Start workflow. 
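        # Starting a second workflow with the same wf_ex_id must be
        # idempotent: the engine should return the already existing
        # execution instead of creating a new one.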
wf_ex = self.engine.start_workflow( 'wb.wf', wf_input=wf_input, description='my execution', task_name='task2', wf_ex_id=the_ex_id ) self.assertEqual(the_ex_id, wf_ex.id) wf_ex_2 = self.engine.start_workflow( 'wb.wf', wf_input={'param1': 'Hey2', 'param2': 'Hi2'}, wf_ex_id=the_ex_id ) self.assertDictEqual(dict(wf_ex), dict(wf_ex_2)) wf_executions = db_api.get_workflow_executions() self.assertEqual(1, len(wf_executions)) def test_start_workflow_with_input_default(self): wf_input = {'param2': 'value2'} # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf', wf_input=wf_input, task_name='task1' ) self.assertIsNotNone(wf_ex) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIn('__execution', wf_ex.context) # Note: We need to reread execution to access related tasks. with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_ex = task_execs[0] self.assertEqual('wb.wf', task_ex.workflow_name) self.assertEqual('task1', task_ex.name) self.assertEqual(states.RUNNING, task_ex.state) self.assertIsNotNone(task_ex.spec) self.assertDictEqual({}, task_ex.runtime_context) # Data Flow properties. action_execs = db_api.get_action_executions( task_execution_id=task_ex.id ) self.assertEqual(1, len(action_execs)) task_action_ex = action_execs[0] self.assertIsNotNone(task_action_ex) self.assertDictEqual({'output': 'value1'}, task_action_ex.input) def test_start_workflow_with_adhoc_env(self): wf_input = { 'param1': '<% env().key1 %>', 'param2': '<% env().key2 %>' } env = ENVIRONMENT['variables'] # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf', wf_input=wf_input, env=env, task_name='task2') self.assertIsNotNone(wf_ex) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual(wf_ex.params.get('env', {}), env) @mock.patch.object(db_api, "load_environment", MOCK_ENVIRONMENT) def test_start_workflow_with_saved_env(self): wf_input = { 'param1': '<% env().key1 %>', 'param2': '<% env().key2 %>' } env = ENVIRONMENT['variables'] # Start workflow. 
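        # Passing env as a string makes the engine look the environment
        # up by name; db_api.load_environment is mocked above to return
        # ENVIRONMENT_DB.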
wf_ex = self.engine.start_workflow( 'wb.wf', wf_input=wf_input, env='test', task_name='task2' ) self.assertIsNotNone(wf_ex) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual(wf_ex.params.get('env', {}), env) @mock.patch.object(db_api, "get_environment", MOCK_NOT_FOUND) def test_start_workflow_env_not_found(self): e = self.assertRaises( exc.InputException, self.engine.start_workflow, 'wb.wf', wf_input={ 'param1': '<% env().key1 %>', 'param2': 'some value' }, env='foo', task_name='task2' ) self.assertEqual("Environment is not found: foo", str(e)) def test_start_workflow_with_env_type_error(self): e = self.assertRaises( exc.InputException, self.engine.start_workflow, 'wb.wf', wf_input={ 'param1': '<% env().key1 %>', 'param2': 'some value' }, env=True, task_name='task2' ) self.assertIn('Unexpected value type for environment', str(e)) def test_start_workflow_missing_parameters(self): e = self.assertRaises( exc.InputException, self.engine.start_workflow, 'wb.wf', '', None, task_name='task2' ) self.assertIn("Invalid input", str(e)) self.assertIn("missing=['param2']", str(e)) def test_start_workflow_unexpected_parameters(self): e = self.assertRaises( exc.InputException, self.engine.start_workflow, 'wb.wf', wf_input={ 'param1': 'Hey', 'param2': 'Hi', 'unexpected_param': 'val' }, task_name='task2' ) self.assertIn("Invalid input", str(e)) self.assertIn("unexpected=['unexpected_param']", str(e)) def test_on_action_update(self): workflow = """ version: '2.0' wf_async: type: direct tasks: task1: action: std.async_noop on-success: - task2 task2: action: std.noop """ # Start workflow. wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf_async') self.assertIsNotNone(wf_ex) self.assertEqual(states.RUNNING, wf_ex.state) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task1_ex = task_execs[0] self.assertEqual('task1', task1_ex.name) self.assertEqual(states.RUNNING, task1_ex.state) action_execs = db_api.get_action_executions( task_execution_id=task1_ex.id ) self.assertEqual(1, len(action_execs)) task1_action_ex = action_execs[0] self.assertEqual(states.RUNNING, task1_action_ex.state) # Pause action execution of 'task1'. task1_action_ex = self.engine.on_action_update( task1_action_ex.id, states.PAUSED ) self.assertIsInstance(task1_action_ex, models.ActionExecution) self.assertEqual(states.PAUSED, task1_action_ex.state) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self.assertEqual(states.PAUSED, task_execs[0].state) self.assertEqual(states.PAUSED, wf_ex.state) action_execs = db_api.get_action_executions( task_execution_id=task1_ex.id ) self.assertEqual(1, len(action_execs)) task1_action_ex = action_execs[0] self.assertEqual(states.PAUSED, task1_action_ex.state) def test_on_action_update_non_async(self): workflow = """ version: '2.0' wf_sync: type: direct tasks: task1: action: std.noop on-success: - task2 task2: action: std.noop """ # Start workflow. wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf_sync') self.assertIsNotNone(wf_ex) self.assertEqual(states.RUNNING, wf_ex.state) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
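            # std.noop is synchronous, so the attempt below to pause its
            # action execution must be rejected as an invalid state
            # transition.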
wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task1_ex = task_execs[0] self.assertEqual('task1', task1_ex.name) self.assertEqual(states.RUNNING, task1_ex.state) action_execs = db_api.get_action_executions( task_execution_id=task1_ex.id ) self.assertEqual(1, len(action_execs)) task1_action_ex = action_execs[0] self.assertEqual(states.RUNNING, task1_action_ex.state) self.assertRaises( exc.InvalidStateTransitionException, self.engine.on_action_update, task1_action_ex.id, states.PAUSED ) def test_on_action_complete(self): wf_input = {'param1': 'Hey', 'param2': 'Hi'} # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf', wf_input=wf_input, task_name='task2' ) self.assertIsNotNone(wf_ex) self.assertEqual(states.RUNNING, wf_ex.state) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task1_ex = task_execs[0] self.assertEqual('task1', task1_ex.name) self.assertEqual(states.RUNNING, task1_ex.state) self.assertIsNotNone(task1_ex.spec) self.assertDictEqual({}, task1_ex.runtime_context) self.assertNotIn('__execution', task1_ex.in_context) action_execs = db_api.get_action_executions( task_execution_id=task1_ex.id ) self.assertEqual(1, len(action_execs)) task1_action_ex = action_execs[0] self.assertIsNotNone(task1_action_ex) self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input) # Finish action of 'task1'. task1_action_ex = self.engine.on_action_complete( task1_action_ex.id, ml_actions.Result(data='Hey') ) self.assertIsInstance(task1_action_ex, models.ActionExecution) self.assertEqual('std.echo', task1_action_ex.name) self.assertEqual(states.SUCCESS, task1_action_ex.state) # Data Flow properties. task1_ex = db_api.get_task_execution(task1_ex.id) # Re-read the state. self.assertDictEqual({'var': 'Hey'}, task1_ex.published) self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input) self.assertDictEqual({'result': 'Hey'}, task1_action_ex.output) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertIsNotNone(wf_ex) self.assertEqual(states.RUNNING, wf_ex.state) task_execs = wf_ex.task_executions self.assertEqual(2, len(task_execs)) task2_ex = self._assert_single_item(task_execs, name='task2') self.assertEqual(states.RUNNING, task2_ex.state) action_execs = db_api.get_action_executions( task_execution_id=task2_ex.id ) self.assertEqual(1, len(action_execs)) task2_action_ex = action_execs[0] self.assertIsNotNone(task2_action_ex) self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input) # Finish 'task2'. task2_action_ex = self.engine.on_action_complete( task2_action_ex.id, ml_actions.Result(data='Hi') ) self._await( lambda: db_api.get_workflow_execution(wf_ex.id).state == states.SUCCESS ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertIsNotNone(wf_ex) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsInstance(task2_action_ex, models.ActionExecution) self.assertEqual('std.echo', task2_action_ex.name) self.assertEqual(states.SUCCESS, task2_action_ex.state) # Data Flow properties. 
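        # task2 declares no 'publish' clause, so its published dict is
        # expected to stay empty.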
self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input) self.assertDictEqual({}, task2_ex.published) self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input) self.assertDictEqual({'result': 'Hi'}, task2_action_ex.output) self.assertEqual(2, len(task_execs)) self._assert_single_item(task_execs, name='task1') self._assert_single_item(task_execs, name='task2') def test_stop_workflow_fail(self): # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf', wf_input={ 'param1': 'Hey', 'param2': 'Hi' }, task_name="task2" ) # Re-read execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) self.engine.stop_workflow(wf_ex.id, 'ERROR', "Stop this!") # Re-read from DB again wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual('ERROR', wf_ex.state) self.assertEqual("Stop this!", wf_ex.state_info) def test_stop_workflow_succeed(self): # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf', wf_input={ 'param1': 'Hey', 'param2': 'Hi' }, task_name="task2" ) # Re-read execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) self.engine.stop_workflow(wf_ex.id, 'SUCCESS', "Like this, done") # Re-read from DB again wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual('SUCCESS', wf_ex.state) self.assertEqual("Like this, done", wf_ex.state_info) def test_stop_workflow_bad_status(self): wf_ex = self.engine.start_workflow( 'wb.wf', wf_input={ 'param1': 'Hey', 'param2': 'Hi' }, task_name="task2" ) # Re-read execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertNotEqual( 'PAUSE', self.engine.stop_workflow(wf_ex.id, 'PAUSE') ) def test_resume_workflow(self): # TODO(akhmerov): Implement. pass def test_report_running_actions(self): wf_input = {'param1': 'Hey', 'param2': 'Hi'} # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf', '', wf_input=wf_input, description='my execution', task_name='task2' ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_ex = task_execs[0] action_execs = db_api.get_action_executions( task_execution_id=task_ex.id ) task_action_ex = action_execs[0] self.engine.process_action_heartbeats([]) self.engine.process_action_heartbeats([None, None]) self.engine.process_action_heartbeats([None, task_action_ex.id]) task_action_ex = db_api.get_action_execution(task_action_ex.id) self.assertIsNotNone(task_action_ex.last_heartbeat) class DefaultEngineWithTransportTest(eng_test_base.EngineTestCase): def test_engine_client_remote_error(self): mocked = mock.Mock() mocked.sync_call.side_effect = rpc_client.RemoteError( 'InputException', 'Input is wrong' ) self.engine_client._client = mocked self.assertRaises( exc.InputException, self.engine_client.start_workflow, 'some_wf', {}, 'some_description' ) def test_engine_client_remote_error_arbitrary(self): mocked = mock.Mock() mocked.sync_call.side_effect = KeyError('wrong key') self.engine_client._client = mocked exception = self.assertRaises( exc.MistralException, self.engine_client.start_workflow, 'some_wf', {}, 'some_description' ) self.assertIn('KeyError: wrong key', str(exception)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_direct_workflow.py0000644000175000017500000007701300000000000026001 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.lang import parser as spec_parser from mistral.scheduler import base as sched_base from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states from mistral_lib import actions as ml_actions # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class DirectWorkflowEngineTest(base.EngineTestCase): def _run_workflow(self, wf_text, expected_state=states.ERROR): wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_state(wf_ex.id, expected_state) return db_api.get_workflow_execution(wf_ex.id) def test_on_closures(self): wf_text = """ version: '2.0' wf: # type: direct - 'direct' is default tasks: task1: description: | Explicit 'succeed' command should lead to workflow success. action: std.echo output="Echo" on-success: - task2 - succeed on-complete: - task3 - task4 - fail - never_gets_here task2: action: std.noop task3: action: std.noop task4: action: std.noop never_gets_here: action: std.noop """ wf_ex = self._run_workflow(wf_text, expected_state=states.SUCCESS) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertEqual(2, len(tasks)) self.await_task_success(task1.id) self.await_task_success(task2.id) self.assertEqual(wf_ex.state, states.SUCCESS) def test_condition_transition_not_triggering(self): wf_text = """--- version: '2.0' wf: input: - var: null tasks: task1: action: std.fail on-success: - task2 on-error: - task3: <% $.var != null %> task2: action: std.noop task3: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(1, len(tasks)) self.await_task_error(task1.id) self.assertEqual(wf_ex.state, states.ERROR) def test_change_state_after_success(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.echo output="Echo" on-success: - task2 task2: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) self.assertEqual( states.SUCCESS, self.engine.resume_workflow(wf_ex.id).state ) self.assertRaises( exc.WorkflowException, self.engine.pause_workflow, wf_ex.id ) self.assertEqual( states.SUCCESS, self.engine.stop_workflow(wf_ex.id, states.ERROR).state ) def test_task_not_updated(self): wf_text = """ version: 2.0 wf: tasks: task1: action: std.echo input: output: <% task().result.content %> """ 
wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) self.assertEqual( states.SUCCESS, self.engine.resume_workflow(wf_ex.id).state ) self.assertRaises( exc.WorkflowException, self.engine.pause_workflow, wf_ex.id ) self.assertEqual( states.SUCCESS, self.engine.stop_workflow(wf_ex.id, states.ERROR).state ) def test_wrong_task_input(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: description: Wrong task output should lead to workflow failure action: std.echo wrong_input="Hahaha" """ wf_ex = self._run_workflow(wf_text) self.assertIn('Invalid input', wf_ex.state_info) self.assertEqual(wf_ex.state, states.ERROR) def test_wrong_first_task_input(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo wrong_input="Ha-ha" """ wf_ex = self._run_workflow(wf_text) self.assertIn("Invalid input", wf_ex.state_info) self.assertEqual(states.ERROR, wf_ex.state) def test_wrong_action(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: action.doesnt_exist """ wf_ex = self._run_workflow(wf_text) # TODO(dzimine): Catch tasks caused error, and set them to ERROR: # TODO(dzimine): self.assertTrue(task_ex.state, states.ERROR) self.assertEqual(wf_ex.state, states.ERROR) self.assertIn("Failed to find action", wf_ex.state_info) def test_wrong_action_first_task(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: wrong.task """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.assertIn( "Failed to find action [action_name=wrong.task]", wf_ex.state_info ) self.assertEqual(states.ERROR, wf_ex.state) def test_next_task_with_input_yaql_error(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output=<% wrong(yaql) %> """ # Invoke workflow and assert workflow is in ERROR. wf_ex = self._run_workflow(wf_text) self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(2, len(task_execs)) # 'task1' should be in SUCCESS. task_1_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) # 'task1' should have exactly one action execution (in SUCCESS). task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) # 'task2' should exist but in ERROR. task_2_ex = self._assert_single_item( task_execs, name='task2', state=states.ERROR ) # 'task2' must not have action executions. self.assertEqual( 0, len(db_api.get_action_executions(task_execution_id=task_2_ex.id)) ) def test_async_next_task_with_input_yaql_error(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.async_noop on-complete: - task2 task2: action: std.echo output=<% wrong(yaql) %> """ # Invoke workflow and assert workflow, task, # and async action execution are RUNNING. 
wf_ex = self._run_workflow(wf_text, states.RUNNING) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.RUNNING, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) # Update async action execution result. self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(data='foobar') ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info) self.assertEqual(2, len(task_execs)) # 'task1' must be in SUCCESS. task_1_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) # 'task1' must have exactly one action execution (in SUCCESS). task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) # 'task2' must be in ERROR. task_2_ex = self._assert_single_item( task_execs, name='task2', state=states.ERROR ) # 'task2' must not have action executions. self.assertEqual( 0, len(db_api.get_action_executions(task_execution_id=task_2_ex.id)) ) def test_join_all_task_with_input_jinja_error(self): wf_def = """--- version: '2.0' wf: tasks: task_1_1: action: std.sleep seconds=1 on-success: - task_2 task_1_2: on-success: - task_2 task_2: action: std.echo join: all input: output: | !! {{ _.nonexistent_variable }} !!""" wf_service.create_workflows(wf_def) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(3, len(tasks)) task_1_1 = self._assert_single_item( tasks, name="task_1_1", state=states.SUCCESS ) task_1_2 = self._assert_single_item( tasks, name="task_1_2", state=states.SUCCESS ) task_2 = self._assert_single_item( tasks, name="task_2", state=states.ERROR ) with db_api.transaction(): task_1_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_1.id) task_1_2_action_exs = db_api.get_action_executions( task_execution_id=task_1_2.id) task_2_action_exs = db_api.get_action_executions( task_execution_id=task_2.id) self.assertEqual(1, len(task_1_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_1_action_exs[0].state) self.assertEqual(1, len(task_1_2_action_exs)) self.assertEqual(states.SUCCESS, task_1_2_action_exs[0].state) self.assertEqual(0, len(task_2_action_exs)) def test_second_task_with_input_jinja_error(self): wf_def = """--- version: '2.0' wf: tasks: first: on-success: - second second: action: std.echo input: output: | !! 
{{ _.nonexistent_variable }} !!""" wf_service.create_workflows(wf_def) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(2, len(tasks)) task_first = self._assert_single_item( tasks, name="first", state=states.SUCCESS ) task_second = self._assert_single_item( tasks, name="second", state=states.ERROR ) with db_api.transaction(): first_tasks_action_exs = db_api.get_action_executions( task_execution_id=task_first.id) second_tasks_action_exs = db_api.get_action_executions( task_execution_id=task_second.id) self.assertEqual(1, len(first_tasks_action_exs)) self.assertEqual(states.SUCCESS, first_tasks_action_exs[0].state) self.assertEqual(0, len(second_tasks_action_exs)) def test_messed_yaql_in_first_task(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output=<% wrong(yaql) %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.assertIn( "Can not evaluate YAQL expression [expression=wrong(yaql)", wf_ex.state_info ) self.assertEqual(states.ERROR, wf_ex.state) def test_mismatched_yaql_in_first_task(self): wf_text = """ version: '2.0' wf: input: - var tasks: task1: action: std.echo output=<% $.var + $.var2 %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf', wf_input={'var': 2}) self.assertIn("Can not evaluate YAQL expression", wf_ex.state_info) self.assertEqual(states.ERROR, wf_ex.state) def test_one_line_syntax_in_on_clauses(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output=1 on-success: task2 task2: action: std.echo output=1 on-complete: task3 task3: action: std.fail on-error: task4 task4: action: std.echo output=4 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) def test_task_on_clause_has_yaql_error(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.noop on-success: - task2: <% wrong(yaql) %> task2: action: std.noop """ # Invoke workflow and assert workflow is in ERROR. wf_ex = self._run_workflow(wf_text) self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions # Assert that there is only one task execution and it's SUCCESS. self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item( task_execs, name='task1' ) self.assertEqual(states.ERROR, task_1_ex.state) # Assert that there is only one action execution and it's SUCCESS. task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) def test_async_task_on_clause_has_yaql_error(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.async_noop on-complete: - task2: <% wrong(yaql) %> task2: action: std.noop """ # Invoke workflow and assert workflow, task, # and async action execution are RUNNING. 
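        # The broken YAQL in 'on-complete' cannot be evaluated until the
        # async action delivers its result, so the workflow first stays
        # in RUNNING.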
wf_ex = self._run_workflow(wf_text, states.RUNNING) self.assertEqual(states.RUNNING, wf_ex.state) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.RUNNING, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) # Update async action execution result. self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(data='foobar') ) # Assert that task1 is SUCCESS and workflow is ERROR. with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info) self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.ERROR, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) def test_inconsistent_task_names(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop on-success: task3 task2: action: std.noop """ exception = self.assertRaises( exc.InvalidModelException, wf_service.create_workflows, wf_text ) self.assertIn("Task 'task3' not found", str(exception)) def test_delete_workflow_integrity_check_on_stop(self): wf_text = """--- version: '2.0' wf: tasks: async_task: action: std.async_noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.engine.stop_workflow(wf_ex.id, state=states.CANCELLED) sched = sched_base.get_system_scheduler() self._await(lambda: not sched.has_scheduled_jobs()) def test_delete_workflow_integrity_check_on_execution_delete(self): wf_text = """--- version: '2.0' wf: tasks: async_task: action: std.async_noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') db_api.delete_workflow_execution(wf_ex.id) sched = sched_base.get_system_scheduler() self._await(lambda: not sched.has_scheduled_jobs()) def test_output(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output="Hi Mistral!" 
              on-success: task2

            task2:
              action: std.noop
        """

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertDictEqual({}, wf_ex.output)

    def test_output_expression(self):
        wf_text = """---
        version: '2.0'

        wf:
          output:
            continue_flag: <% $.continue_flag %>

          task-defaults:
            on-error:
              - task2

          tasks:
            task1:
              action: std.fail
              on-success: task3

            task2:
              action: std.noop
              publish:
                continue_flag: false

            task3:
              action: std.noop
        """

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertEqual(2, len(wf_ex.task_executions))
            self.assertDictEqual({'continue_flag': False}, wf_ex.output)

    def test_triggered_by(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.noop
              on-success: task2

            task2:
              action: std.fail
              on-error: task3

            task3:
              action: std.fail
              on-error: noop
              on-success: task4
              on-complete: task4

            task4:
              action: std.noop
        """

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            task1 = self._assert_single_item(task_execs, name='task1')
            task2 = self._assert_single_item(task_execs, name='task2')
            task3 = self._assert_single_item(task_execs, name='task3')
            task4 = self._assert_single_item(task_execs, name='task4')

            key = 'triggered_by'

            self.assertIsNone(task1.runtime_context.get(key))

            self.assertListEqual(
                [
                    {
                        "task_id": task1.id,
                        "event": "on-success"
                    }
                ],
                task2.runtime_context.get(key)
            )

            self.assertListEqual(
                [
                    {
                        "task_id": task2.id,
                        "event": "on-error"
                    }
                ],
                task3.runtime_context.get(key)
            )

            self.assertListEqual(
                [
                    {
                        "task_id": task3.id,
                        "event": "on-complete"
                    }
                ],
                task4.runtime_context.get(key)
            )

    def test_task_in_context_immutability(self):
        wf_text = """---
        version: '2.0'

        wf:
          description: |
            The idea of this workflow is to have two parallel branches and
            publish different data in these branches. When the workflow has
            completed, we need to check that, throughout the engine's internal
            manipulations with the workflow contexts belonging to different
            branches, the inbound contexts of all tasks kept their initial
            values.

          tasks:
            # Start task.
            task0:
              publish:
                var0: val0
              on-success:
                - task1_1
                - task2_1

            task1_1:
              publish:
                var1: val1
              on-success: task1_2

            # The last task in the 1st branch.
            task1_2:
              action: std.noop

            task2_1:
              publish:
                var2: val2
              on-success: task2_2

            # The last task in the 2nd branch.
            task2_2:
              action: std.noop
        """

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks_execs = wf_ex.task_executions

            task0_ex = self._assert_single_item(tasks_execs, name='task0')
            task1_1_ex = self._assert_single_item(tasks_execs, name='task1_1')
            task1_2_ex = self._assert_single_item(tasks_execs, name='task1_2')
            task2_1_ex = self._assert_single_item(tasks_execs, name='task2_1')
            task2_2_ex = self._assert_single_item(tasks_execs, name='task2_2')

            self.assertDictEqual({}, task0_ex.in_context)
            self.assertDictEqual({'var0': 'val0'}, task1_1_ex.in_context)
            self.assertDictEqual(
                {
                    'var0': 'val0',
                    'var1': 'val1'
                },
                task1_2_ex.in_context
            )
            self.assertDictEqual({'var0': 'val0'}, task2_1_ex.in_context)
            self.assertDictEqual(
                {
                    'var0': 'val0',
                    'var2': 'val2'
                },
                task2_2_ex.in_context
            )

    def test_big_on_closures(self):
        # The idea of the test is to run a workflow with a big 'on-success'
        # list of tasks and a big task inbound context ('task_ex.in_context')
        # and observe how it influences memory consumption and performance.
        # The test doesn't have any assertions related to memory (CPU) usage
        # because they are quite difficult to make. Particular metrics may
        # vary from run to run and also depend on the platform.
        sub_wf_text = """
        version: '2.0'

        sub_wf:
          tasks:
            task1:
              action: std.noop
        """

        wf_text = """
        version: '2.0'

        wf:
          tasks:
            task01:
              action: std.noop
              on-success: task02

            task02:
              action: std.test_dict size=1000 key_prefix='key' val='val'
              publish:
                continue_flag: true
                data: <% task().result %>
              on-success: task0

            task0:
              workflow: sub_wf
              on-success: {{{__ON_SUCCESS_LIST__}}}

        {{{__TASK_LIST__}}}
        """

        # Generate the workflow text.
        task_cnt = 50

        on_success_list_str = ''

        for i in range(1, task_cnt + 1):
            on_success_list_str += (
                '\n                - task{}: '
                '<% $.continue_flag = true %>'.format(i)
            )

        wf_text = wf_text.replace(
            '{{{__ON_SUCCESS_LIST__}}}',
            on_success_list_str
        )

        task_list_str = ''

        task_template = """
            task{}:
              action: std.noop
        """

        for i in range(1, task_cnt + 1):
            task_list_str += task_template.format(i)

        wf_text = wf_text.replace('{{{__TASK_LIST__}}}', task_list_str)

        wf_service.create_workflows(sub_wf_text)
        wf_service.create_workflows(wf_text)

        # Start the workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id, timeout=60)

        self.assertEqual(2, spec_parser.get_wf_execution_spec_cache_size())

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(task_cnt + 3, len(task_execs))

            self._assert_single_item(task_execs, name='task0')
            self._assert_single_item(
                task_execs,
                name='task{}'.format(task_cnt)
            )

    def test_action_error_with_array_result(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.fail error_data=[1,2,3]
        """

        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = self._assert_single_item(
                wf_ex.task_executions,
                name='task1',
                state=states.ERROR
            )

            a_ex = self._assert_single_item(
                task_ex.action_executions,
                name='std.fail'
            )

            self.assertIsInstance(a_ex.output.get('result'), list)

            # NOTE(rakhmerov): This was previously failing, but only on
            # Python 2.7, probably because SQLAlchemy works differently on
            # different versions of Python. On Python 3 this field's value
            # was always converted into a string no matter what we tried to
            # assign. But that didn't happen on Python 2.7, which caused an
            # SQL exception.
            self.assertIsInstance(task_ex.state_info, six.string_types)

    def test_single_fail_with_next_noop(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.fail
              on-error:
                - noop
        """

        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

    def test_unexisting_join_task_does_not_stuck_wf_running(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            branch1:
              action: std.noop
              on-success: branch1-23_merge

            branch2:
              action: std.async_noop
              on-success: branch2-3_merge

            branch3:
              action: std.fail
              on-success: branch2-3_merge

            branch2-3_merge:
              action: std.noop
              on-success: branch1-23_merge
              join: all

            branch1-23_merge:
              action: std.noop
              join: all
        """

        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            t_ex = self._assert_single_item(
                task_execs,
                name='branch2'
            )

            t_action_exs = db_api.get_action_executions(
                task_execution_id=t_ex.id
            )

        self.engine.on_action_complete(
            t_action_exs[0].id,
            ml_actions.Result(error="Error!")
        )

        self.await_workflow_error(wf_ex.id)


# ---- mistral-10.0.0.0b3/mistral/tests/unit/engine/test_direct_workflow_rerun.py ----

# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from oslo_config import cfg
import testtools

from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.services import workbooks as wb_service
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import states


# Use the set_default method to set the value; otherwise, in certain test
# cases the change in value is not permanent.
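# NOTE: a default set this way survives the config-override cleanup that the
# test fixtures perform between tests, whereas an explicit override would be
# reverted. A rough sketch of the difference (not executed here):
#
#     cfg.CONF.set_override('auth_enable', False, group='pecan')
#     cfg.CONF.clear_override('auth_enable', group='pecan')  # back to default
#
#     cfg.CONF.set_default('auth_enable', False, group='pecan')  # persists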
cfg.CONF.set_default('auth_enable', False, group='pecan') SIMPLE_WORKBOOK = """ --- version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t1: action: std.echo output="Task 1" on-success: - t2 t2: action: std.echo output="Task 2" on-success: - t3 t3: action: std.echo output="Task 3" """ SIMPLE_WORKBOOK_DIFF_ENV_VAR = """ --- version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t10: action: std.echo output="Task 10" on-success: - t21 - t30 t21: action: std.echo output=<% env().var1 %> on-success: - t22 t22: action: std.echo output="<% env().var2 %>" on-success: - t30 t30: join: all action: std.echo output="<% env().var3 %>" wait-before: 1 """ WITH_ITEMS_WORKBOOK = """ --- version: '2.0' name: wb3 workflows: wf1: type: direct tasks: t1: with-items: i in <% list(range(0, 3)) %> action: std.echo output="Task 1.<% $.i %>" publish: v1: <% task(t1).result %> on-success: - t2 t2: action: std.echo output="Task 2" """ WITH_ITEMS_WORKBOOK_DIFF_ENV_VAR = """ --- version: '2.0' name: wb3 workflows: wf1: type: direct tasks: t1: with-items: i in <% list(range(0, 3)) %> action: std.echo output="Task 1.<% $.i %> [<% env().var1 %>]" publish: v1: <% task(t1).result %> on-success: - t2 t2: action: std.echo output="Task 2" """ WITH_ITEMS_WORKBOOK_CONCURRENCY = """ --- version: '2.0' name: wb3 workflows: wf1: type: direct tasks: t1: with-items: i in <% list(range(0, 4)) %> action: std.echo output="Task 1.<% $.i %>" concurrency: 2 publish: v1: <% task(t1).result %> on-success: - t2 t2: action: std.echo output="Task 2" """ JOIN_WORKBOOK = """ --- version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t0: action: std.noop on-success: - t3 t1: action: std.echo output="Task 1" on-success: - t3 t2: action: std.echo output="Task 2" on-success: - t3 t3: action: std.echo output="Task 3" join: all """ SUBFLOW_WORKBOOK = """ version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t1: action: std.echo output="Task 1" on-success: - t2 t2: workflow: wf2 on-success: - t3 t3: action: std.echo output="Task 3" wf2: type: direct output: result: <% task(wf2_t1).result %> tasks: wf2_t1: action: std.echo output="Task 2" """ class DirectWorkflowRerunTest(base.EngineTestCase): @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1', # Mock task1 success for initial run. exc.ActionException(), # Mock task2 exception for initial run. 'Task 2', # Mock task2 success for rerun. 'Task 3' # Mock task3 success. ] ) ) def test_rerun(self): wb_service.create_workbook_v2(SIMPLE_WORKBOOK) # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(2, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertEqual(states.ERROR, task_2_ex.state) self.assertIsNotNone(task_2_ex.state_info) # Resume workflow and re-run failed task. self.engine.rerun_workflow(task_2_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) # Wait for the workflow to succeed. 
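        # NOTE: the await_* helpers poll the DB until the execution reaches
        # the expected state or a timeout expires; the engine processes
        # everything asynchronously, so assertions must wait for convergence.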
self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(3, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') task_3_ex = self._assert_single_item(task_execs, name='t3') # Check action executions of task 1. self.assertEqual(states.SUCCESS, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) # Check action executions of task 2. self.assertEqual(states.SUCCESS, task_2_ex.state) self.assertIsNone(task_2_ex.state_info) task_2_action_exs = db_api.get_action_executions( task_execution_id=task_2_ex.id ) self.assertEqual(2, len(task_2_action_exs)) # Check there is exactly 1 action in Success and 1 in error state. # Order doesn't matter. self._assert_single_item(task_2_action_exs, state=states.SUCCESS) self._assert_single_item(task_2_action_exs, state=states.ERROR) # Check action executions of task 3. self.assertEqual(states.SUCCESS, task_3_ex.state) task_3_action_exs = db_api.get_action_executions( task_execution_id=task_3_ex.id ) self.assertEqual(1, len(task_3_action_exs)) self.assertEqual(states.SUCCESS, task_3_action_exs[0].state) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 10', # Mock task10 success for first run. exc.ActionException(), # Mock task21 exception for first run. 'Task 21', # Mock task21 success for rerun. 'Task 22', # Mock task22 success. 'Task 30' # Mock task30 success. ] ) ) def test_rerun_diff_env_vars(self): wb_service.create_workbook_v2(SIMPLE_WORKBOOK_DIFF_ENV_VAR) # Initial environment variables for the workflow execution. env = { 'var1': 'fee fi fo fum', 'var2': 'mirror mirror', 'var3': 'heigh-ho heigh-ho' } # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb1.wf1', env=env) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(3, len(task_execs)) self.assertDictEqual(env, wf_ex.params['env']) task_10_ex = self._assert_single_item(task_execs, name='t10') task_21_ex = self._assert_single_item(task_execs, name='t21') task_30_ex = self._assert_single_item(task_execs, name='t30') self.assertEqual(states.SUCCESS, task_10_ex.state) self.assertEqual(states.ERROR, task_21_ex.state) self.assertIsNotNone(task_21_ex.state_info) self.assertEqual(states.ERROR, task_30_ex.state) # Update env in workflow execution with the following. updated_env = { 'var1': 'Task 21', 'var2': 'Task 22', 'var3': 'Task 30' } # Resume workflow and re-run failed task. wf_ex = self.engine.rerun_workflow(task_21_ex.id, env=updated_env) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertDictEqual(updated_env, wf_ex.params['env']) # Await t30 success. self.await_task_success(task_30_ex.id) # Wait for the workflow to succeed. 
self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(4, len(task_execs)) task_10_ex = self._assert_single_item(task_execs, name='t10') task_21_ex = self._assert_single_item(task_execs, name='t21') task_22_ex = self._assert_single_item(task_execs, name='t22') task_30_ex = self._assert_single_item(task_execs, name='t30') # Check action executions of task 10. self.assertEqual(states.SUCCESS, task_10_ex.state) task_10_action_exs = db_api.get_action_executions( task_execution_id=task_10_ex.id ) self.assertEqual(1, len(task_10_action_exs)) self.assertEqual(states.SUCCESS, task_10_action_exs[0].state) self.assertDictEqual( {'output': 'Task 10'}, task_10_action_exs[0].input ) # Check action executions of task 21. self.assertEqual(states.SUCCESS, task_21_ex.state) self.assertIsNone(task_21_ex.state_info) task_21_action_exs = db_api.get_action_executions( task_execution_id=task_21_ex.id ) self.assertEqual(2, len(task_21_action_exs)) # Check there is exactly 1 action in Success and 1 in error state. # Order doesn't matter. task_21_action_exs_1 = self._assert_single_item( task_21_action_exs, state=states.ERROR ) task_21_action_exs_2 = self._assert_single_item( task_21_action_exs, state=states.SUCCESS ) self.assertDictEqual( {'output': env['var1']}, task_21_action_exs_1.input ) self.assertDictEqual( {'output': updated_env['var1']}, task_21_action_exs_2.input ) # Check action executions of task 22. self.assertEqual(states.SUCCESS, task_22_ex.state) task_22_action_exs = db_api.get_action_executions( task_execution_id=task_22_ex.id ) self.assertEqual(1, len(task_22_action_exs)) self.assertEqual(states.SUCCESS, task_22_action_exs[0].state) self.assertDictEqual( {'output': updated_env['var2']}, task_22_action_exs[0].input ) # Check action executions of task 30. self.assertEqual(states.SUCCESS, task_30_ex.state) task_30_action_exs = db_api.get_action_executions( task_execution_id=task_30_ex.id ) self.assertEqual(1, len(task_30_action_exs)) self.assertEqual(states.SUCCESS, task_30_action_exs[0].state) self.assertDictEqual( {'output': updated_env['var3']}, task_30_action_exs[0].input ) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1', # Mock task1 success for initial run. exc.ActionException() # Mock task2 exception for initial run. ] ) ) def test_rerun_from_prev_step(self): wb_service.create_workbook_v2(SIMPLE_WORKBOOK) # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(2, len(task_execs)) task_1_ex = self._assert_single_item( task_execs, name='t1', state=states.SUCCESS ) task_2_ex = self._assert_single_item( task_execs, name='t2', state=states.ERROR ) self.assertIsNotNone(task_2_ex.state_info) # Resume workflow and re-run failed task. e = self.assertRaises( exc.MistralError, self.engine.rerun_workflow, task_1_ex.id ) self.assertIn('not supported', str(e)) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ exc.ActionException(), # Mock task1 exception for initial run. 'Task 1.1', # Mock task1 success for initial run. exc.ActionException(), # Mock task1 exception for initial run. 
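                # NOTE: side_effect values are consumed one per call, in
                # order. The three entries above cover items 0..2 of the
                # initial with-items run; the remaining entries are consumed
                # by the rerun, which (with reset=False) re-executes only the
                # two failed items: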
'Task 1.0', # Mock task1 success for rerun. 'Task 1.2', # Mock task1 success for rerun. 'Task 2' # Mock task2 success. ] ) ) def test_rerun_with_items(self): wb_service.create_workbook_v2(WITH_ITEMS_WORKBOOK) # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb3.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') self.assertEqual(states.ERROR, task_1_ex.state) self.assertIsNotNone(task_1_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(3, len(task_1_action_exs)) # Resume workflow and re-run failed task. self.engine.rerun_workflow(task_1_ex.id, reset=False) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') # Check action executions of task 1. self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertIsNone(task_1_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) # The single action execution that succeeded should not re-run. self.assertEqual(5, len(task_1_action_exs)) self.assertListEqual( ['Task 1.0', 'Task 1.1', 'Task 1.2'], task_1_ex.published.get('v1') ) # Check action executions of task 2. self.assertEqual(states.SUCCESS, task_2_ex.state) task_2_action_exs = db_api.get_action_executions( task_execution_id=task_2_ex.id ) self.assertEqual(1, len(task_2_action_exs)) @testtools.skip('Restore concurrency support.') @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ exc.ActionException(), # Mock task1 exception for initial run. 'Task 1.1', # Mock task1 success for initial run. exc.ActionException(), # Mock task1 exception for initial run. 'Task 1.3', # Mock task1 success for initial run. 'Task 1.0', # Mock task1 success for rerun. 'Task 1.2', # Mock task1 success for rerun. 'Task 2' # Mock task2 success. ] ) ) def test_rerun_with_items_concurrency(self): wb_service.create_workbook_v2(WITH_ITEMS_WORKBOOK_CONCURRENCY) # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb3.wf1') self.await_workflow_error(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(1, len(wf_ex.task_executions)) task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1') self.assertEqual(states.ERROR, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(4, len(task_1_action_exs)) # Resume workflow and re-run failed task. 
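        # NOTE: reset=False makes the engine re-run only the failed items of
        # the with-items task and keep the results of the items that already
        # succeeded; reset=True would discard all results and re-run every
        # item.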
self.engine.rerun_workflow(task_1_ex.id, reset=False) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.await_workflow_success(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(wf_ex.task_executions)) task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1') task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2') # Check action executions of task 1. self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertIsNone(task_1_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) # The action executions that succeeded should not re-run. self.assertEqual(6, len(task_1_action_exs)) self.assertListEqual(['Task 1.0', 'Task 1.1', 'Task 1.2', 'Task 1.3'], task_1_ex.published.get('v1')) # Check action executions of task 2. self.assertEqual(states.SUCCESS, task_2_ex.state) task_2_action_exs = db_api.get_action_executions( task_execution_id=task_2_ex.id ) self.assertEqual(1, len(task_2_action_exs)) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ exc.ActionException(), # Mock task1 exception for initial run. 'Task 1.1', # Mock task1 success for initial run. exc.ActionException(), # Mock task1 exception for initial run. 'Task 1.0', # Mock task1 success for rerun. 'Task 1.2', # Mock task1 success for rerun. 'Task 2' # Mock task2 success. ] ) ) def test_rerun_with_items_diff_env_vars(self): wb_service.create_workbook_v2(WITH_ITEMS_WORKBOOK_DIFF_ENV_VAR) # Initial environment variables for the workflow execution. env = {'var1': 'fee fi fo fum'} # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb3.wf1', env=env) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') self.assertEqual(states.ERROR, task_1_ex.state) self.assertIsNotNone(task_1_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(3, len(task_1_action_exs)) # Update env in workflow execution with the following. updated_env = {'var1': 'foobar'} # Resume workflow and re-run failed task. self.engine.rerun_workflow( task_1_ex.id, reset=False, env=updated_env ) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') # Check action executions of task 1. self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertIsNone(task_1_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) expected_inputs = [ 'Task 1.0 [%s]' % env['var1'], # Task 1 item 0 (error). 'Task 1.1 [%s]' % env['var1'], # Task 1 item 1. 'Task 1.2 [%s]' % env['var1'], # Task 1 item 2 (error). 
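            # After the rerun with the updated environment, only the two
            # failed items run again, now rendering <% env().var1 %> from
            # updated_env: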
'Task 1.0 [%s]' % updated_env['var1'], # Task 1 item 0 (rerun). 'Task 1.2 [%s]' % updated_env['var1'] # Task 1 item 2 (rerun). ] # Assert that every expected input is in actual task input. for action_ex in task_1_action_exs: self.assertIn(action_ex.input['output'], expected_inputs) # Assert that there was same number of unique inputs as action execs. self.assertEqual( len(task_1_action_exs), len(set( [action_ex.input['output'] for action_ex in task_1_action_exs] )) ) # Check action executions of task 2. self.assertEqual(states.SUCCESS, task_2_ex.state) task_2_action_exs = db_api.get_action_executions( task_execution_id=task_2_ex.id ) self.assertEqual(1, len(task_2_action_exs)) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1', # Mock task1 success for initial run. 'Task 2', # Mock task2 success for initial run. exc.ActionException(), # Mock task3 exception for initial run. 'Task 3' # Mock task3 success for rerun. ] ) ) def test_rerun_on_join_task(self): wb_service.create_workbook_v2(JOIN_WORKBOOK) # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb1.wf1') wf_ex = db_api.get_workflow_execution(wf_ex.id) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(4, len(task_execs)) task_0_ex = self._assert_single_item(task_execs, name='t0') task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') task_3_ex = self._assert_single_item(task_execs, name='t3') self.assertEqual(states.SUCCESS, task_0_ex.state) self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertEqual(states.SUCCESS, task_2_ex.state) self.assertEqual(states.ERROR, task_3_ex.state) self.assertIsNotNone(task_3_ex.state_info) # Resume workflow and re-run failed task. self.engine.rerun_workflow(task_3_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) # Wait for the workflow to succeed. self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(4, len(task_execs)) task_0_ex = self._assert_single_item(task_execs, name='t0') task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') task_3_ex = self._assert_single_item(task_execs, name='t3') # Check action executions of task 0. self.assertEqual(states.SUCCESS, task_0_ex.state) task_0_action_exs = db_api.get_action_executions( task_execution_id=task_0_ex.id ) self.assertEqual(1, len(task_0_action_exs)) self.assertEqual(states.SUCCESS, task_0_action_exs[0].state) # Check action executions of task 1. self.assertEqual(states.SUCCESS, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) # Check action executions of task 2. self.assertEqual(states.SUCCESS, task_2_ex.state) task_2_action_exs = db_api.get_action_executions( task_execution_id=task_2_ex.id ) self.assertEqual(1, len(task_2_action_exs)) self.assertEqual(states.SUCCESS, task_2_action_exs[0].state) # Check action executions of task 3. 
self.assertEqual(states.SUCCESS, task_3_ex.state) self.assertIsNone(task_3_ex.state_info) task_3_action_exs = db_api.get_action_executions( task_execution_id=task_3_ex.id ) self.assertEqual(2, len(task_3_action_exs)) # Check there is exactly 1 action in Success and 1 in error state. # Order doesn't matter. self._assert_single_item(task_3_action_exs, state=states.SUCCESS) self._assert_single_item(task_3_action_exs, state=states.ERROR) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ exc.ActionException(), # Mock task1 exception for initial run. exc.ActionException(), # Mock task2 exception for initial run. 'Task 1', # Mock task1 success for rerun. 'Task 2', # Mock task2 success for rerun. 'Task 3' # Mock task3 success. ] ) ) def test_rerun_join_with_branch_errors(self): wb_service.create_workbook_v2(JOIN_WORKBOOK) # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(4, len(task_execs)) task_0_ex = self._assert_single_item(task_execs, name='t0') self.assertEqual(states.SUCCESS, task_0_ex.state) task_1_ex = self._assert_single_item(task_execs, name='t1') self.assertEqual(states.ERROR, task_1_ex.state) self.assertIsNotNone(task_1_ex.state_info) task_2_ex = self._assert_single_item(task_execs, name='t2') self.assertEqual(states.ERROR, task_2_ex.state) self.assertIsNotNone(task_2_ex.state_info) task_3_ex = self._assert_single_item(task_execs, name='t3') self.assertEqual(states.ERROR, task_3_ex.state) self.assertIsNotNone(task_3_ex.state_info) # Resume workflow and re-run failed task. wf_ex = self.engine.rerun_workflow(task_1_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions # Wait for the task to succeed. task_1_ex = self._assert_single_item(task_execs, name='t1') self.await_task_success(task_1_ex.id) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(4, len(task_execs)) task_0_ex = self._assert_single_item(task_execs, name='t0') task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') task_3_ex = self._assert_single_item(task_execs, name='t3') self.assertEqual(states.SUCCESS, task_0_ex.state) self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertEqual(states.ERROR, task_2_ex.state) self.assertEqual(states.ERROR, task_3_ex.state) # Check that join task did not start any action execution task_3_action_exs = db_api.get_action_executions( task_execution_id=task_3_ex.id ) self.assertEqual(0, len(task_3_action_exs)) # Resume workflow and re-run failed task. wf_ex = self.engine.rerun_workflow(task_2_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) # Join now should finally complete. self.await_task_success(task_3_ex.id) # Wait for the workflow to succeed. 
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(states.SUCCESS, wf_ex.state)
            self.assertIsNone(wf_ex.state_info)
            self.assertEqual(4, len(task_execs))

            task_0_ex = self._assert_single_item(task_execs, name='t0')
            task_1_ex = self._assert_single_item(task_execs, name='t1')
            task_2_ex = self._assert_single_item(task_execs, name='t2')
            task_3_ex = self._assert_single_item(task_execs, name='t3')

            # Check action executions of task 0.
            self.assertEqual(states.SUCCESS, task_0_ex.state)
            self.assertIsNone(task_0_ex.state_info)

            task_0_action_exs = db_api.get_action_executions(
                task_execution_id=task_0_ex.id
            )

            self.assertEqual(1, len(task_0_action_exs))

            # Check action executions of task 1.
            self.assertEqual(states.SUCCESS, task_1_ex.state)
            self.assertIsNone(task_1_ex.state_info)

            task_1_action_exs = db_api.get_action_executions(
                task_execution_id=task_1_ex.id
            )

            self.assertEqual(2, len(task_1_action_exs))

            # Check there is exactly 1 action in Success and 1 in error state.
            # Order doesn't matter.
            self._assert_single_item(task_1_action_exs, state=states.SUCCESS)
            self._assert_single_item(task_1_action_exs, state=states.ERROR)

            # Check action executions of task 2.
            self.assertEqual(states.SUCCESS, task_2_ex.state)
            self.assertIsNone(task_2_ex.state_info)

            task_2_action_exs = db_api.get_action_executions(
                task_execution_id=task_2_ex.id
            )

            self.assertEqual(2, len(task_2_action_exs))

            # Check there is exactly 1 action in Success and 1 in error state.
            # Order doesn't matter.
            self._assert_single_item(task_2_action_exs, state=states.SUCCESS)
            self._assert_single_item(task_2_action_exs, state=states.ERROR)

            # Check there is exactly 1 action execution of task 3.
            self.assertEqual(states.SUCCESS, task_3_ex.state)

            task_3_action_exs = db_api.get_action_executions(
                task_execution_id=task_3_ex.id
            )

            self.assertEqual(1, len(task_3_action_exs))
            self.assertEqual(states.SUCCESS, task_3_action_exs[0].state)

    @mock.patch.object(
        std_actions.EchoAction,
        'run',
        mock.MagicMock(
            side_effect=[
                exc.ActionException(),  # Mock task 1.0 error for run.
                'Task 1.1',             # Mock task 1.1 success for run.
                exc.ActionException(),  # Mock task 1.2 error for run.
                exc.ActionException(),  # Mock task 1.0 error for 1st rerun.
                exc.ActionException(),  # Mock task 1.2 error for 1st rerun.
                exc.ActionException(),  # Mock task 1.0 error for 2nd rerun.
                'Task 1.1',             # Mock task 1.1 success for 2nd rerun.
                exc.ActionException(),  # Mock task 1.2 error for 2nd rerun.
                exc.ActionException(),  # Mock task 1.0 error for 3rd rerun.
                exc.ActionException(),  # Mock task 1.2 error for 3rd rerun.
                'Task 1.0',             # Mock task 1.0 success for 4th rerun.
                'Task 1.2',             # Mock task 1.2 success for 4th rerun.
                'Task 2'                # Mock task 2 success.
            ]
        )
    )
    def test_multiple_reruns_with_items(self):
        wb_service.create_workbook_v2(WITH_ITEMS_WORKBOOK)

        # Run workflow and fail task.
        wf_ex = self.engine.start_workflow('wb3.wf1')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertIsNotNone(wf_ex.state_info)
        self.assertEqual(1, len(task_execs))

        task_1_ex = self._assert_single_item(task_execs, name='t1')

        self.await_task_error(task_1_ex.id)

        self.assertIsNotNone(task_1_ex.state_info)

        task_1_action_exs = db_api.get_action_executions(
            task_execution_id=task_1_ex.id
        )

        self.assertEqual(3, len(task_1_action_exs))

        # Resume workflow and re-run failed task. Re-run #1 with no reset.
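        # NOTE: the expected action-execution counts below add up as follows:
        # 3 (initial run) + 2 (rerun #1, failed items only) + 3 (rerun #2,
        # reset re-runs all three items) + 2 (rerun #3) + 2 (rerun #4) = 12.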
wf_ex = self.engine.rerun_workflow(task_1_ex.id, reset=False) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.await_workflow_error(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(5, len(task_1_action_exs)) # Resume workflow and re-run failed task. Re-run #2 with reset. self.engine.rerun_workflow(task_1_ex.id, reset=True) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.await_workflow_error(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(8, len(task_1_action_exs)) # Resume workflow and re-run failed task. Re-run #3 with no reset. self.engine.rerun_workflow(task_1_ex.id, reset=False) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.await_workflow_error(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(10, len(task_1_action_exs)) # Resume workflow and re-run failed task. Re-run #4 with no reset. self.engine.rerun_workflow(task_1_ex.id, reset=False) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') # Check action executions of task 1. self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertIsNone(task_1_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) # The single action execution that succeeded should not re-run. self.assertEqual(12, len(task_1_action_exs)) self.assertListEqual( ['Task 1.0', 'Task 1.1', 'Task 1.2'], task_1_ex.published.get('v1') ) # Check action executions of task 2. self.assertEqual(states.SUCCESS, task_2_ex.state) task_2_action_exs = db_api.get_action_executions( task_execution_id=task_2_ex.id ) self.assertEqual(1, len(task_2_action_exs)) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1', # Mock task1 success for initial run. exc.ActionException(), # Mock task2 exception for initial run. 'Task 2', # Mock task2 success for rerun. 'Task 3' # Mock task3 success. ] ) ) def test_rerun_subflow(self): wb_service.create_workbook_v2(SUBFLOW_WORKBOOK) # Run workflow and fail task. 
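        # NOTE: t2 runs sub-workflow wf2, so re-running it spawns a second
        # wf2 execution. That's why the checks below fetch the executions of
        # t2 with db_api.get_workflow_executions() instead of
        # db_api.get_action_executions().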
wf_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(2, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertEqual(states.ERROR, task_2_ex.state) self.assertIsNotNone(task_2_ex.state_info) # Resume workflow and re-run failed task. self.engine.rerun_workflow(task_2_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) # Wait for the workflow to succeed. self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(3, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') task_3_ex = self._assert_single_item(task_execs, name='t3') # Check action executions of task 1. self.assertEqual(states.SUCCESS, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) # Check action executions of task 2. self.assertEqual(states.SUCCESS, task_2_ex.state) self.assertIsNone(task_2_ex.state_info) task_2_action_exs = db_api.get_workflow_executions( task_execution_id=task_2_ex.id ) self.assertEqual(2, len(task_2_action_exs)) # Check there is exactly 1 action in Success and 1 in error state. # Order doesn't matter. self._assert_single_item(task_2_action_exs, state=states.SUCCESS) self._assert_single_item(task_2_action_exs, state=states.ERROR) # Check action executions of task 3. self.assertEqual(states.SUCCESS, task_3_ex.state) task_3_action_exs = db_api.get_action_executions( task_execution_id=task_3_ex.id ) self.assertEqual(1, len(task_3_action_exs)) self.assertEqual(states.SUCCESS, task_3_action_exs[0].state) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1', # Mock task1 success for initial run. exc.ActionException(), # Mock task2 exception for initial run. 'Task 2', # Mock task2 success for rerun. 'Task 3' # Mock task3 success. ] ) ) def test_rerun_subflow_task(self): wb_service.create_workbook_v2(SUBFLOW_WORKBOOK) # Run workflow and fail task. 
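        # NOTE: unlike test_rerun_subflow above, the rerun here targets the
        # failed task inside the sub-workflow (wf2_t1) rather than the parent
        # task t2 that launched it.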
        wf_ex = self.engine.start_workflow('wb1.wf1')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(states.ERROR, wf_ex.state)
            self.assertIsNotNone(wf_ex.state_info)
            self.assertEqual(2, len(task_execs))

            task_1_ex = self._assert_single_item(task_execs, name='t1')
            task_2_ex = self._assert_single_item(task_execs, name='t2')

            self.assertEqual(states.SUCCESS, task_1_ex.state)
            self.assertEqual(states.ERROR, task_2_ex.state)
            self.assertIsNotNone(task_2_ex.state_info)

        with db_api.transaction():
            # Get the sub-workflow execution and its task.
            sub_wf_exs = db_api.get_workflow_executions(
                task_execution_id=task_2_ex.id
            )

            sub_wf_ex = sub_wf_exs[0]
            sub_wf_task_execs = sub_wf_ex.task_executions

            self.assertEqual(states.ERROR, sub_wf_ex.state)
            self.assertIsNotNone(sub_wf_ex.state_info)
            self.assertEqual(1, len(sub_wf_task_execs))

            sub_wf_task_ex = self._assert_single_item(
                sub_wf_task_execs,
                name='wf2_t1'
            )

            self.assertEqual(states.ERROR, sub_wf_task_ex.state)
            self.assertIsNotNone(sub_wf_task_ex.state_info)

        # Resume workflow and re-run the failed sub-workflow task.
        self.engine.rerun_workflow(sub_wf_task_ex.id)

        sub_wf_ex = db_api.get_workflow_execution(sub_wf_ex.id)

        self.assertEqual(states.RUNNING, sub_wf_ex.state)
        self.assertIsNone(sub_wf_ex.state_info)

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(states.RUNNING, wf_ex.state)
        self.assertIsNone(wf_ex.state_info)

        # Wait for the subworkflow to succeed.
        self.await_workflow_success(sub_wf_ex.id)

        with db_api.transaction():
            sub_wf_ex = db_api.get_workflow_execution(sub_wf_ex.id)

            sub_wf_task_execs = sub_wf_ex.task_executions

            self.assertEqual(states.SUCCESS, sub_wf_ex.state)
            self.assertIsNone(sub_wf_ex.state_info)
            self.assertEqual(1, len(sub_wf_task_execs))

            sub_wf_task_ex = self._assert_single_item(
                sub_wf_task_execs,
                name='wf2_t1'
            )

            # Check action executions of the sub-workflow task.
            self.assertEqual(states.SUCCESS, sub_wf_task_ex.state)
            self.assertIsNone(sub_wf_task_ex.state_info)

            sub_wf_task_ex_action_exs = db_api.get_action_executions(
                task_execution_id=sub_wf_task_ex.id
            )

            self.assertEqual(2, len(sub_wf_task_ex_action_exs))

            # Check there is exactly 1 action in Success and 1 in error state.
            # Order doesn't matter.
            self._assert_single_item(
                sub_wf_task_ex_action_exs,
                state=states.SUCCESS
            )
            self._assert_single_item(
                sub_wf_task_ex_action_exs,
                state=states.ERROR
            )

        # Wait for the main workflow to succeed.
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(states.SUCCESS, wf_ex.state)
            self.assertIsNone(wf_ex.state_info)
            self.assertEqual(3, len(task_execs))

            task_1_ex = self._assert_single_item(task_execs, name='t1')
            task_2_ex = self._assert_single_item(task_execs, name='t2')
            task_3_ex = self._assert_single_item(task_execs, name='t3')

            # Check action executions of task 1.
            self.assertEqual(states.SUCCESS, task_1_ex.state)

            task_1_action_exs = db_api.get_action_executions(
                task_execution_id=task_1_ex.id
            )

            self.assertEqual(1, len(task_1_action_exs))
            self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)

            # Check executions of task 2 (it runs a sub-workflow, so these
            # are workflow executions rather than action executions).
            self.assertEqual(states.SUCCESS, task_2_ex.state)
            self.assertIsNone(task_2_ex.state_info)

            task_2_action_exs = db_api.get_workflow_executions(
                task_execution_id=task_2_ex.id
            )

            self.assertEqual(1, len(task_2_action_exs))
            self.assertEqual(states.SUCCESS, task_2_action_exs[0].state)

            # Check action executions of task 3.
            self.assertEqual(states.SUCCESS, task_3_ex.state)

            task_3_action_exs = db_api.get_action_executions(
                task_execution_id=task_3_ex.id
            )

            self.assertEqual(1, len(task_3_action_exs))
            self.assertEqual(states.SUCCESS, task_3_action_exs[0].state)

    def test_rerun_task_with_retry_policy(self):
        wf_service.create_workflows("""---
        version: '2.0'
        wf_fail:
          tasks:
            task1:
              action: std.fail
              retry:
                delay: 0
                count: 2""")

        wf_ex = self.engine.start_workflow("wf_fail")

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = self._assert_single_item(
                wf_ex.task_executions,
                name="task1"
            )

            action_executions = task_ex.executions

            self.assertEqual(states.ERROR, wf_ex.state)
            self.assertIsNotNone(wf_ex.state_info)
            self.assertEqual(3, len(action_executions))
            self.assertTrue(
                all(a.state == states.ERROR for a in action_executions)
            )

        self.engine.rerun_workflow(task_ex.id)

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(states.RUNNING, wf_ex.state)

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = self._assert_single_item(
                wf_ex.task_executions,
                name="task1"
            )

            action_executions = task_ex.executions

            self.assertEqual(states.ERROR, wf_ex.state)
            self.assertIsNotNone(wf_ex.state_info)
            self.assertEqual(6, len(action_executions))
            self.assertTrue(
                all(a.state == states.ERROR for a in action_executions)
            )

    @mock.patch.object(
        std_actions.NoOpAction,
        'run',
        mock.MagicMock(
            side_effect=[
                exc.ActionException(),
                'Success'
            ]
        )
    )
    def test_rerun_sub_workflow(self):
        wf_service.create_workflows("""---
        version: '2.0'
        wf1:
          tasks:
            task1:
              workflow: wf2
        wf2:
          tasks:
            task2:
              workflow: wf3
        wf3:
          tasks:
            task3:
              action: std.noop""")

        # Run workflow and fail task.
        wf1_ex = self.engine.start_workflow('wf1')

        self.await_workflow_error(wf1_ex.id)

        with db_api.transaction():
            wf_exs = db_api.get_workflow_executions()
            task_exs = db_api.get_task_executions()

            self.assertEqual(
                3, len(wf_exs),
                'The number of workflow executions'
            )
            self.assertEqual(
                3, len(task_exs),
                'The number of task executions'
            )

            for wf_ex in wf_exs:
                self.assertEqual(
                    states.ERROR, wf_ex.state,
                    'The executions must fail the first time'
                )
            for task_ex in task_exs:
                self.assertEqual(
                    states.ERROR, task_ex.state,
                    'The tasks must fail the first time'
                )

            wf3_ex = self._assert_single_item(wf_exs, name='wf3')

            task3_ex = self._assert_single_item(
                wf3_ex.task_executions,
                name="task3"
            )

        self.engine.rerun_workflow(task3_ex.id)

        self.await_workflow_success(wf1_ex.id)

        with db_api.transaction():
            wf_exs = db_api.get_workflow_executions()
            task_exs = db_api.get_task_executions()

            self.assertEqual(
                3, len(wf_exs),
                'The number of workflow executions'
            )
            self.assertEqual(
                3, len(task_exs),
                'The number of task executions'
            )

            for wf_ex in wf_exs:
                self.assertEqual(
                    states.SUCCESS, wf_ex.state,
                    'The executions must succeed the second time'
                )
            for task_ex in task_exs:
                self.assertEqual(
                    states.SUCCESS, task_ex.state,
                    'The tasks must succeed the second time'
                )


# ---- mistral-10.0.0.0b3/mistral/tests/unit/engine/test_direct_workflow_rerun_cancelled.py ----

# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from mistral.actions import std_actions from mistral.db.v2 import api as db_api from mistral.services import workbooks as wb_service from mistral.tests.unit.engine import base from mistral.workflow import states from mistral_lib import actions as ml_actions # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class DirectWorkflowRerunCancelledTest(base.EngineTestCase): @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 2', # Mock task2 success. 'Task 3' # Mock task3 success. ] ) ) def test_rerun_cancelled_task(self): wb_def = """ version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t1: action: std.async_noop on-success: - t2 t2: action: std.echo output="Task 2" on-success: - t3 t3: action: std.echo output="Task 3" """ wb_service.create_workbook_v2(wb_def) wf1_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_state(wf1_ex.id, states.RUNNING) with db_api.transaction(): wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t1_ex = self._assert_single_item( wf1_ex.task_executions, name='t1' ) wf1_t1_action_exs = db_api.get_action_executions( task_execution_id=wf1_t1_ex.id ) self.assertEqual(1, len(wf1_t1_action_exs)) self.assertEqual(states.RUNNING, wf1_t1_action_exs[0].state) # Cancel action execution for task. self.engine.on_action_complete( wf1_t1_action_exs[0].id, ml_actions.Result(cancel=True) ) self.await_task_cancelled(wf1_t1_ex.id) self.await_workflow_cancelled(wf1_ex.id) with db_api.transaction(): wf1_ex = db_api.get_workflow_execution(wf1_ex.id) wf1_task_execs = wf1_ex.task_executions wf1_t1_ex = self._assert_single_item(wf1_task_execs, name='t1') self.assertEqual(states.CANCELLED, wf1_ex.state) self.assertEqual("Cancelled tasks: t1", wf1_ex.state_info) self.assertEqual(1, len(wf1_task_execs)) self.assertEqual(states.CANCELLED, wf1_t1_ex.state) self.assertIsNone(wf1_t1_ex.state_info) # Resume workflow and re-run cancelled task. self.engine.rerun_workflow(wf1_t1_ex.id) with db_api.transaction(): wf1_ex = db_api.get_workflow_execution(wf1_ex.id) wf1_task_execs = wf1_ex.task_executions self.assertEqual(states.RUNNING, wf1_ex.state) self.assertIsNone(wf1_ex.state_info) # Mark async action execution complete. wf1_t1_ex = self._assert_single_item(wf1_task_execs, name='t1') wf1_t1_action_exs = db_api.get_action_executions( task_execution_id=wf1_t1_ex.id ) self.assertEqual(states.RUNNING, wf1_t1_ex.state) self.assertEqual(2, len(wf1_t1_action_exs)) # Check there is exactly 1 action in Running and 1 in Cancelled state. # Order doesn't matter. wf1_t1_aex_running = self._assert_single_item( wf1_t1_action_exs, state=states.RUNNING ) self._assert_single_item(wf1_t1_action_exs, state=states.CANCELLED) self.engine.on_action_complete( wf1_t1_aex_running.id, ml_actions.Result(data={'foo': 'bar'}) ) # Wait for the workflow to succeed. 
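        # NOTE: the cancelled action execution is kept for history and the
        # rerun adds a new one, so t1 ends up with two action executions
        # (one SUCCESS, one CANCELLED), as asserted below.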
self.await_workflow_success(wf1_ex.id) with db_api.transaction(): wf1_ex = db_api.get_workflow_execution(wf1_ex.id) wf1_task_execs = wf1_ex.task_executions self.assertEqual(states.SUCCESS, wf1_ex.state) self.assertIsNone(wf1_ex.state_info) self.assertEqual(3, len(wf1_task_execs)) wf1_t1_ex = self._assert_single_item(wf1_task_execs, name='t1') wf1_t2_ex = self._assert_single_item(wf1_task_execs, name='t2') wf1_t3_ex = self._assert_single_item(wf1_task_execs, name='t3') # Check action executions of task 1. self.assertEqual(states.SUCCESS, wf1_t1_ex.state) self.assertIsNone(wf1_t1_ex.state_info) wf1_t1_action_exs = db_api.get_action_executions( task_execution_id=wf1_t1_ex.id ) self.assertEqual(2, len(wf1_t1_action_exs)) # Check there is exactly 1 action in Success and 1 in Cancelled state. # Order doesn't matter. self._assert_single_item(wf1_t1_action_exs, state=states.SUCCESS) self._assert_single_item(wf1_t1_action_exs, state=states.CANCELLED) # Check action executions of task 2. self.assertEqual(states.SUCCESS, wf1_t2_ex.state) wf1_t2_action_exs = db_api.get_action_executions( task_execution_id=wf1_t2_ex.id ) self.assertEqual(1, len(wf1_t2_action_exs)) self.assertEqual(states.SUCCESS, wf1_t2_action_exs[0].state) # Check action executions of task 3. self.assertEqual(states.SUCCESS, wf1_t3_ex.state) wf1_t3_action_exs = db_api.get_action_executions( task_execution_id=wf1_t3_ex.id ) self.assertEqual(1, len(wf1_t3_action_exs)) self.assertEqual(states.SUCCESS, wf1_t3_action_exs[0].state) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1', # Mock task1 success. 'Task 3' # Mock task3 success. ] ) ) def test_rerun_cancelled_subflow(self): wb_def = """ version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t1: action: std.echo output="Task 1" on-success: - t2 t2: workflow: wf2 on-success: - t3 t3: action: std.echo output="Task 3" wf2: type: direct output: result: <% task(wf2_t1).result %> tasks: wf2_t1: action: std.async_noop """ wb_service.create_workbook_v2(wb_def) wf1_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_state(wf1_ex.id, states.RUNNING) with db_api.transaction(): # Wait for task 1 to complete. wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t1_ex = self._assert_single_item( wf1_ex.task_executions, name='t1' ) self.await_task_success(wf1_t1_ex.id) with db_api.transaction(): # Wait for the async task to run. wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t2_ex = self._assert_single_item( wf1_ex.task_executions, name='t2' ) self.await_task_state(wf1_t2_ex.id, states.RUNNING) with db_api.transaction(): sub_wf_exs = db_api.get_workflow_executions( task_execution_id=wf1_t2_ex.id ) self.assertEqual(1, len(sub_wf_exs)) wf2_ex_running = self._assert_single_item( sub_wf_exs, state=states.RUNNING ) wf2_t1_ex = self._assert_single_item( wf2_ex_running.task_executions, name='wf2_t1' ) self.await_task_state(wf2_t1_ex.id, states.RUNNING) wf2_t1_action_exs = db_api.get_action_executions( task_execution_id=wf2_t1_ex.id ) self.assertEqual(1, len(wf2_t1_action_exs)) self.assertEqual(states.RUNNING, wf2_t1_action_exs[0].state) # Cancel subworkflow. self.engine.stop_workflow(wf2_ex_running.id, states.CANCELLED) self.await_workflow_cancelled(wf2_ex_running.id) self.await_workflow_cancelled(wf1_ex.id) # Resume workflow and re-run failed subworkflow task. 
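        # NOTE: re-running t2 starts a brand-new wf2 execution while the
        # cancelled one is preserved, so two sub-workflow executions (one
        # RUNNING, one CANCELLED) are expected below.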
self.engine.rerun_workflow(wf1_t2_ex.id) with db_api.transaction(): wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t2_ex = self._assert_single_item( wf1_ex.task_executions, name='t2' ) self.await_task_state(wf1_t2_ex.id, states.RUNNING) with db_api.transaction(): sub_wf_exs = db_api.get_workflow_executions( task_execution_id=wf1_t2_ex.id ) self.assertEqual(2, len(sub_wf_exs)) # Check there is exactly 1 sub-wf in Running and 1 in Cancelled # state. Order doesn't matter. self._assert_single_item(sub_wf_exs, state=states.CANCELLED) wf2_ex_running = self._assert_single_item( sub_wf_exs, state=states.RUNNING ) wf2_t1_ex = self._assert_single_item( wf2_ex_running.task_executions, name='wf2_t1' ) self.await_task_state(wf2_t1_ex.id, states.RUNNING) wf2_t1_action_exs = db_api.get_action_executions( task_execution_id=wf2_t1_ex.id ) self.assertEqual(1, len(wf2_t1_action_exs)) self.assertEqual(states.RUNNING, wf2_t1_action_exs[0].state) # Mark async action execution complete. self.engine.on_action_complete( wf2_t1_action_exs[0].id, ml_actions.Result(data={'foo': 'bar'}) ) # Wait for the workflows to succeed. self.await_workflow_success(wf1_ex.id) self.await_workflow_success(wf2_ex_running.id) sub_wf_exs = db_api.get_workflow_executions( task_execution_id=wf1_t2_ex.id ) self.assertEqual(2, len(sub_wf_exs)) # Check there is exactly 1 sub-wf in Success and 1 in Cancelled state. # Order doesn't matter. self._assert_single_item(sub_wf_exs, state=states.SUCCESS) self._assert_single_item(sub_wf_exs, state=states.CANCELLED) wf2_t1_action_exs = db_api.get_action_executions( task_execution_id=wf2_t1_ex.id ) self.assertEqual(1, len(wf2_t1_action_exs)) self.assertEqual(states.SUCCESS, wf2_t1_action_exs[0].state) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1', # Mock task1 success. 'Task 3' # Mock task3 success. ] ) ) def test_rerun_cancelled_subflow_task(self): wb_def = """ version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t1: action: std.echo output="Task 1" on-success: - t2 t2: workflow: wf2 on-success: - t3 t3: action: std.echo output="Task 3" wf2: type: direct output: result: <% task(wf2_t1).result %> tasks: wf2_t1: action: std.async_noop """ wb_service.create_workbook_v2(wb_def) wf1_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_state(wf1_ex.id, states.RUNNING) with db_api.transaction(): # Wait for task 1 to complete. wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t1_ex = self._assert_single_item( wf1_ex.task_executions, name='t1' ) self.await_task_success(wf1_t1_ex.id) with db_api.transaction(): # Wait for the async task to run. 
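            # NOTE: std.async_noop never completes on its own; its action
            # execution stays RUNNING until the engine's on_action_complete()
            # is invoked with a result, which this test does explicitly below.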
wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t2_ex = self._assert_single_item( wf1_ex.task_executions, name='t2' ) self.await_task_state(wf1_t2_ex.id, states.RUNNING) with db_api.transaction(): sub_wf_exs = db_api.get_workflow_executions( task_execution_id=wf1_t2_ex.id ) self.assertEqual(1, len(sub_wf_exs)) self.assertEqual(states.RUNNING, sub_wf_exs[0].state) wf2_ex = sub_wf_exs[0] wf2_t1_ex = self._assert_single_item( wf2_ex.task_executions, name='wf2_t1' ) self.await_task_state(wf2_t1_ex.id, states.RUNNING) wf2_t1_action_exs = db_api.get_action_executions( task_execution_id=wf2_t1_ex.id ) self.assertEqual(1, len(wf2_t1_action_exs)) self.assertEqual(states.RUNNING, wf2_t1_action_exs[0].state) # Cancel action execution for task. self.engine.on_action_complete( wf2_t1_action_exs[0].id, ml_actions.Result(cancel=True) ) self.await_workflow_cancelled(wf2_ex.id) self.await_workflow_cancelled(wf1_ex.id) # Resume workflow and re-run failed subworkflow task. self.engine.rerun_workflow(wf2_t1_ex.id) with db_api.transaction(): wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t2_ex = self._assert_single_item( wf1_ex.task_executions, name='t2' ) self.await_task_state(wf1_t2_ex.id, states.RUNNING) with db_api.transaction(): sub_wf_exs = db_api.get_workflow_executions( task_execution_id=wf1_t2_ex.id ) self.assertEqual(1, len(sub_wf_exs)) self.assertEqual(states.RUNNING, sub_wf_exs[0].state) wf2_ex = sub_wf_exs[0] wf2_t1_ex = self._assert_single_item( wf2_ex.task_executions, name='wf2_t1' ) self.await_task_state(wf2_t1_ex.id, states.RUNNING) wf2_t1_action_exs = db_api.get_action_executions( task_execution_id=wf2_t1_ex.id ) self.assertEqual(2, len(wf2_t1_action_exs)) # Check there is exactly 1 action in Running and 1 in Cancelled state. # Order doesn't matter. self._assert_single_item(wf2_t1_action_exs, state=states.CANCELLED) wf2_t1_aex_running = self._assert_single_item( wf2_t1_action_exs, state=states.RUNNING ) # Mark async action execution complete. self.engine.on_action_complete( wf2_t1_aex_running.id, ml_actions.Result(data={'foo': 'bar'}) ) # Wait for the workflows to succeed. self.await_workflow_success(wf1_ex.id) self.await_workflow_success(wf2_ex.id) sub_wf_exs = db_api.get_workflow_executions( task_execution_id=wf1_t2_ex.id ) self.assertEqual(1, len(sub_wf_exs)) self.assertEqual(states.SUCCESS, sub_wf_exs[0].state) wf2_t1_action_exs = db_api.get_action_executions( task_execution_id=wf2_t1_ex.id ) self.assertEqual(2, len(wf2_t1_action_exs)) # Check there is exactly 1 action in Success and 1 in Cancelled state. # Order doesn't matter. self._assert_single_item(wf2_t1_action_exs, state=states.SUCCESS) self._assert_single_item(wf2_t1_action_exs, state=states.CANCELLED) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 2' # Mock task2 success. 
] ) ) def test_rerun_cancelled_with_items(self): wb_def = """ version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t1: with-items: i in <% list(range(0, 3)) %> action: std.async_noop on-success: - t2 t2: action: std.echo output="Task 2" """ wb_service.create_workbook_v2(wb_def) wf1_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_state(wf1_ex.id, states.RUNNING) with db_api.transaction(): wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t1_ex = self._assert_single_item( wf1_ex.task_executions, name='t1' ) wf1_t1_action_exs = db_api.get_action_executions( task_execution_id=wf1_t1_ex.id ) self.assertEqual(3, len(wf1_t1_action_exs)) self._assert_multiple_items(wf1_t1_action_exs, 3, state=states.RUNNING) # Cancel action execution for tasks. for wf1_t1_action_ex in wf1_t1_action_exs: self.engine.on_action_complete( wf1_t1_action_ex.id, ml_actions.Result(cancel=True) ) self.await_workflow_cancelled(wf1_ex.id) wf1_t1_action_exs = db_api.get_action_executions( task_execution_id=wf1_t1_ex.id ) self.assertEqual(3, len(wf1_t1_action_exs)) self._assert_multiple_items( wf1_t1_action_exs, 3, state=states.CANCELLED ) # Resume workflow and re-run failed with items task. self.engine.rerun_workflow(wf1_t1_ex.id, reset=False) with db_api.transaction(): wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t1_ex = self._assert_single_item( wf1_ex.task_executions, name='t1' ) self.await_workflow_state(wf1_ex.id, states.RUNNING) wf1_t1_action_exs = db_api.get_action_executions( task_execution_id=wf1_t1_ex.id ) self.assertEqual(6, len(wf1_t1_action_exs)) # Check there is exactly 3 action in Running and 3 in Cancelled state. # Order doesn't matter. self._assert_multiple_items( wf1_t1_action_exs, 3, state=states.CANCELLED ) wf1_t1_aexs_running = self._assert_multiple_items( wf1_t1_action_exs, 3, state=states.RUNNING ) # Mark async action execution complete. for action_ex in wf1_t1_aexs_running: self.engine.on_action_complete( action_ex.id, ml_actions.Result(data={'foo': 'bar'}) ) # Wait for the workflows to succeed. self.await_workflow_success(wf1_ex.id) with db_api.transaction(): wf1_ex = db_api.get_workflow_execution(wf1_ex.id) wf1_t1_ex = self._assert_single_item( wf1_ex.task_executions, name='t1' ) wf1_t1_action_exs = db_api.get_action_executions( task_execution_id=wf1_t1_ex.id ) self.assertEqual(6, len(wf1_t1_action_exs)) # Check there is exactly 3 action in Success and 3 in Cancelled state. # Order doesn't matter. self._assert_multiple_items(wf1_t1_action_exs, 3, state=states.SUCCESS) self._assert_multiple_items( wf1_t1_action_exs, 3, state=states.CANCELLED ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_direct_workflow_with_cycles.py0000644000175000017500000001530500000000000030372 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import data_flow from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class DirectWorkflowWithCyclesTest(base.EngineTestCase): def test_simple_cycle(self): wf_text = """ version: 2.0 workflow_cycle_in_out_verify: input: - num_of_cycles output: abc: <% $.counter %> tasks: initialize: publish: counter: 0 on-success: - increment increment: action: std.noop publish: counter: <% $.counter + 1 %> on-success: - increment: <% $.counter < $.num_of_cycles %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow( 'workflow_cycle_in_out_verify', wf_input={"num_of_cycles": 21} ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual({'abc': 21}, wf_ex.output) t_execs = wf_ex.task_executions # Expecting one execution of the 'initialize' task and 21 executions # of the 'increment' task because 'increment' cycles onto itself. self._assert_single_item(t_execs, name='initialize') self._assert_multiple_items(t_execs, 21, name='increment') self.assertEqual(22, len(t_execs)) self.assertEqual(states.SUCCESS, wf_ex.state) self.assertTrue(all(states.SUCCESS == t_ex.state for t_ex in t_execs)) def test_complex_cycle(self): wf_text = """ version: '2.0' wf: vars: cnt: 0 output: cnt: <% $.cnt %> tasks: task1: on-complete: - task2 task2: action: std.echo output=2 publish: cnt: <% $.cnt + 1 %> on-success: - task3 task3: action: std.echo output=3 on-complete: - task4 task4: action: std.echo output=4 on-success: - task2: <% $.cnt < 2 %> - task5: <% $.cnt >= 2 %> task5: action: std.echo output=<% $.cnt %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual({'cnt': 2}, wf_ex.output) t_execs = wf_ex.task_executions # Expecting one execution for task1 and task5 and two executions # for task2, task3 and task4 because of the cycle # 'task2 -> task3 -> task4 -> task2'.
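# That is 1 (task1) + 2 (task2) + 2 (task3) + 2 (task4) + 1 (task5) = 8
# task executions in total.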
self._assert_single_item(t_execs, name='task1') self._assert_multiple_items(t_execs, 2, name='task2') self._assert_multiple_items(t_execs, 2, name='task3') self._assert_multiple_items(t_execs, 2, name='task4') task5_ex = self._assert_single_item(t_execs, name='task5') self.assertEqual(8, len(t_execs)) self.assertEqual(states.SUCCESS, wf_ex.state) self.assertTrue(all(states.SUCCESS == t_ex.state for t_ex in t_execs)) with db_api.transaction(): task5_ex = db_api.get_task_execution(task5_ex.id) self.assertEqual(2, data_flow.get_task_execution_result(task5_ex)) def test_parallel_cycles(self): wf_text = """ version: '2.0' wf: vars: cnt: 0 output: cnt: <% $.cnt %> tasks: task1: on-complete: - task1_2 - task2_2 task1_2: action: std.echo output=2 publish: cnt: <% $.cnt + 1 %> on-success: - task1_3 task1_3: action: std.echo output=3 on-success: - task1_2: <% $.cnt < 2 %> task2_2: action: std.echo output=2 publish: cnt: <% $.cnt + 1 %> on-success: - task2_3 task2_3: action: std.echo output=3 on-success: - task2_2: <% $.cnt < 3 %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output t_execs = wf_ex.task_executions # NOTE: We have two cycles in parallel workflow branches # and those branches will have their own copy of "cnt" variable # so both cycles must complete correctly. self._assert_single_item(t_execs, name='task1') self._assert_multiple_items(t_execs, 2, name='task1_2') self._assert_multiple_items(t_execs, 2, name='task1_3') self._assert_multiple_items(t_execs, 3, name='task2_2') self._assert_multiple_items(t_execs, 3, name='task2_3') self.assertEqual(11, len(t_execs)) self.assertEqual(states.SUCCESS, wf_ex.state) self.assertTrue(all(states.SUCCESS == t_ex.state for t_ex in t_execs)) # TODO(rakhmerov): We have this uncertainty because of the known # bug: https://bugs.launchpad.net/mistral/liberty/+bug/1424461 # Now workflow output is almost always 3 because the second cycle # takes longer hence it wins because of how DB queries work: they # order entities in ascending of creation time. self.assertTrue(wf_output['cnt'] == 2 or wf_output['cnt'] == 3) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_disabled_yaql_conversion.py0000644000175000017500000002125000000000000027627 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
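# A minimal standalone sketch, assuming the 'yaql' library is importable,
# of the internal type that the conversion options exercised below guard
# against; 'demo_yaql_frozenset' is an illustrative helper and is not
# part of Mistral.
def demo_yaql_frozenset():
    from yaql import factory

    # Create an engine with output data conversion turned off, analogous
    # to setting 'yaql.convert_output_data = False' in Mistral's config.
    engine = factory.YaqlFactory().create(
        options={'yaql.convertOutputData': False}
    )

    # set() is evaluated into YAQL's internal 'frozenset' type; such a
    # value is not JSON-serializable, so persistence code (e.g. the
    # SQLAlchemy JSON columns mentioned in the docstring below) has to
    # tolerate it.
    return engine('set(1, 2, 3)').evaluate(data={})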
import mock from mistral.db.v2 import api as db_api from mistral.engine import engine_server from mistral import exceptions as exc from mistral.expressions import yaql_expression from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base as engine_test_base class DisabledYAQLConversionTest(engine_test_base.EngineTestCase): def setUp(self): super(DisabledYAQLConversionTest, self).setUp() self.override_config('auth_enable', False, 'pecan') def test_disabled_yaql_output_conversion(self): """Test YAQL expressions with disabled data conversion. The test is needed to make sure that if we disable YAQL data conversion (for both input and output), then Mistral will handle YAQL internal data types properly if they sneak into the Mistral logic as part of an expression result. Particularly, we need to make sure that the ORM framework (SQLAlchemy) will also be able to save data properly if it comes across such a type. NOTE: - set() and toSet() functions produce "frozenset" type internally within YAQL and it should be handled properly everywhere in the code including SQLAlchemy. - dict() produces "FrozenDict" internally but we unwrap the top most dict after evaluating an expression on the Mistral side. """ # Both input and output data conversion in YAQL need to be disabled # so that we're sure that there won't be any surprises from YAQL # like some YAQL internal types included in expression results. self.override_config('convert_input_data', False, 'yaql') self.override_config('convert_output_data', False, 'yaql') # At this point YAQL engine has already been initialized with the # default value of config options. So we need to set the corresponding # constant to None so it gets initialized again with the new values # upon the first use. yaql_expression.YAQL_ENGINE = None wf_text = """--- version: '2.0' wf: tasks: task1: publish: var1: <% range(0,10) %> var2: <% set(15) %> var3: <% [4, 5, 6].toSet() %> var4: <% {k1 => v1, k2 => v2} %> var5: <% dict([['a', 2], ['b', 4]]) %> var6: <% [1, dict(k3 => v3, k4 => v4), 3] %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions t_ex = self._assert_single_item(tasks, name='task1') self.assertDictEqual( { 'var1': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'var2': [15], 'var3': [4, 5, 6], 'var4': {'k1': 'v1', 'k2': 'v2'}, 'var5': {'a': 2, 'b': 4}, 'var6': [1, {'k3': 'v3', 'k4': 'v4'}, 3], }, t_ex.published ) def test_configuration_check(self): # Kill all the threads started by default and try to start an # instance of engine server again with the wrong configuration. self.kill_threads() self.override_config('convert_input_data', True, 'yaql') self.override_config('convert_output_data', False, 'yaql') # Setting YAQL engine to None so it reinitialized again with the # right values upon the next use. 
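# (YAQL_ENGINE is a lazily created module-level singleton, so clearing it
# forces the next expression evaluation to rebuild it from the config
# values that are now in effect.)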
yaql_expression.YAQL_ENGINE = None eng_svc = engine_server.get_oslo_service(setup_profiler=False) self.assertRaisesWithMessage( exc.MistralError, "The config property 'yaql.convert_output_data' is set to False " "so 'yaql.convert_input_data' must also be set to False.", eng_svc.start ) def test_root_context(self): # Both input and output data conversion in YAQL need to be disabled # so that we're sure that there won't be any surprises from YAQL # like some YAQL internal types included in expression results. self.override_config('convert_input_data', False, 'yaql') self.override_config('convert_output_data', False, 'yaql') # Setting YAQL engine to None so it reinitialized again with the # right values upon the next use. yaql_expression.YAQL_ENGINE = None wf_text = """--- version: '2.0' wf: input: - param: default_val tasks: task1: action: std.echo output=<% $ %> publish: result: <% task().result %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) t_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) action_ex = t_ex.action_executions[0] self.assertTrue(len(action_ex.input) > 0) self.assertIn('output', action_ex.input) self.assertIn('param', action_ex.input['output']) def test_iterators_in_yaql_result(self): # Both input and output data conversion in YAQL need to be disabled # so that we're sure that there won't be any surprises from YAQL # like some YAQL internal types included in expression results. self.override_config('convert_input_data', False, 'yaql') self.override_config('convert_output_data', False, 'yaql') # Setting YAQL engine to None so it reinitialized again with the # right values upon the next use. yaql_expression.YAQL_ENGINE = None wf_text = """--- version: '2.0' wf: input: - params: null tasks: task1: action: std.echo input: output: param1: <% switch($.params = null => [], $.params != null => $.params.items().select({k => $[0], v => $[1]})) %> """ wf_service.create_workflows(wf_text) wf_input = { 'params': { 'k1': 'v1', 'k2': 'v2' } } with mock.patch.object(self.executor, 'run_action', wraps=self.executor.run_action) as mocked: # Start workflow. wf_ex = self.engine.start_workflow('wf', wf_input=wf_input) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) t_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) action_ex = t_ex.action_executions[0] self.assertTrue(len(action_ex.input) > 0) mocked.assert_called_once() # We need to make sure that the executor got the right action # input regardless of an iterator (that can only be used once) # present in the YAQL expression result. Let's check first 4 # actual arguments with the executor was called, including the # action parameters. args = mocked.call_args[0] self.assertEqual(action_ex.id, args[0]) self.assertEqual('mistral.actions.std_actions.EchoAction', args[1]) self.assertDictEqual({}, args[2]) self.assertDictEqual(action_ex.input, args[3]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_environment.py0000644000175000017500000002754700000000000025150 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.executors import default_executor as d_exe from mistral.executors import remote_executor as r_exe from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') TARGET = '10.1.15.251' WORKBOOK = """ --- version: '2.0' name: my_wb workflows: wf1: type: reverse input: - param1 - param2 output: final_result: <% $.final_result %> tasks: task1: action: std.echo output=<% $.param1 %> target: <% env().var1 %> publish: result1: <% task(task1).result %> task2: requires: [task1] action: std.echo output="'<% $.result1 %> & <% $.param2 %>'" target: <% env().var1 %> publish: final_result: <% task(task2).result %> wf2: output: slogan: <% $.slogan %> tasks: task1: workflow: wf1 input: param1: <% env().var2 %> param2: <% env().var3 %> task_name: task2 publish: slogan: > <% task(task1).result.final_result %> is a cool <% env().var4 %>! """ def _run_at_target(action_ex_id, action_cls_str, action_cls_attrs, params, safe_rerun, execution_context, target=None, async_=True, timeout=None): # We'll just call executor directly for testing purposes. executor = d_exe.DefaultExecutor() executor.run_action( action_ex_id, action_cls_str, action_cls_attrs, params, safe_rerun, execution_context=execution_context, target=target, async_=async_, timeout=timeout ) MOCK_RUN_AT_TARGET = mock.MagicMock(side_effect=_run_at_target) class EnvironmentTest(base.EngineTestCase): def setUp(self): super(EnvironmentTest, self).setUp() wb_service.create_workbook_v2(WORKBOOK) @mock.patch.object(r_exe.RemoteExecutor, 'run_action', MOCK_RUN_AT_TARGET) def _test_subworkflow(self, env): wf2_ex = self.engine.start_workflow('my_wb.wf2', env=env) # Execution of 'wf2'. self.assertIsNotNone(wf2_ex) self.assertDictEqual({}, wf2_ex.input) self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() self.assertEqual(2, len(wf_execs)) # Execution of 'wf1'. wf2_ex = self._assert_single_item(wf_execs, name='my_wb.wf2') wf1_ex = self._assert_single_item(wf_execs, name='my_wb.wf1') expected_wf1_input = { 'param1': 'Bonnie', 'param2': 'Clyde' } self.assertIsNotNone(wf1_ex.task_execution_id) self.assertDictEqual(wf1_ex.input, expected_wf1_input) # Wait till workflow 'wf1' is completed. self.await_workflow_success(wf1_ex.id) with db_api.transaction(): wf1_ex = db_api.get_workflow_execution(wf1_ex.id) self.assertDictEqual( {'final_result': "'Bonnie & Clyde'"}, wf1_ex.output ) # Wait till workflow 'wf2' is completed. 
self.await_workflow_success(wf2_ex.id) with db_api.transaction(): wf2_ex = db_api.get_workflow_execution(wf2_ex.id) self.assertDictEqual( {'slogan': "'Bonnie & Clyde' is a cool movie!\n"}, wf2_ex.output ) with db_api.transaction(): # Check if target is resolved. wf1_task_execs = db_api.get_task_executions( workflow_execution_id=wf1_ex.id ) self._assert_single_item(wf1_task_execs, name='task1') self._assert_single_item(wf1_task_execs, name='task2') for t_ex in wf1_task_execs: a_ex = t_ex.action_executions[0] callback_url = '/v2/action_executions/%s' % a_ex.id r_exe.RemoteExecutor.run_action.assert_any_call( a_ex.id, 'mistral.actions.std_actions.EchoAction', {}, a_ex.input, False, { 'task_execution_id': t_ex.id, 'callback_url': callback_url, 'workflow_execution_id': wf1_ex.id, 'workflow_name': wf1_ex.name, 'action_execution_id': a_ex.id, }, target=TARGET, timeout=None ) def test_subworkflow_env_task_input(self): env = { 'var1': TARGET, 'var2': 'Bonnie', 'var3': 'Clyde', 'var4': 'movie' } self._test_subworkflow(env) def test_subworkflow_env_recursive(self): env = { 'var1': TARGET, 'var2': 'Bonnie', 'var3': '<% env().var5 %>', 'var4': 'movie', 'var5': 'Clyde' } self._test_subworkflow(env) def test_evaluate_env_parameter(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.noop publish: var1: <% env().var1 %> var2: <% env().var2 %> """ wf_service.create_workflows(wf_text) env = { "var1": "val1", "var2": "<% env().var1 %>" } # Run with 'evaluate_env' set to True. wf_ex = self.engine.start_workflow( 'wf', env=env, evaluate_env=True ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) t = self._assert_single_item(wf_ex.task_executions, name='task1') self.assertDictEqual( { "var1": "val1", "var2": "val1" }, t.published ) # Run with 'evaluate_env' set to False. wf_ex = self.engine.start_workflow( 'wf', env=env, evaluate_env=False ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) t = self._assert_single_item(wf_ex.task_executions, name='task1') self.assertDictEqual( { "var1": "val1", "var2": "<% env().var1 %>" }, t.published ) def test_evaluate_env_parameter_subworkflow(self): wf_text = """--- version: '2.0' parent_wf: tasks: task1: workflow: sub_wf sub_wf: output: result: <% $.result %> tasks: task1: action: std.noop publish: result: <% env().dummy %> """ wf_service.create_workflows(wf_text) # Run with 'evaluate_env' set to False. env = {"dummy": "<% $.ENSURE.MISTRAL.DOESNT.EVALUATE.ENV %>"} parent_wf_ex = self.engine.start_workflow( 'parent_wf', env=env, evaluate_env=False ) self.await_workflow_success(parent_wf_ex.id) with db_api.transaction(): parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex.id) t = self._assert_single_item( parent_wf_ex.task_executions, name='task1' ) sub_wf_ex = db_api.get_workflow_executions( task_execution_id=t.id )[0] self.assertDictEqual( { "result": "<% $.ENSURE.MISTRAL.DOESNT.EVALUATE.ENV %>" }, sub_wf_ex.output ) # Run with 'evaluate_env' set to True. 
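# This time the expression in 'env' is evaluated once before the run
# starts, so the subworkflow publishes the computed value 2 rather than
# the raw template string.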
env = {"dummy": "<% 1 + 1 %>"} parent_wf_ex = self.engine.start_workflow( 'parent_wf', env=env, evaluate_env=True ) self.await_workflow_success(parent_wf_ex.id) with db_api.transaction(): parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex.id) t = self._assert_single_item( parent_wf_ex.task_executions, name='task1' ) sub_wf_ex = db_api.get_workflow_executions( task_execution_id=t.id )[0] self.assertDictEqual( { "result": 2 }, sub_wf_ex.output ) def test_env_not_copied_to_context(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output="<% env().param1 %>" publish: result: <% task().result %> """ wf_service.create_workflows(wf_text) env = { 'param1': 'val1', 'param2': 'val2', 'param3': 'val3' } wf_ex = self.engine.start_workflow('wf', env=env) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) t = self._assert_single_item( wf_ex.task_executions, name='task1' ) self.assertDictEqual({'result': 'val1'}, t.published) self.assertNotIn('__env', wf_ex.context) def test_subworkflow_env_no_duplicate(self): wf_text = """--- version: '2.0' parent_wf: tasks: task1: workflow: sub_wf sub_wf: output: result: <% $.result %> tasks: task1: action: std.noop publish: result: <% env().param1 %> """ wf_service.create_workflows(wf_text) env = { 'param1': 'val1', 'param2': 'val2', 'param3': 'val3' } parent_wf_ex = self.engine.start_workflow('parent_wf', env=env) self.await_workflow_success(parent_wf_ex.id) with db_api.transaction(): parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex.id) t = self._assert_single_item( parent_wf_ex.task_executions, name='task1' ) sub_wf_ex = db_api.get_workflow_executions( task_execution_id=t.id )[0] self.assertDictEqual( { "result": "val1" }, sub_wf_ex.output ) # The environment of the subworkflow must be empty. # To evaluate expressions it should be taken from the # parent workflow execution. self.assertDictEqual({}, sub_wf_ex.params['env']) self.assertNotIn('__env', sub_wf_ex.context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_error_handling.py0000644000175000017500000005736400000000000025601 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_db import exception as db_exc from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.expressions import jinja_expression from mistral.expressions import yaql_expression from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states from mistral_lib import actions as actions_base # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. 
cfg.CONF.set_default('auth_enable', False, group='pecan') class InvalidUnicodeAction(actions_base.Action): def run(self, context): return b'\xf8' def test(self): pass class ErrorHandlingEngineTest(base.EngineTestCase): def test_invalid_workflow_input(self): # Check that in case of invalid input workflow objects aren't even # created. wf_text = """ version: '2.0' wf: input: - param1 - param2 tasks: task1: action: std.noop """ wf_service.create_workflows(wf_text) self.assertRaises( exc.InputException, self.engine.start_workflow, 'wf', '', {'wrong_param': 'some_value'} ) self.assertEqual(0, len(db_api.get_workflow_executions())) self.assertEqual(0, len(db_api.get_task_executions())) self.assertEqual(0, len(db_api.get_action_executions())) def test_first_task_error(self): # Check that in case of an error in first task workflow objects are # still persisted properly. wf_text = """ version: '2.0' wf: tasks: task1: action: std.fail on-success: task2 task2: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNotNone(db_api.get_workflow_execution(wf_ex.id)) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item(task_execs, name='task1', state=states.ERROR) def test_action_error(self): # Check that state of all workflow objects (workflow executions, # task executions, action executions) is properly persisted in case # of action error. wf_text = """ version: '2.0' wf: tasks: task1: action: std.fail """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self._assert_single_item(task_execs, name='task1', state=states.ERROR) def test_task_error(self): # Check that state of all workflow objects (workflow executions, # task executions, action executions) is properly persisted in case # of an error at task level. wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop publish: my_var: <% invalid_yaql_function() %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) # Now we need to make sure that task is in ERROR state but action # is in SUCCESS because error occurred in 'publish' clause which # must not affect action state. task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_ex = self._assert_single_item( task_execs, name='task1', state=states.ERROR ) action_execs = task_ex.executions self.assertEqual(1, len(action_execs)) self._assert_single_item( action_execs, name='std.noop', state=states.SUCCESS ) def test_task_error_with_on_handlers(self): # Check that state of all workflow objects (workflow executions, # task executions, action executions) is properly persisted in case # of an error at task level and this task has on-XXX handlers. wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop publish: my_var: <% invalid_yaql_function() %> on-success: - task2 on-error: - task3 task2: description: This task must never run. 
action: std.noop task3: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) # Now we need to make sure that task is in ERROR state but action # is in SUCCESS because error occurred in 'publish' clause which # must not affect action state. task_execs = wf_ex.task_executions # NOTE: task3 must not run because on-error handler triggers # only on error outcome of an action (or workflow) associated # with a task. self.assertEqual(1, len(task_execs)) task_ex = self._assert_single_item( task_execs, name='task1', state=states.ERROR ) action_execs = task_ex.executions self.assertEqual(1, len(action_execs)) self._assert_single_item( action_execs, name='std.noop', state=states.SUCCESS ) def test_workflow_error(self): # Check that state of all workflow objects (workflow executions, # task executions, action executions) is properly persisted in case # of an error at task level. wf_text = """ version: '2.0' wf: output: my_output: <% $.invalid_yaql_variable %> tasks: task1: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) # Now we need to make sure that task and action are in SUCCESS # state because mistake at workflow level (output evaluation) # must not affect them. task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) action_execs = task_ex.executions self.assertEqual(1, len(action_execs)) self._assert_single_item( action_execs, name='std.noop', state=states.SUCCESS ) def test_action_error_with_wait_before_policy(self): # Check that state of all workflow objects (workflow executions, # task executions, action executions) is properly persisted in case # of action error and task has 'wait-before' policy. It is an # implicit test for task continuation because 'wait-before' inserts # a delay between preparing task execution object and scheduling # actions. If an error happens during scheduling actions (e.g. # invalid YAQL in action parameters) then we also need to handle # this properly, meaning that task and workflow state should go # into ERROR state. wf_text = """ version: '2.0' wf: tasks: task1: action: std.echo output=<% invalid_yaql_function() %> wait-before: 1 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_ex = self._assert_single_item( task_execs, name='task1', state=states.ERROR ) action_execs = task_ex.executions self.assertEqual(0, len(action_execs)) def test_action_error_with_wait_after_policy(self): # Check that state of all workflow objects (workflow executions, # task executions, action executions) is properly persisted in case # of action error and task has 'wait-after' policy. It is an # implicit test for task completion because 'wait-after' inserts # a delay between actual task completion and logic that calculates # next workflow commands. If an error happens while calculating # next commands (e.g. invalid YAQL in on-XXX clauses) then we also # need to handle this properly, meaning that task and workflow state # should go into ERROR state. 
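# The 'on-success' clause below therefore contains a deliberately invalid
# YAQL call so that the failure happens while the next workflow commands
# are being calculated.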
wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop wait-after: 1 on-success: - task2: <% invalid_yaql_function() %> task2: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_ex = self._assert_single_item( task_execs, name='task1', state=states.ERROR ) action_execs = task_ex.executions self.assertEqual(1, len(action_execs)) self._assert_single_item( action_execs, name='std.noop', state=states.SUCCESS ) def test_error_message_format_key_error(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop on-success: - succeed: <% $.invalid_yaql %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] state_info = task_ex.state_info self.assertIsNotNone(state_info) self.assertLess(state_info.find('error'), state_info.find('data')) def test_error_message_format_unknown_function(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop publish: my_var: <% invalid_yaql_function() %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] state_info = task_ex.state_info self.assertIsNotNone(state_info) self.assertGreater(state_info.find('error='), 0) self.assertLess(state_info.find('error='), state_info.find('data=')) def test_error_message_format_invalid_on_task_run(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.echo output={{ _.invalid_var }} """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] state_info = task_ex.state_info self.assertIsNotNone(state_info) self.assertGreater(state_info.find('error='), 0) self.assertLess(state_info.find('error='), state_info.find('wf=')) def test_error_message_format_on_task_continue(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.echo output={{ _.invalid_var }} wait-before: 1 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] state_info = task_ex.state_info self.assertIsNotNone(state_info) self.assertGreater(state_info.find('error='), 0) self.assertLess(state_info.find('error='), state_info.find('wf=')) def test_error_message_format_on_action_complete(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop publish: my_var: <% invalid_yaql_function() %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] state_info = task_ex.state_info print(state_info) self.assertIsNotNone(state_info) self.assertGreater(state_info.find('error='), 0) self.assertLess(state_info.find('error='), state_info.find('wf=')) def test_error_message_format_complete_task(self): wf_text = """ version: '2.0' wf: 
tasks: task1: action: std.noop wait-after: 1 on-success: - task2: <% invalid_yaql_function() %> task2: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] state_info = task_ex.state_info self.assertIsNotNone(state_info) self.assertGreater(state_info.find('error='), 0) self.assertLess(state_info.find('error='), state_info.find('wf=')) def test_error_message_format_on_adhoc_action_error(self): wb_text = """ version: '2.0' name: wb actions: my_action: input: - output output: <% invalid_yaql_function() %> base: std.echo base-input: output: <% $.output %> workflows: wf: tasks: task1: action: my_action output="test" """ wb_service.create_workbook_v2(wb_text) wf_ex = self.engine.start_workflow('wb.wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] state_info = task_ex.state_info self.assertIsNotNone(state_info) self.assertGreater(state_info.find('error='), 0) self.assertLess(state_info.find('error='), state_info.find('action=')) def test_publish_bad_yaql(self): wf_text = """--- version: '2.0' wf: type: direct input: - my_dict: - id: 1 value: 11 tasks: task1: action: std.noop publish: problem_var: <% $.my_dict.where($.value = 13).id.first() %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] action_ex = task_ex.action_executions[0] self.assertEqual(states.SUCCESS, action_ex.state) self.assertEqual(states.ERROR, task_ex.state) self.assertIsNotNone(task_ex.state_info) self.assertEqual(states.ERROR, wf_ex.state) def test_publish_bad_jinja(self): wf_text = """--- version: '2.0' wf: type: direct input: - my_dict: - id: 1 value: 11 tasks: task1: action: std.noop publish: problem_var: '{{ (_.my_dict|some_invalid_filter).id }}' """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] action_ex = task_ex.action_executions[0] self.assertEqual(states.SUCCESS, action_ex.state) self.assertEqual(states.ERROR, task_ex.state) self.assertIsNotNone(task_ex.state_info) self.assertEqual(states.ERROR, wf_ex.state) def test_invalid_task_input(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.noop on-success: task2 task2: action: std.echo output=<% $.non_existing_function_AAA() %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(2, len(tasks)) self._assert_single_item(tasks, name='task1', state=states.SUCCESS) t2 = self._assert_single_item(tasks, name='task2', state=states.ERROR) self.assertIsNotNone(t2.state_info) self.assertIn('Can not evaluate YAQL expression', t2.state_info) self.assertIsNotNone(wf_ex.state_info) self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info) def test_invalid_action_result(self): self.register_action_class( 'test.invalid_unicode_action', InvalidUnicodeAction ) wf_text = """--- version: '2.0' wf: tasks: task1: action: 
test.invalid_unicode_action on-success: task2 task2: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(1, len(wf_ex.task_executions)) task_ex = wf_ex.task_executions[0] self.assertIn("UnicodeDecodeError: utf", wf_ex.state_info) self.assertIn("UnicodeDecodeError: utf", task_ex.state_info) @mock.patch( 'mistral.expressions.yaql_expression.get_yaql_context', mock.MagicMock( side_effect=[ db_exc.DBDeadlock(), # Emulating DB deadlock yaql_expression.get_yaql_context({}) # Successful run ] ) ) def test_db_error_in_yaql_expression(self): # This test just checks that the workflow completes successfully # even if a DB deadlock occurs during YAQL expression evaluation. # The engine in this case should just retry the transactional # method. wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output="Hello" publish: my_var: <% 1 + 1 %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(1, len(wf_ex.task_executions)) task_ex = wf_ex.task_executions[0] self.assertDictEqual({'my_var': 2}, task_ex.published) @mock.patch( 'mistral.expressions.jinja_expression.get_jinja_context', mock.MagicMock( side_effect=[ db_exc.DBDeadlock(), # Emulating DB deadlock jinja_expression.get_jinja_context({}) # Successful run ] ) ) def test_db_error_in_jinja_expression(self): # This test just checks that the workflow completes successfully # even if a DB deadlock occurs during Jinja expression evaluation. # The engine in this case should just retry the transactional # method. wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output="Hello" publish: my_var: "{{ 1 + 1 }}" """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(1, len(wf_ex.task_executions)) task_ex = wf_ex.task_executions[0] self.assertDictEqual({'my_var': 2}, task_ex.published) def test_action_error_message_format(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.fail """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(1, len(wf_ex.task_executions)) task_ex = wf_ex.task_executions[0] expected = ( "The action raised an exception [action_ex_id=%s, " "msg='Fail action expected exception.'" ) % task_ex.action_executions[0].id # Making sure that the actual error message goes before # other debugging information. self.assertTrue(task_ex.state_info.startswith(expected)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_error_result.py0000644000175000017500000001550400000000000025321 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit import base as test_base from mistral.tests.unit.engine import base from mistral.workflow import data_flow from mistral.workflow import states from mistral_lib import actions as actions_base # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WF = """ --- version: '2.0' wf: input: - success_result - error_result tasks: task1: action: {action_name} input: success_result: <% $.success_result %> error_result: <% $.error_result %> publish: p_var: <% task(task1).result %> on-error: - task2: <% task(task1).result = 2 %> - task3: <% task(task1).result = 3 %> task2: action: std.noop task3: action: std.noop """ class MyAction(actions_base.Action): def __init__(self, success_result, error_result): self.success_result = success_result self.error_result = error_result def run(self, context): return actions_base.Result( data=self.success_result, error=self.error_result ) def test(self): raise NotImplementedError class MyAsyncAction(MyAction): def is_sync(self): return False class ErrorResultTest(base.EngineTestCase): def setUp(self): super(ErrorResultTest, self).setUp() test_base.register_action_class('my_action', MyAction) test_base.register_action_class('my_async_action', MyAsyncAction) def test_error_result1(self): wf_service.create_workflows(WF.format(action_name="my_action")) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', wf_input={ 'success_result': None, 'error_result': 2 } ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(2, len(tasks)) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertEqual(states.ERROR, task1.state) self.assertEqual(states.SUCCESS, task2.state) # "publish" clause is ignored in case of ERROR so task execution # field must be empty. self.assertDictEqual({}, task1.published) self.assertEqual(2, data_flow.get_task_execution_result(task1)) def test_error_result2(self): wf_service.create_workflows(WF.format(action_name="my_action")) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', wf_input={ 'success_result': None, 'error_result': 3 } ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(2, len(tasks)) task1 = self._assert_single_item(tasks, name='task1') task3 = self._assert_single_item(tasks, name='task3') self.assertEqual(states.ERROR, task1.state) self.assertEqual(states.SUCCESS, task3.state) # "publish" clause is ignored in case of ERROR so task execution # field must be empty. 
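# The error payload itself remains accessible via task(task1).result,
# which is what the on-error transitions above matched against.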
self.assertDictEqual({}, task1.published) self.assertEqual(3, data_flow.get_task_execution_result(task1)) def test_success_result(self): wf_service.create_workflows(WF.format(action_name="my_action")) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', wf_input={ 'success_result': 'success', 'error_result': None } ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(1, len(tasks)) task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(states.SUCCESS, task1.state) # In case of SUCCESS the "publish" clause must be applied, so the # "published" field must contain the declared variable. self.assertDictEqual({'p_var': 'success'}, task1.published) self.assertEqual( 'success', data_flow.get_task_execution_result(task1) ) def test_async_error_result(self): wf_service.create_workflows(WF.format(action_name="my_async_action")) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', wf_input={ 'success_result': None, 'error_result': 2 } ) # If the action errors, we expect the workflow to continue. The # on-error means the workflow ends in success. self.await_workflow_success(wf_ex.id) def test_async_success_result(self): wf_service.create_workflows(WF.format(action_name="my_async_action")) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', wf_input={ 'success_result': 'success', 'error_result': None } ) # When the action is successful, the workflow will wait in the RUNNING # state for it to complete. self.await_workflow_running(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(1, len(tasks)) task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(states.RUNNING, task1.state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_execution_fields_size_limitation.py0000644000175000017500000002134200000000000031403 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral_lib import actions as actions_base from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.services import workflows as wf_service from mistral.tests.unit import base as test_base from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan') WF = """ --- version: '2.0' wf: input: - workflow_input: '__WORKFLOW_INPUT__' - action_output_length: 0 - action_output_dict: false - action_error: false tasks: task1: action: my_action input: input: '__ACTION_INPUT__' output_length: <% $.action_output_length %> output_dict: <% $.action_output_dict %> error: <% $.action_error %> publish: p_var: '__TASK_PUBLISHED__' """ class MyAction(actions_base.Action): def __init__(self, input, output_length, output_dict=False, error=False): self.input = input self.output_length = output_length self.output_dict = output_dict self.error = error def run(self, context): if not self.output_dict: result = ''.join('A' for _ in range(self.output_length)) else: result = {} for i in range(self.output_length): result[i] = 'A' if not self.error: return actions_base.Result(data=result) else: return actions_base.Result(error=result) def test(self): raise NotImplementedError def generate_workflow(tokens): new_wf = WF long_string = ''.join('A' for _ in range(1024)) for token in tokens: new_wf = new_wf.replace(token, long_string) return new_wf class ExecutionFieldsSizeLimitTest(base.EngineTestCase): def setUp(self): """Resets the size limit config between tests""" super(ExecutionFieldsSizeLimitTest, self).setUp() cfg.CONF.set_default( 'execution_field_size_limit_kb', 0, group='engine' ) test_base.register_action_class('my_action', MyAction) def tearDown(self): """Restores the size limit config to default""" super(ExecutionFieldsSizeLimitTest, self).tearDown() cfg.CONF.set_default( 'execution_field_size_limit_kb', 1024, group='engine' ) def test_default_limit(self): cfg.CONF.set_default( 'execution_field_size_limit_kb', -1, group='engine' ) new_wf = generate_workflow( ['__ACTION_INPUT_', '__WORKFLOW_INPUT__', '__TASK_PUBLISHED__'] ) wf_service.create_workflows(new_wf) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) def test_workflow_input_default_value_limit(self): new_wf = generate_workflow(['__WORKFLOW_INPUT__']) wf_service.create_workflows(new_wf) # Start workflow. e = self.assertRaises( exc.SizeLimitExceededException, self.engine.start_workflow, 'wf' ) self.assertEqual( 'Field size limit exceeded' ' [class=TaskExecution, field=input, size=1KB, limit=0KB]', str(e) ) def test_workflow_input_limit(self): wf_service.create_workflows(WF) # Start workflow. e = self.assertRaises( exc.SizeLimitExceededException, self.engine.start_workflow, 'wf', wf_input={'workflow_input': ''.join('A' for _ in range(1024))} ) self.assertEqual( 'Field size limit exceeded' ' [class=TaskExecution, field=input, size=1KB, limit=0KB]', str(e) ) def test_action_input_limit(self): new_wf = generate_workflow(['__ACTION_INPUT__']) wf_service.create_workflows(new_wf) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.assertEqual(states.ERROR, wf_ex.state) self.assertIn( "Field size limit exceeded" " [class=TaskExecution, field=input, size=1KB, limit=0KB]", wf_ex.state_info ) def test_action_output_limit(self): wf_service.create_workflows(WF) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', wf_input={'action_output_length': 1024} ) self.await_workflow_error(wf_ex.id) # Note: We need to reread execution to access related tasks. 
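# (No enclosing transaction is used here since only scalar fields such
# as 'state_info' are read from the re-fetched execution object.)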
wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertIn( 'Field size limit exceeded' ' [class=TaskExecution, field=output, size=1KB, limit=0KB]', wf_ex.state_info ) self.assertEqual(states.ERROR, wf_ex.state) def test_task_published_limit(self): new_wf = generate_workflow(['__TASK_PUBLISHED__']) wf_service.create_workflows(new_wf) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertIn( 'Failed to handle action completion [error=Field size', wf_ex.state_info ) self.assertIn('wf=wf, task=task1', wf_ex.state_info) task_ex = self._assert_single_item(task_execs, name='task1') self.assertIn( 'Field size limit exceeded' ' [class=TaskExecution, field=published, size=1KB, limit=0KB]', task_ex.state_info ) def test_workflow_params_limit(self): wf_service.create_workflows(WF) # Start workflow. long_string = ''.join('A' for _ in range(1024)) e = self.assertRaises( exc.SizeLimitExceededException, self.engine.start_workflow, 'wf', env={'param': long_string} ) self.assertIn( 'Field size limit exceeded' ' [class=TaskExecution, field=params, size=1KB, limit=0KB]', str(e) ) def test_task_execution_state_info_trimmed(self): # No limit on output, input and other JSON fields. cfg.CONF.set_default( 'execution_field_size_limit_kb', -1, group='engine' ) wf_service.create_workflows(WF) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', wf_input={ 'action_output_length': 80000, 'action_output_dict': True, 'action_error': True } ) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = self._assert_single_item( wf_ex.task_executions, state=states.ERROR ) # "state_info" must be trimmed so that it's not greater than 65535. self.assertLess(len(task_ex.state_info), 65536) self.assertGreater(len(task_ex.state_info), 65490) self.assertLess(len(wf_ex.state_info), 65536) self.assertGreater(len(wf_ex.state_info), 65490) def test_fail_workflow_no_limit(self): cfg.CONF.set_default( 'execution_field_size_limit_kb', -1, group='engine' ) wf_service.create_workflows(WF) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', wf_input={ 'action_output_length': 10000, 'action_output_dict': True, 'action_error': True } ) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertGreater(len(wf_ex.output['result']), 10000) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_execution_params.py0000644000175000017500000000304300000000000026133 0ustar00coreycorey00000000000000# Copyright 2018 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
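# A rough standalone sketch of the kind of check exercised by the
# size-limit tests above, assuming a 1 KB == 1024 bytes convention;
# 'check_field_size' is an illustrative helper, not Mistral's actual
# implementation.
import json


def check_field_size(field_name, value, limit_kb):
    # A negative limit disables the check, mirroring the '-1' value the
    # tests above use to turn the limit off.
    if limit_kb < 0:
        return

    size_kb = len(json.dumps(value).encode('utf-8')) / 1024.0

    if size_kb > limit_kb:
        raise ValueError(
            'Field size limit exceeded [field=%s, size=%dKB, limit=%dKB]'
            % (field_name, size_kb, limit_kb)
        )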
from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class TestExecutionParameters(base.EngineTestCase): def test_null_description(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf', description=None) self.await_workflow_success(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.SUCCESS, wf_ex.state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_integrity_check.py0000644000175000017500000000544000000000000025743 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states class IntegrityCheckTest(base.EngineTestCase): def setUp(self): super(IntegrityCheckTest, self).setUp() self.override_config('auth_enable', False, group='pecan') self.override_config( 'execution_integrity_check_delay', 2, group='engine' ) def test_task_execution_integrity(self): self.override_config('execution_integrity_check_delay', 1, 'engine') # The idea of the test is that we use the no-op asynchronous action # so that action and task execution state is not automatically set # to SUCCESS after we start the workflow. We'll update the action # execution state to SUCCESS directly through the DB and will wait # till task execution integrity is checked and fixed automatically # by a periodic job after about 2 seconds. 
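# (The check interval is governed by the 'execution_integrity_check_delay'
# option, set to 2 seconds in setUp and tightened to 1 second at the top
# of this test.)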
wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop on-success: task2 task2: action: std.async_noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) self.await_task_success(task1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task2_ex = self._assert_single_item( wf_ex.task_executions, name='task2', state=states.RUNNING ) action2_ex = self._assert_single_item( task2_ex.executions, state=states.RUNNING ) db_api.update_action_execution( action2_ex.id, {'state': states.SUCCESS} ) self.await_task_success(task2_ex.id) self.await_workflow_success(wf_ex.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_javascript_action.py0000644000175000017500000001166100000000000026275 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_utils import importutils import testtools from mistral.db.v2 import api as db_api from mistral.expressions import yaql_expression from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.utils import javascript from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') JAVASCRIPT_WORKFLOW = """ version: "2.0" wf: input: - length tasks: task1: action: std.javascript input: script: | let numberSequence = Array.from({length: $['length']}, (x, i) => i); let evenNumbers = numberSequence.filter(x => x % 2 === 0); return evenNumbers.length; context: <% $ %> publish: res: <% task().result %> """ def fake_evaluate(_, context): return context['length'] / 2 class JavaScriptEngineTest(base.EngineTestCase): @testtools.skipIf(not importutils.try_import('py_mini_racer'), 'This test requires that py_mini_racer library was ' 'installed') def test_py_mini_racer_javascript_action(self): cfg.CONF.set_default('js_implementation', 'py_mini_racer') length = 1000 wf_service.create_workflows(JAVASCRIPT_WORKFLOW) # Start workflow. 
        wf_ex = self.engine.start_workflow(
            'wf',
            wf_input={'length': length}
        )

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.SUCCESS, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)
            self.assertEqual(length / 2, task_ex.published['res'])

    @testtools.skipIf(not importutils.try_import('py_mini_racer'),
                      'This test requires that py_mini_racer library was '
                      'installed')
    def test_py_mini_racer_javascript_action_disabled_yaql_conversion(self):
        cfg.CONF.set_default('js_implementation', 'py_mini_racer')

        # Both input and output data conversion in YAQL need to be disabled
        # so that we're sure that there won't be any surprises from YAQL
        # like some YAQL internal types included in expression results.
        self.override_config('convert_input_data', False, 'yaql')
        self.override_config('convert_output_data', False, 'yaql')

        # Setting YAQL engine to None so it reinitialized again with the
        # right values upon the next use.
        yaql_expression.YAQL_ENGINE = None

        wf_text = """---
        version: '2.0'

        wf:
          input:
            - param: default_val

          tasks:
            task1:
              action: std.js
              input:
                context: <% $ %>
                script: >
                  return $.param
              publish:
                result: <% task().result %>
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            t_ex = self._assert_single_item(
                wf_ex.task_executions,
                name='task1'
            )

            self.assertDictEqual({'result': 'default_val'}, t_ex.published)

    @mock.patch.object(javascript, 'evaluate', fake_evaluate)
    def test_fake_javascript_action_data_context(self):
        length = 1000

        wf_service.create_workflows(JAVASCRIPT_WORKFLOW)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wf',
            wf_input={'length': length}
        )

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.SUCCESS, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)
            self.assertEqual(length / 2, task_ex.published['res'])


mistral-10.0.0.0b3/mistral/tests/unit/engine/test_join.py

# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
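# The tests below exercise the "join" task property in its main flavors:
# "join: all" (full join), a numeric cardinality (partial join) and
# "join: one" (discriminator), including how errors propagate into joins.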
from oslo_config import cfg
import testtools

from mistral.db.v2 import api as db_api
from mistral.lang.v2 import tasks as tasks_lang
from mistral.services import workflows as wf_service
from mistral.tests.unit import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states
from mistral_lib import actions as actions_base
from mistral_lib import utils

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')


class ActionWithExceptionInInit(actions_base.Action):
    def __init__(self, aaa):
        super(ActionWithExceptionInInit, self).__init__()

        if aaa != "bbb":
            raise Exception("Aaa doesn't equal bbb")

        self.aaa = aaa

    def run(self, context):
        return actions_base.Result(data=self.aaa)

    def test(self):
        raise NotImplementedError


class JoinEngineTest(base.EngineTestCase):
    def test_full_join_simple(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          tasks:
            join_task:
              join: all

            task1:
              on-success: join_task

            task2:
              on-success: join_task
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            t_execs = wf_ex.task_executions

            self._assert_single_item(t_execs, name='task1')
            self._assert_single_item(t_execs, name='task2')
            self._assert_single_item(t_execs, name='join_task')

    def test_full_join_without_errors(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          output:
            result: <% $.result3 %>

          tasks:
            task1:
              action: std.echo output=1
              publish:
                result1: <% task(task1).result %>
              on-complete:
                - task3

            task2:
              action: std.echo output=2
              publish:
                result2: <% task(task2).result %>
              on-complete:
                - task3

            task3:
              join: all
              action: std.echo output="<% $.result1 %>,<% $.result2 %>"
              publish:
                result3: <% task(task3).result %>
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        # Note: We need to reread execution to access related tasks.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertDictEqual({'result': '1,2'}, wf_ex.output)

            tasks = wf_ex.task_executions

            task1 = self._assert_single_item(tasks, name='task1')
            task2 = self._assert_single_item(tasks, name='task2')
            task3 = self._assert_single_item(tasks, name='task3')

            self.assertEqual(states.SUCCESS, task1.state)
            self.assertEqual(states.SUCCESS, task2.state)
            self.assertEqual(states.SUCCESS, task3.state)

    def test_full_join_with_errors(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          output:
            result: <% $.result3 %>

          tasks:
            task1:
              action: std.echo output=1
              publish:
                result1: <% task(task1).result %>
              on-complete:
                - task3

            task2:
              action: std.fail
              on-error:
                - task3

            task3:
              join: all
              action: std.echo output="<% $.result1 %>-<% $.result1 %>"
              publish:
                result3: <% task(task3).result %>
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        # Note: We need to reread execution to access related tasks.
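        # (Presumably the related collections such as "task_executions"
        # are lazily loaded and only accessible within an open DB
        # transaction, hence the reread below.)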
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertDictEqual({'result': '1-1'}, wf_ex.output)

            tasks = wf_ex.task_executions

            task1 = self._assert_single_item(tasks, name='task1')
            task2 = self._assert_single_item(tasks, name='task2')
            task3 = self._assert_single_item(tasks, name='task3')

            self.assertEqual(states.SUCCESS, task1.state)
            self.assertEqual(states.ERROR, task2.state)
            self.assertEqual(states.SUCCESS, task3.state)

    def test_full_join_with_conditions(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          output:
            result: <% $.result4 %>

          tasks:
            task1:
              action: std.echo output=1
              publish:
                result1: <% task(task1).result %>
              on-complete:
                - task3

            task2:
              action: std.echo output=2
              publish:
                result2: <% task(task2).result %>
              on-complete:
                - task3: <% $.result2 = 11111 %>
                - task4: <% $.result2 = 2 %>

            task3:
              join: all
              action: std.echo output="<% $.result1 %>-<% $.result1 %>"
              publish:
                result3: <% task(task3).result %>

            task4:
              action: std.echo output=4
              publish:
                result4: <% task(task4).result %>
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        def _num_of_tasks():
            return len(
                db_api.get_task_executions(workflow_execution_id=wf_ex.id)
            )

        self._await(lambda: _num_of_tasks() == 4)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            task1 = self._assert_single_item(tasks, name='task1')
            task2 = self._assert_single_item(tasks, name='task2')
            task3 = self._assert_single_item(tasks, name='task3')
            task4 = self._assert_single_item(tasks, name='task4')

        # NOTE(xylan): We ensure task4 is successful here because of the
        # uncertainty of its running in parallel with task3.
        self.await_task_success(task4.id)

        self.assertEqual(states.SUCCESS, task1.state)
        self.assertEqual(states.SUCCESS, task2.state)

        # NOTE(rakhmerov): Task 3 must fail because task2->task3 transition
        # will never trigger due to its condition.
        self.await_task_error(task3.id)

        self.await_workflow_error(wf_ex.id)

    def test_partial_join(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          output:
            result: <% $.result4 %>

          tasks:
            task1:
              action: std.echo output=1
              publish:
                result1: <% task(task1).result %>
              on-complete:
                - task4

            task2:
              action: std.echo output=2
              publish:
                result2: <% task(task2).result %>
              on-complete:
                - task4

            task3:
              action: std.fail
              description: |
                Always fails and 'on-success' never gets triggered.
                However, 'task4' will run since its join cardinality
                is 2 which means 'task1' and 'task2' completion is
                enough to trigger it.
              on-success:
                - task4
              on-error:
                - noop

            task4:
              join: 2
              action: std.echo output="<% $.result1 %>,<% $.result2 %>"
              publish:
                result4: <% task(task4).result %>
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        # Note: We need to reread execution to access related tasks.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertDictEqual({'result': '1,2'}, wf_ex.output)

            tasks = wf_ex.task_executions

            self.assertEqual(4, len(tasks))

            task4 = self._assert_single_item(tasks, name='task4')
            task1 = self._assert_single_item(tasks, name='task1')
            task2 = self._assert_single_item(tasks, name='task2')
            task3 = self._assert_single_item(tasks, name='task3')

            self.assertEqual(states.SUCCESS, task1.state)
            self.assertEqual(states.SUCCESS, task2.state)
            self.assertEqual(states.SUCCESS, task4.state)

            # task3 may still be in RUNNING state and we need to make sure
            # it gets into ERROR state.
            self.await_task_error(task3.id)

            self.assertDictEqual({'result4': '1,2'}, task4.published)

    def test_partial_join_triggers_once(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          output:
            result: <% $.result5 %>

          tasks:
            task1:
              action: std.noop
              publish:
                result1: 1
              on-complete:
                - task5

            task2:
              action: std.noop
              publish:
                result2: 2
              on-complete:
                - task5

            task3:
              action: std.noop
              publish:
                result3: 3
              on-complete:
                - task5

            task4:
              action: std.noop
              publish:
                result4: 4
              on-complete:
                - task5

            task5:
              join: 2
              action: std.echo
              input:
                output: |
                  <% result1 in $.keys() %>,<% result2 in $.keys() %>,
                  <% result3 in $.keys() %>,<% result4 in $.keys() %>
              publish:
                result5: <% task(task5).result %>
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        # Note: We need to reread execution to access related tasks.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            self.assertEqual(5, len(tasks))

            task5 = self._assert_single_item(tasks, name='task5')

            self.assertEqual(states.SUCCESS, task5.state)

            success_count = sum([1 for t in tasks if t.state == states.SUCCESS])

            # At least task4 and two others must be successfully completed.
            self.assertGreaterEqual(success_count, 3)

            result5 = task5.published['result5']

            self.assertIsNotNone(result5)

            # Depending on how many inbound tasks completed before 'join'
            # task5 started, it can get a different inbound context.
            # But at least two inbound results should be accessible at task5
            # which logically corresponds to 'join' cardinality 2.
            self.assertGreaterEqual(result5.count('True'), 2)

    def test_discriminator(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          output:
            result: <% $.result4 %>

          tasks:
            task1:
              action: std.noop
              publish:
                result1: 1
              on-complete:
                - task4

            task2:
              action: std.noop
              publish:
                result2: 2
              on-complete:
                - task4

            task3:
              action: std.noop
              publish:
                result3: 3
              on-complete:
                - task4

            task4:
              join: one
              action: std.echo
              input:
                output: |
                  <% result1 in $.keys() %>,<% result2 in $.keys() %>,
                  <% result3 in $.keys() %>
              publish:
                result4: <% task(task4).result %>
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        # Note: We need to reread execution to access related tasks.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            self.assertEqual(4, len(tasks))

            task4 = self._assert_single_item(tasks, name='task4')

            self.assertEqual(states.SUCCESS, task4.state)

            success_count = sum([1 for t in tasks if t.state == states.SUCCESS])

            # At least task4 and one of others must be successfully completed.
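            # With "join: one" the join task fires as soon as the first
            # inbound task completes, so only task4 plus at least one
            # predecessor are guaranteed to have succeeded.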
            self.assertGreaterEqual(success_count, 2)

            result4 = task4.published['result4']

            self.assertIsNotNone(result4)
            self.assertLess(result4.count('False'), 3)
            self.assertGreaterEqual(result4.count('True'), 1)

    def test_full_join_parallel_published_vars(self):
        wfs_tasks_join_complex = """---
        version: '2.0'

        main:
          type: direct

          output:
            var1: <% $.var1 %>
            var2: <% $.var2 %>
            is_done: <% $.is_done %>

          tasks:
            init:
              publish:
                var1: false
                var2: false
                is_done: false
              on-success:
                - branch1
                - branch2

            branch1:
              workflow: work
              publish:
                var1: true
              on-success:
                - done

            branch2:
              publish:
                var2: true
              on-success:
                - done

            done:
              join: all
              publish:
                is_done: true

        work:
          type: direct

          tasks:
            do:
              action: std.echo output="Doing..."
              on-success:
                - exit

            exit:
              action: std.echo output="Exiting..."
        """
        wf_service.create_workflows(wfs_tasks_join_complex)

        # Start workflow.
        wf_ex = self.engine.start_workflow('main')

        self.await_workflow_success(wf_ex.id)

        # Note: We need to reread execution to access related tasks.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertDictEqual(
                {
                    'var1': True,
                    'is_done': True,
                    'var2': True
                },
                wf_ex.output
            )

    @testtools.skip('https://bugs.launchpad.net/mistral/+bug/1424461')
    def test_full_join_parallel_published_vars_complex(self):
        wf_text = """---
        version: "2.0"

        main:
          type: direct

          output:
            var_a: <% $.var_a %>
            var_b: <% $.var_b %>
            var_c: <% $.var_c %>
            var_d: <% $.var_d %>

          tasks:
            init:
              publish:
                var_a: 0
                var_b: 0
                var_c: 0
              on-success:
                - branch1_0
                - branch2_0

            branch1_0:
              publish:
                var_c: 1
              on-success:
                - branch1_1

            branch2_0:
              publish:
                var_a: 1
              on-success:
                - done

            branch1_1:
              publish:
                var_b: 1
              on-success:
                - done

            done:
              join: all
              publish:
                var_d: 1
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('main')

        self.await_workflow_success(wf_ex.id)

        # Note: We need to reread execution to access related tasks.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertDictEqual(
                {
                    'var_a': 1,
                    'var_b': 1,
                    'var_c': 1,
                    'var_d': 1
                },
                wf_ex.output
            )

    def test_full_join_with_branch_errors(self):
        wf_text = """---
        version: '2.0'

        main:
          type: direct

          tasks:
            task10:
              action: std.noop
              on-success:
                - task21
                - task31

            task21:
              action: std.noop
              on-success:
                - task22

            task22:
              action: std.noop
              on-success:
                - task40

            task31:
              action: std.fail
              on-success:
                - task32

            task32:
              action: std.noop
              on-success:
                - task40

            task40:
              join: all
              action: std.noop
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('main')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            self.assertIsNotNone(wf_ex.state_info)

            task10 = self._assert_single_item(tasks, name='task10')
            task21 = self._assert_single_item(tasks, name='task21')
            task22 = self._assert_single_item(tasks, name='task22')
            task31 = self._assert_single_item(tasks, name='task31')
            task40 = self._assert_single_item(tasks, name='task40')

            self.assertEqual(states.SUCCESS, task10.state)
            self.assertEqual(states.SUCCESS, task21.state)
            self.assertEqual(states.SUCCESS, task22.state)
            self.assertEqual(states.ERROR, task31.state)
            self.assertNotIn('task32', [task.name for task in tasks])
            self.assertEqual(states.ERROR, task40.state)

    def test_diamond_join_all(self):
        wf_text = """---
        version: '2.0'

        test-join:
          tasks:
            a:
              on-success:
                - b
                - c
                - d

            b:
              on-success:
                - e

            c:
              on-success:
                - e

            d:
              on-success:
                - e

            e:
              join: all
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('test-join')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            self._assert_multiple_items(tasks, 5, state=states.SUCCESS)

    def test_join_multiple_routes_with_one_source(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            a:
              on-success:
                - b
                - c

            b:
              on-success:
                - c

            c:
              join: all
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            self._assert_multiple_items(tasks, 3, state=states.SUCCESS)

    def test_join_after_join(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            a:
              on-success:
                - c

            b:
              on-success:
                - c

            c:
              join: all
              on-success:
                - f

            d:
              on-success:
                - f

            e:
              on-success:
                - f

            f:
              join: all
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(6, len(task_execs))

            self._assert_multiple_items(task_execs, 6, state=states.SUCCESS)

    def test_join_route_delays(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            a:
              wait-before: 4
              on-success: b

            b:
              on-success: join

            c:
              on-success: join

            join:
              join: all
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(4, len(task_execs))

            self._assert_multiple_items(task_execs, 4, state=states.SUCCESS)

    def test_delete_join_completion_check_on_stop(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.noop
              on-success: join_task

            task2:
              description: Never ends
              action: std.async_noop
              on-success: join_task

            join_task:
              join: all
        """
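        # The workflow above can never finish on its own: task2 runs the
        # asynchronous no-op action, so join_task stays in WAITING. The
        # point of the test is that stopping the workflow must also delete
        # the delayed "join completion check" calls.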
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self.assertGreaterEqual(len(tasks), 2)

        task1 = self._assert_single_item(tasks, name='task1')

        self.await_task_success(task1.id)

        # Once task1 is finished we know that join_task must be created.
        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self._assert_single_item(
            tasks,
            name='join_task',
            state=states.WAITING
        )

        # Stop the workflow.
        self.engine.stop_workflow(wf_ex.id, state=states.CANCELLED)

        mtd_name = 'mistral.engine.task_handler._refresh_task_state'

        self._await(
            lambda:
            len(db_api.get_delayed_calls(target_method_name=mtd_name)) == 0
        )

    def test_delete_join_completion_check_on_execution_delete(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.noop
              on-success: join_task

            task2:
              description: Never ends
              action: std.async_noop
              on-success: join_task

            join_task:
              join: all
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self.assertGreaterEqual(len(tasks), 2)

        task1 = self._assert_single_item(tasks, name='task1')

        self.await_task_success(task1.id)

        # Once task1 is finished we know that join_task must be created.
        tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

        self._assert_single_item(
            tasks,
            name='join_task',
            state=states.WAITING
        )

        # Stop the workflow.
        db_api.delete_workflow_execution(wf_ex.id)

        mtd_name = 'mistral.engine.task_handler._refresh_task_state'

        self._await(
            lambda:
            len(db_api.get_delayed_calls(target_method_name=mtd_name)) == 0
        )

    def test_join_with_deep_dependencies_tree(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task_a_1:
              on-success:
                - task_with_join

            task_b_1:
              action: std.fail
              on-success:
                - task_b_2

            task_b_2:
              on-success:
                - task_b_3

            task_b_3:
              on-success:
                - task_with_join

            task_with_join:
              join: all
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(3, len(task_execs))

            self._assert_single_item(
                task_execs,
                name='task_a_1',
                state=states.SUCCESS
            )
            self._assert_single_item(
                task_execs,
                name='task_b_1',
                state=states.ERROR
            )
            self._assert_single_item(
                task_execs,
                name='task_with_join',
                state=states.ERROR
            )

    def test_no_workflow_error_after_inbound_error(self):
        wf_text = """---
        version: "2.0"

        wf:
          output:
            continue_flag: <% $.get(continue_flag) %>

          task-defaults:
            on-error:
              - change_continue_flag

          tasks:
            task_a:
              action: std.fail
              on-success:
                - task_c: <% $.get(continue_flag) = null %>
                - task_a_process

            task_a_process:
              action: std.noop

            task_b:
              on-success:
                - task_c: <% $.get(continue_flag) = null %>

            task_c:
              join: all

            change_continue_flag:
              publish:
                continue_flag: false
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

    def test_triggered_by_success(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          tasks:
            join_task:
              join: all

            task1:
              on-success: join_task

            task2:
              on-success: join_task
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            t_execs = wf_ex.task_executions

            task1 = self._assert_single_item(t_execs, name='task1')
            task2 = self._assert_single_item(t_execs, name='task2')
            join_task = self._assert_single_item(t_execs, name='join_task')

            key = 'triggered_by'

            self.assertIsNone(task1.runtime_context.get(key))
            self.assertIsNone(task2.runtime_context.get(key))

            self.assertIn(
                {
                    "task_id": task1.id,
                    "event": "on-success"
                },
                join_task.runtime_context.get(key)
            )
            self.assertIn(
                {
                    "task_id": task2.id,
                    "event": "on-success"
                },
                join_task.runtime_context.get(key)
            )

    def test_triggered_by_error(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          tasks:
            task1:
              on-success: join_task

            task2:
              action: std.fail
              on-success: join_task

            task3:
              action: std.noop
              on-error: join_task

            join_task:
              join: all
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            t_execs = wf_ex.task_executions

            task1 = self._assert_single_item(
                t_execs,
                name='task1',
                state=states.SUCCESS
            )
            task2 = self._assert_single_item(
                t_execs,
                name='task2',
                state=states.ERROR
            )
            task3 = self._assert_single_item(
                t_execs,
                name='task3',
                state=states.SUCCESS
            )
            join_task = self._assert_single_item(
                t_execs,
                name='join_task',
                state=states.ERROR
            )

            key = 'triggered_by'

            self.assertIsNone(task1.runtime_context.get(key))
            self.assertIsNone(task2.runtime_context.get(key))
            self.assertIsNone(task3.runtime_context.get(key))

            self.assertIn(
                {
                    "task_id": task2.id,
                    "event": "not triggered"
                },
                join_task.runtime_context.get(key)
            )
            self.assertIn(
                {
                    "task_id": task3.id,
                    "event": "not triggered"
                },
                join_task.runtime_context.get(key)
            )

    def test_triggered_by_impossible_route(self):
        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          tasks:
            task1:
              on-success: join_task

            task2:
              action: std.fail
              on-success: task3

            task3:
              action: std.noop
              on-success: join_task

            join_task:
              join: all
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            t_execs = wf_ex.task_executions

            task1 = self._assert_single_item(
                t_execs,
                name='task1',
                state=states.SUCCESS
            )
            task2 = self._assert_single_item(
                t_execs,
                name='task2',
                state=states.ERROR
            )
            join_task = self._assert_single_item(
                t_execs,
                name='join_task',
                state=states.ERROR
            )

            self.assertEqual(3, len(t_execs))

            key = 'triggered_by'

            self.assertIsNone(task1.runtime_context.get(key))
            self.assertIsNone(task2.runtime_context.get(key))

            # Note: if an execution does not exist for a previous task
            # we can't track it in "triggered_by" because we'd need to
            # know its ID, so we leave it blank.
            self.assertFalse(join_task.runtime_context.get(key))

    def test_join_saving_task_context_with_all(self):
        workflow = """---
        version: '2.0'

        test_workflow:
          type: direct

          tasks:
            task1:
              action: std.echo output='task1'
              on-success:
                - task2
              publish:
                result: <% task().result %>

            task2:
              action: std.echo output='task2'
              join: all
              publish:
                result: <% task().result %>
        """
        wf_service.create_workflows(workflow)

        wf_ex = self.engine.start_workflow('test_workflow')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            for task in tasks:
                task_result = task.published["result"]

                self.assertEqual(task.name, task_result,
                                 "The result of task must equal own name")

    def test_join_with_long_name(self):
        long_task_name = utils.generate_string(
            tasks_lang.MAX_LENGTH_JOIN_TASK_NAME
        )

        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              on-success:
                - {0}

            task2:
              on-success:
                - {0}

            {0}:
              join: all
        """.format(long_task_name)

        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            task1 = self._assert_single_item(tasks, name='task1')
            task2 = self._assert_single_item(tasks, name='task2')
            task3 = self._assert_single_item(tasks, name=long_task_name)

            self.assertEqual(states.SUCCESS, task1.state)
            self.assertEqual(states.SUCCESS, task2.state)
            self.assertEqual(states.SUCCESS, task3.state)

    def test_join_last_inbound_indirect_error(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.noop
              on-success:
                - join_task

            task2:
              action: std.fail
              wait-before: 2
              on-success:
                - task3

            task3:
              action: std.noop
              on-success:
                - join_task

            join_task:
              join: all
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(3, len(task_execs))

            self._assert_single_item(
                task_execs,
                name='task1',
                state=states.SUCCESS
            )
            self._assert_single_item(
                task_execs,
                name='task2',
                state=states.ERROR
            )
            self._assert_single_item(
                task_execs,
                name='join_task',
                state=states.ERROR
            )

    def test_join_task_with_input_error(self):
        test_base.register_action_class(
            'my_action',
            ActionWithExceptionInInit
        )

        wf_text = """---
        version: '2.0'

        wf:
          type: direct

          tasks:
            join_task:
              action: my_action aaa="aaa"
              join: all

            task1:
              on-success: join_task

            task2:
              on-success: join_task
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            t_execs = wf_ex.task_executions

            self._assert_single_item(t_execs, name='task1',
                                     state=states.SUCCESS)
            self._assert_single_item(t_execs, name='task2',
                                     state=states.SUCCESS)
            self._assert_single_item(t_execs, name='join_task',
                                     state=states.ERROR)


mistral-10.0.0.0b3/mistral/tests/unit/engine/test_names_validation.py

# Copyright 2019 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from mistral import exceptions as exc
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from testtools import ExpectedException

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')


class NameValidationTest(base.EngineTestCase):
    @staticmethod
    def test_workflow_name_validation():
        wf = """
        version: 2.0

        wf name with space:
          tasks:
            t1:
              action: a1
        """
        with ExpectedException(exc.InvalidModelException,
                               "Name 'wf name with space' "
                               "must not contain spaces"):
            wf_service.create_workflows(wf)


mistral-10.0.0.0b3/mistral/tests/unit/engine/test_noop_task.py

# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg

from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import states

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')

WF = """
---
version: '2.0'

wf:
  type: direct

  input:
    - num1
    - num2

  output:
    result: <% $.result %>

  tasks:
    task1:
      action: std.echo output=<% $.num1 %>
      publish:
        result1: <% task(task1).result %>
      on-complete:
        - task3

    task2:
      action: std.echo output=<% $.num2 %>
      publish:
        result2: <% task(task2).result %>
      on-complete:
        - task3

    task3:
      description: |
        This task doesn't have an "action" or "workflow" property. It works
        as a "no-op" task and serves just as a decision point in the workflow.
      join: all
      on-complete:
        - task4: <% $.num1 + $.num2 = 2 %>
        - task5: <% $.num1 + $.num2 = 3 %>

    task4:
      action: std.echo output=4
      publish:
        result: <% task(task4).result %>

    task5:
      action: std.echo output=5
      publish:
        result: <% task(task5).result %>
"""


class NoopTaskEngineTest(base.EngineTestCase):
    def test_noop_task1(self):
        wf_service.create_workflows(WF)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wf',
            wf_input={'num1': 1, 'num2': 1}
        )

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            wf_output = wf_ex.output
            tasks = wf_ex.task_executions

            self.assertEqual(4, len(tasks))

            task1 = self._assert_single_item(tasks, name='task1')
            task2 = self._assert_single_item(tasks, name='task2')
            task3 = self._assert_single_item(tasks, name='task3')
            task4 = self._assert_single_item(tasks, name='task4')

            self.assertEqual(states.SUCCESS, task1.state)
            self.assertEqual(states.SUCCESS, task2.state)
            self.assertEqual(states.SUCCESS, task3.state)
            self.assertEqual(states.SUCCESS, task4.state)

            self.assertDictEqual({'result': 4}, wf_output)

    def test_noop_task2(self):
        wf_service.create_workflows(WF)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wf',
            wf_input={'num1': 1, 'num2': 2}
        )

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            wf_output = wf_ex.output
            tasks = wf_ex.task_executions

            self.assertEqual(4, len(tasks))

            task1 = self._assert_single_item(tasks, name='task1')
            task2 = self._assert_single_item(tasks, name='task2')
            task3 = self._assert_single_item(tasks, name='task3')
            task5 = self._assert_single_item(tasks, name='task5')

            self.assertEqual(states.SUCCESS, task1.state)
            self.assertEqual(states.SUCCESS, task2.state)
            self.assertEqual(states.SUCCESS, task3.state)
            self.assertEqual(states.SUCCESS, task5.state)

            self.assertDictEqual({'result': 5}, wf_output)


mistral-10.0.0.0b3/mistral/tests/unit/engine/test_policies.py

# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from eventlet import timeout
import mock
from oslo_config import cfg
import requests

from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral.engine import policies
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral.rpc import clients as rpc
from mistral.services import workbooks as wb_service
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import states
from mistral_lib import actions as ml_actions
from mistral_lib.actions import types

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')

WORKBOOK = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    tasks:
      task1:
        action: std.echo output="Hi!"
        wait-before: 2
        wait-after: 5
        timeout: 7
        retry:
          count: 5
          delay: 10
          break-on: <% $.my_val = 10 %>
"""

WB_WITH_DEFAULTS = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    task-defaults:
      wait-before: 2
      retry:
        count: 2
        delay: 1

    tasks:
      task1:
        action: std.echo output="Hi!"
        wait-before: 3
        wait-after: 5
        timeout: 7
"""

WAIT_BEFORE_WB = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    tasks:
      task1:
        action: std.echo output="Hi!"
        wait-before: %d
"""

WAIT_BEFORE_FROM_VAR = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    input:
      - wait_before

    tasks:
      task1:
        action: std.echo output="Hi!"
        wait-before: <% $.wait_before %>
"""

WAIT_AFTER_WB = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    tasks:
      task1:
        action: std.echo output="Hi!"
        wait-after: %d
"""

WAIT_AFTER_FROM_VAR = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    input:
      - wait_after

    tasks:
      task1:
        action: std.echo output="Hi!"
        wait-after: <% $.wait_after %>
"""

RETRY_WB = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    tasks:
      task1:
        action: std.http url="http://some_non-existing_host"
        retry:
          count: %(count)d
          delay: %(delay)d
"""

RETRY_WB_FROM_VAR = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    input:
      - count
      - delay

    tasks:
      task1:
        action: std.http url="http://some_non-existing_host"
        retry:
          count: <% $.count %>
          delay: <% $.delay %>
"""

TIMEOUT_WB = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    tasks:
      task1:
        action: std.async_noop
        timeout: %d
        on-error:
          - task2

      task2:
        action: std.echo output="Hi!"
        timeout: 3
"""

TIMEOUT_WB2 = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    tasks:
      task1:
        action: std.async_noop
        timeout: 1
"""

TIMEOUT_FROM_VAR = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    input:
      - timeout

    tasks:
      task1:
        action: std.async_noop
        timeout: <% $.timeout %>
"""

PAUSE_BEFORE_WB = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    tasks:
      task1:
        action: std.echo output="Hi!"
        pause-before: True
        on-success:
          - task2

      task2:
        action: std.echo output="Bye!"
"""

PAUSE_BEFORE_DELAY_WB = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    tasks:
      task1:
        action: std.echo output="Hi!"
        wait-before: 1
        pause-before: true
        on-success:
          - task2

      task2:
        action: std.echo output="Bye!"
"""

CONCURRENCY_WB = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    tasks:
      task1:
        action: std.echo output="Hi!"
        concurrency: %d
"""

CONCURRENCY_WB_FROM_VAR = """
---
version: '2.0'

name: wb

workflows:
  wf1:
    type: direct

    input:
      - concurrency

    tasks:
      task1:
        action: std.echo output="Hi!"
        concurrency: <% $.concurrency %>
"""


class PoliciesTest(base.EngineTestCase):
    def setUp(self):
        super(PoliciesTest, self).setUp()

        self.wb_spec = spec_parser.get_workbook_spec_from_yaml(WORKBOOK)
        self.wf_spec = self.wb_spec.get_workflows()['wf1']
        self.task_spec = self.wf_spec.get_tasks()['task1']

    def test_build_policies(self):
        arr = policies.build_policies(
            self.task_spec.get_policies(),
            self.wf_spec
        )

        self.assertEqual(4, len(arr))

        p = self._assert_single_item(arr, delay=2)
        self.assertIsInstance(p, policies.WaitBeforePolicy)

        p = self._assert_single_item(arr, delay=5)
        self.assertIsInstance(p, policies.WaitAfterPolicy)

        p = self._assert_single_item(arr, delay=10)
        self.assertIsInstance(p, policies.RetryPolicy)
        self.assertEqual(5, p.count)
        self.assertEqual('<% $.my_val = 10 %>', p._break_on_clause)

        p = self._assert_single_item(arr, delay=7)
        self.assertIsInstance(p, policies.TimeoutPolicy)

    def test_task_policy_class(self):
        policy = policies.base.TaskPolicy()

        policy._schema = {
            "properties": {
                "delay": {"type": "integer"}
            }
        }

        wf_ex = models.WorkflowExecution(
            id='1-2-3-4',
            context={},
            input={},
            params={}
        )

        task_ex = models.TaskExecution(in_context={'int_var': 5})
        task_ex.workflow_execution = wf_ex

        policy.delay = "<% $.int_var %>"

        # Validation is ok.
        policy.before_task_start(task_ex, None)

        policy.delay = "some_string"

        # Validation is failing now.
        exception = self.assertRaises(
            exc.InvalidModelException,
            policy.before_task_start,
            task_ex,
            None
        )

        self.assertIn("Invalid data type in TaskPolicy", str(exception))

    def test_build_policies_with_workflow_defaults(self):
        wb_spec = spec_parser.get_workbook_spec_from_yaml(WB_WITH_DEFAULTS)
        wf_spec = wb_spec.get_workflows()['wf1']
        task_spec = wf_spec.get_tasks()['task1']

        arr = policies.build_policies(task_spec.get_policies(), wf_spec)

        self.assertEqual(4, len(arr))

        p = self._assert_single_item(arr, delay=3)
        self.assertIsInstance(p, policies.WaitBeforePolicy)

        p = self._assert_single_item(arr, delay=5)
        self.assertIsInstance(p, policies.WaitAfterPolicy)

        p = self._assert_single_item(arr, delay=1)
        self.assertIsInstance(p, policies.RetryPolicy)
        self.assertEqual(2, p.count)

        p = self._assert_single_item(arr, delay=7)
        self.assertIsInstance(p, policies.TimeoutPolicy)

    def test_wait_before_policy(self):
        wb_service.create_workbook_v2(WAIT_BEFORE_WB % 1)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING_DELAYED, task_ex.state)
            self.assertDictEqual(
                {'wait_before_policy': {'skip': True}},
                task_ex.runtime_context
            )

        self.await_workflow_success(wf_ex.id)

    def test_wait_before_policy_zero_seconds(self):
        wb_service.create_workbook_v2(WAIT_BEFORE_WB % 0)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING, task_ex.state)

        self.await_workflow_success(wf_ex.id)

    def test_wait_before_policy_negative_number(self):
        self.assertRaises(
            exc.InvalidModelException,
            wb_service.create_workbook_v2,
            WAIT_BEFORE_WB % -1
        )

    def test_wait_before_policy_from_var(self):
        wb_service.create_workbook_v2(WAIT_BEFORE_FROM_VAR)

        # Start workflow.
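        # Here "wait-before" is a YAQL expression over workflow input, so
        # the delay value is only known at run time.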
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'wait_before': 1}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING_DELAYED, task_ex.state)

        self.await_workflow_success(wf_ex.id)

    def test_wait_before_policy_from_var_zero_seconds(self):
        wb_service.create_workbook_v2(WAIT_BEFORE_FROM_VAR)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'wait_before': 0}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            # If wait_before is 0 start the task immediately without delay.
            self.assertEqual(states.RUNNING, task_ex.state)

        self.await_workflow_success(wf_ex.id)

    def test_wait_before_policy_from_var_negative_number(self):
        wb_service.create_workbook_v2(WAIT_BEFORE_FROM_VAR)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'wait_before': -1}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            # If wait_before value is less than 0 the task should fail with
            # InvalidModelException.
            self.assertEqual(states.ERROR, task_ex.state)

        self.await_workflow_error(wf_ex.id)

    def test_wait_before_policy_two_tasks(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            a:
              wait-before: 2
              on-success: b

            b:
              action: std.noop
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            self.assertEqual(2, len(task_execs))

            self._assert_multiple_items(task_execs, 2, state=states.SUCCESS)

    def test_wait_after_policy(self):
        wb_service.create_workbook_v2(WAIT_AFTER_WB % 2)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        self.await_task_delayed(task_ex.id, delay=0.5)
        self.await_task_success(task_ex.id)

    def test_wait_after_policy_zero_seconds(self):
        wb_service.create_workbook_v2(WAIT_AFTER_WB % 0)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        try:
            self.await_task_delayed(task_ex.id, delay=0.5)
        except AssertionError:
            # There was no delay as expected.
            pass
        else:
            self.fail("Shouldn't happen")

        self.await_task_success(task_ex.id)

    def test_wait_after_policy_negative_number(self):
        self.assertRaises(
            exc.InvalidModelException,
            wb_service.create_workbook_v2,
            WAIT_AFTER_WB % -1
        )

    def test_wait_after_policy_from_var(self):
        wb_service.create_workbook_v2(WAIT_AFTER_FROM_VAR)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'wait_after': 2}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        self.await_task_delayed(task_ex.id, delay=0.5)
        self.await_task_success(task_ex.id)

    def test_wait_after_policy_from_var_zero_seconds(self):
        wb_service.create_workbook_v2(WAIT_AFTER_FROM_VAR)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'wait_after': 0}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        try:
            self.await_task_delayed(task_ex.id, delay=0.5)
        except AssertionError:
            # There was no delay as expected.
            pass
        else:
            self.fail("Shouldn't happen")

        self.await_task_success(task_ex.id)

    def test_wait_after_policy_from_var_negative_number(self):
        wb_service.create_workbook_v2(WAIT_AFTER_FROM_VAR)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'wait_after': -1}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            # If wait_after value is less than 0 the task should fail with
            # InvalidModelException.
            self.assertEqual(states.ERROR, task_ex.state)

        self.await_workflow_error(wf_ex.id)

        self.assertDictEqual({}, task_ex.runtime_context)

    @mock.patch.object(
        requests,
        'request',
        mock.MagicMock(side_effect=Exception())
    )
    def test_retry_policy(self):
        wb_service.create_workbook_v2(RETRY_WB % {'count': 3, 'delay': 1})

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        self.await_task_delayed(task_ex.id, delay=0.5)
        self.await_task_error(task_ex.id)

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(
                3,
                task_ex.runtime_context["retry_task_policy"]["retry_no"]
            )

    @mock.patch.object(
        requests,
        'request',
        mock.MagicMock(side_effect=Exception())
    )
    def test_retry_policy_zero_count(self):
        wb_service.create_workbook_v2(RETRY_WB % {'count': 0, 'delay': 1})

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        try:
            self.await_task_delayed(task_ex.id, delay=0.5)
        except AssertionError:
            # There were no scheduled tasks as expected.
            pass
        else:
            self.fail("Shouldn't happen")

        self.await_task_error(task_ex.id)

        self.await_workflow_error(wf_ex.id)

        self.assertNotIn("retry_task_policy", task_ex.runtime_context)

    @mock.patch.object(
        requests,
        'request',
        mock.MagicMock(side_effect=Exception())
    )
    def test_retry_policy_negative_numbers(self):
        # Negative delay is not accepted.
        self.assertRaises(
            exc.InvalidModelException,
            wb_service.create_workbook_v2,
            RETRY_WB % {'count': 1, 'delay': -1}
        )

        # Negative count is not accepted.
        self.assertRaises(
            exc.InvalidModelException,
            wb_service.create_workbook_v2,
            RETRY_WB % {'count': -1, 'delay': 1}
        )

    @mock.patch.object(
        requests,
        'request',
        mock.MagicMock(side_effect=Exception())
    )
    def test_retry_policy_from_var(self):
        wb_service.create_workbook_v2(RETRY_WB_FROM_VAR)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'count': 3, 'delay': 1}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        self.await_task_delayed(task_ex.id, delay=0.5)
        self.await_task_error(task_ex.id)

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(
                3,
                task_ex.runtime_context["retry_task_policy"]["retry_no"]
            )

    @mock.patch.object(
        requests,
        'request',
        mock.MagicMock(side_effect=Exception())
    )
    def test_retry_policy_from_var_zero_iterations(self):
        wb_service.create_workbook_v2(RETRY_WB_FROM_VAR)

        # Start workflow.
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'count': 0, 'delay': 1}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.RUNNING, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        try:
            self.await_task_delayed(task_ex.id, delay=0.5)
        except AssertionError:
            # There were no scheduled tasks as expected.
            pass
        else:
            self.fail("Shouldn't happen")

        self.await_task_error(task_ex.id)

        self.await_workflow_error(wf_ex.id)

        self.assertNotIn("retry_task_policy", task_ex.runtime_context)

    @mock.patch.object(
        requests,
        'request',
        mock.MagicMock(side_effect=Exception())
    )
    def test_retry_policy_from_var_negative_numbers(self):
        wb_service.create_workbook_v2(RETRY_WB_FROM_VAR)

        # Start workflow with negative count.
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'count': -1, 'delay': 1}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.ERROR, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        self.await_workflow_error(wf_ex.id)

        # Start workflow with negative delay.
        wf_ex = self.engine.start_workflow(
            'wb.wf1',
            wf_input={'count': 1, 'delay': -1}
        )

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(states.ERROR, task_ex.state)
            self.assertDictEqual({}, task_ex.runtime_context)

        self.await_workflow_error(wf_ex.id)

    def test_retry_policy_never_happen(self):
        retry_wb = """---
        version: '2.0'

        name: wb

        workflows:
          wf1:
            tasks:
              task1:
                action: std.echo output="hello"
                retry:
                  count: 3
                  delay: 1
        """
        wb_service.create_workbook_v2(retry_wb)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

        self.await_task_success(task_ex.id)
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(
                {},
                task_ex.runtime_context["retry_task_policy"]
            )

    def test_retry_policy_break_on(self):
        retry_wb = """---
        version: '2.0'

        name: wb

        workflows:
          wf1:
            input:
              - var: 4

            tasks:
              task1:
                action: std.fail
                retry:
                  count: 3
                  delay: 1
                  break-on: <% $.var >= 3 %>
        """
        wb_service.create_workbook_v2(retry_wb)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

        self.await_task_error(task_ex.id)
        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(
                {},
                task_ex.runtime_context["retry_task_policy"]
            )

    def test_retry_policy_break_on_not_happened(self):
        retry_wb = """---
        version: '2.0'

        name: wb

        workflows:
          wf1:
            input:
              - var: 2

            tasks:
              task1:
                action: std.fail
                retry:
                  count: 3
                  delay: 1
                  break-on: <% $.var >= 3 %>
        """
        wb_service.create_workbook_v2(retry_wb)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

        self.await_task_error(task_ex.id)
        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(
                3,
                task_ex.runtime_context['retry_task_policy']['retry_no']
            )

    @mock.patch.object(
        std_actions.EchoAction,
        'run',
        mock.Mock(side_effect=[1, 2, 3, 4])
    )
    def test_retry_continue_on(self):
        retry_wb = """---
        version: '2.0'

        name: wb

        workflows:
          wf1:
            tasks:
              task1:
                action: std.echo output="mocked result"
                retry:
                  count: 4
                  delay: 1
                  continue-on: <% task(task1).result < 3 %>
        """
        wb_service.create_workbook_v2(retry_wb)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

        self.await_task_success(task_ex.id)
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(
                2,
                task_ex.runtime_context['retry_task_policy']['retry_no']
            )

    def test_retry_continue_on_not_happened(self):
        retry_wb = """---
        version: '2.0'

        name: wb

        workflows:
          wf1:
            tasks:
              task1:
                action: std.echo output=4
                retry:
                  count: 4
                  delay: 1
                  continue-on: <% task(task1).result <= 3 %>
        """
        wb_service.create_workbook_v2(retry_wb)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
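            # (std.echo returns 4 on the first run, so the continue-on
            # condition "result <= 3" is false right away and presumably
            # no retry iterations get recorded.)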
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

        self.await_task_success(task_ex.id)
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(
                {},
                task_ex.runtime_context['retry_task_policy']
            )

    def test_retry_policy_one_line(self):
        retry_wb = """---
        version: '2.0'

        name: wb

        workflows:
          wf1:
            type: direct

            tasks:
              task1:
                action: std.fail
                retry: count=3 delay=1
        """
        wb_service.create_workbook_v2(retry_wb)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

        self.await_task_error(task_ex.id)
        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(
                3,
                task_ex.runtime_context['retry_task_policy']['retry_no']
            )

    def test_retry_policy_subworkflow_force_fail(self):
        retry_wb = """---
        version: '2.0'

        name: wb

        workflows:
          main:
            tasks:
              task1:
                workflow: work
                retry:
                  count: 3
                  delay: 1

          work:
            tasks:
              do:
                action: std.fail
                on-error:
                  - fail
        """
        wb_service.create_workbook_v2(retry_wb)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.main')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

        self.await_task_error(task_ex.id)
        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

            self.assertEqual(
                3,
                task_ex.runtime_context['retry_task_policy']['retry_no']
            )

    @mock.patch.object(
        std_actions.EchoAction,
        'run',
        mock.Mock(side_effect=[exc.ActionException(), "mocked result"])
    )
    def test_retry_policy_succeed_after_failure(self):
        retry_wb = """---
        version: '2.0'

        name: wb

        workflows:
          wf1:
            output:
              result: <% task(task1).result %>

            tasks:
              task1:
                action: std.echo output="mocked result"
                retry:
                  count: 3
                  delay: 1
        """
        wb_service.create_workbook_v2(retry_wb)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = wf_ex.task_executions[0]

        self.await_task_success(task_ex.id)
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            wf_output = wf_ex.output
            task_ex = wf_ex.task_executions[0]

            self.assertDictEqual(
                {'retry_no': 1},
                task_ex.runtime_context['retry_task_policy']
            )

        self.assertDictEqual({'result': 'mocked result'}, wf_output)

    def test_retry_join_task_after_failed_task(self):
        retry_wb = """---
        version: '2.0'

        name: wb

        workflows:
          wf1:
            task-defaults:
              retry:
                count: 1
                delay: 0

            tasks:
              task1:
                on-success: join_task

              task2:
                action: std.fail
                on-success: join_task

              join_task:
                join: all
        """
        wb_service.create_workbook_v2(retry_wb)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf1')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self._assert_single_item(tasks, name="task2", state=states.ERROR) self._assert_single_item(tasks, name="join_task", state=states.ERROR) def test_retry_join_task_after_idle_task(self): retry_wb = """--- version: '2.0' name: wb workflows: wf1: task-defaults: retry: count: 1 delay: 0 tasks: task1: on-success: join_task task2: action: std.fail on-success: task3 task3: on-success: join_task join_task: join: all """ wb_service.create_workbook_v2(retry_wb) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self._assert_single_item(tasks, name="task2", state=states.ERROR) self._assert_single_item(tasks, name="join_task", state=states.ERROR) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock(side_effect=[exc.ActionException(), 'value']) ) def test_retry_policy_succeed_after_failure_with_publish(self): retry_wf = """--- version: '2.0' wf1: output: result: <% task(task2).result %> tasks: task1: action: std.noop publish: key: value on-success: - task2 task2: action: std.echo output=<% $.key %> retry: count: 3 delay: 1 """ wf_service.create_workflows(retry_wf) wf_ex = self.engine.start_workflow('wf1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output task_execs = wf_ex.task_executions retry_task = self._assert_single_item(task_execs, name='task2') self.assertDictEqual( {'retry_no': 1}, retry_task.runtime_context['retry_task_policy'] ) self.assertDictEqual({'result': 'value'}, wf_output) @mock.patch.object( std_actions.MistralHTTPAction, 'run', mock.MagicMock(return_value='mock') ) def test_retry_async_action(self): retry_wf = """--- version: '2.0' repeated_retry: tasks: async_http: retry: delay: 0 count: 100 action: std.mistral_http url='https://google.com' """ wf_service.create_workflows(retry_wf) wf_ex = self.engine.start_workflow('repeated_retry') self.await_workflow_running(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.await_task_running(task_ex.id) first_action_ex = task_ex.executions[0] self.await_action_state(first_action_ex.id, states.RUNNING) complete_action_params = ( first_action_ex.id, ml_actions.Result(error="mock") ) rpc.get_engine_client().on_action_complete(*complete_action_params) for _ in range(2): self.assertRaises( exc.MistralException, rpc.get_engine_client().on_action_complete, *complete_action_params ) self.await_task_running(task_ex.id) with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex.id) action_exs = task_ex.executions self.assertEqual(2, len(action_exs)) for action_ex in action_exs: if action_ex.id == first_action_ex.id: expected_state = states.ERROR else: expected_state = states.RUNNING self.assertEqual(expected_state, action_ex.state) def test_timeout_policy(self): wb_service.create_workbook_v2(TIMEOUT_WB % 2) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.RUNNING, task_ex.state) self.await_task_error(task_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self._assert_single_item(task_execs, name='task1') self.await_workflow_success(wf_ex.id) def test_timeout_policy_zero_seconds(self): wb = """--- version: '2.0' name: wb workflows: wf1: type: direct tasks: task1: action: std.echo output="Hi!" timeout: 0 """ wb_service.create_workbook_v2(wb) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.RUNNING, task_ex.state) self.await_task_success(task_ex.id) self.await_workflow_success(wf_ex.id) def test_timeout_policy_negative_number(self): # Negative timeout is not accepted. self.assertRaises( exc.InvalidModelException, wb_service.create_workbook_v2, TIMEOUT_WB % -1 ) def test_timeout_policy_success_after_timeout(self): wb_service.create_workbook_v2(TIMEOUT_WB2) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.RUNNING, task_ex.state) self.await_task_error(task_ex.id) self.await_workflow_error(wf_ex.id) # Wait until timeout exceeds. self._sleep(1) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions # Make sure that engine did not create extra tasks. self.assertEqual(1, len(task_execs)) def test_timeout_policy_from_var(self): wb_service.create_workbook_v2(TIMEOUT_FROM_VAR) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1', wf_input={'timeout': 1}) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.RUNNING, task_ex.state) self.await_task_error(task_ex.id) self.await_workflow_error(wf_ex.id) def test_timeout_policy_from_var_zero_seconds(self): wb = """--- version: '2.0' name: wb workflows: wf1: type: direct input: - timeout tasks: task1: action: std.echo output="Hi!" timeout: <% $.timeout %> """ wb_service.create_workbook_v2(wb) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1', wf_input={'timeout': 0}) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.RUNNING, task_ex.state) self.await_task_success(task_ex.id) self.await_workflow_success(wf_ex.id) def test_timeout_policy_from_var_negative_number(self): wb_service.create_workbook_v2(TIMEOUT_FROM_VAR) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1', wf_input={'timeout': -1}) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.ERROR, task_ex.state) self.await_workflow_error(wf_ex.id) def test_retry_with_input(self): wf_text = """--- version: '2.0' wf1: tasks: task1: action: std.noop on-success: - task2 publish: success: task4 task2: action: std.noop on-success: - task4: <% $.success = 'task4' %> - task5: <% $.success = 'task5' %> task4: on-complete: - task5 publish: param: data task5: action: std.echo input: output: { "status": 200, "param": <% $.param %> } publish: res: <% task(task5).result %> retry: continue-on: <% $.res.status < 300 %> count: 1 delay: 1 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf1') self.await_workflow_success(wf_ex.id) def test_action_timeout(self): wf_text = """--- version: '2.0' wf1: tasks: task1: action: std.sleep seconds=10 timeout: 2 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf1') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] action_ex = task_ex.action_executions[0] with timeout.Timeout(8): self.await_workflow_error(wf_ex.id) self.await_task_error(task_ex.id) self.await_action_error(action_ex.id) def test_pause_before_policy(self): wb_service.create_workbook_v2(PAUSE_BEFORE_WB) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.IDLE, task_ex.state) self.await_workflow_paused(wf_ex.id) self._sleep(1) self.engine.resume_workflow(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self._assert_single_item(task_execs, name='task1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') next_task_ex = self._assert_single_item(task_execs, name='task2') self.assertEqual(states.SUCCESS, task_ex.state) self.assertEqual(states.SUCCESS, next_task_ex.state) def test_pause_before_with_delay_policy(self): wb_service.create_workbook_v2(PAUSE_BEFORE_DELAY_WB) # Start workflow. 
wf_ex = self.engine.start_workflow('wb.wf1') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.IDLE, task_ex.state) # Verify wf paused by pause-before self.await_workflow_paused(wf_ex.id) # Allow wait-before to expire self._sleep(2) wf_ex = db_api.get_workflow_execution(wf_ex.id) # Verify wf still paused (wait-before didn't reactivate) self.await_workflow_paused(wf_ex.id) task_ex = db_api.get_task_execution(task_ex.id) self.assertEqual(states.IDLE, task_ex.state) self.engine.resume_workflow(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self._assert_single_item(task_execs, name='task1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') next_task_ex = self._assert_single_item(task_execs, name='task2') self.assertEqual(states.SUCCESS, task_ex.state) self.assertEqual(states.SUCCESS, next_task_ex.state) def test_concurrency_is_in_runtime_context(self): wb_service.create_workbook_v2(CONCURRENCY_WB % 4) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.SUCCESS, task_ex.state) self.assertEqual(4, task_ex.runtime_context['concurrency']) def test_concurrency_is_in_runtime_context_zero_value(self): wb_service.create_workbook_v2(CONCURRENCY_WB % 0) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.SUCCESS, task_ex.state) self.assertNotIn('concurrency', task_ex.runtime_context) def test_concurrency_is_in_runtime_context_negative_number(self): # Negative concurrency value is not accepted. self.assertRaises( exc.InvalidModelException, wb_service.create_workbook_v2, CONCURRENCY_WB % -1 ) def test_concurrency_is_in_runtime_context_from_var(self): wb_service.create_workbook_v2(CONCURRENCY_WB_FROM_VAR) # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf1', wf_input={'concurrency': 4} ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(4, task_ex.runtime_context['concurrency']) def test_concurrency_is_in_runtime_context_from_var_zero_value(self): wb_service.create_workbook_v2(CONCURRENCY_WB_FROM_VAR) # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf1', wf_input={'concurrency': 0} ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') self.assertNotIn('concurrency', task_ex.runtime_context) def test_concurrency_is_in_runtime_context_from_var_negative_number(self): wb_service.create_workbook_v2(CONCURRENCY_WB_FROM_VAR) # Start workflow. 
wf_ex = self.engine.start_workflow( 'wb.wf1', wf_input={'concurrency': -1} ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.ERROR, task_ex.state) self.await_workflow_error(wf_ex.id) def test_wrong_policy_prop_type(self): wb = """--- version: "2.0" name: wb workflows: wf1: type: direct input: - wait_before tasks: task1: action: std.echo output="Hi!" wait-before: <% $.wait_before %> """ wb_service.create_workbook_v2(wb) # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf1', wf_input={'wait_before': '1'} ) self.assertIn( 'Invalid data type in WaitBeforePolicy', wf_ex.state_info ) self.assertEqual(states.ERROR, wf_ex.state) def test_delayed_task_and_correct_finish_workflow(self): wf_delayed_state = """--- version: "2.0" wf: type: direct tasks: task1: action: std.noop wait-before: 1 task2: action: std.noop """ wf_service.create_workflows(wf_delayed_state) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(2, len(wf_ex.task_executions)) @mock.patch('mistral.actions.std_actions.EchoAction.run') def test_retry_policy_break_on_with_dict(self, run_method): run_method.return_value = types.Result(error={'key-1': 15}) wf_retry_break_on_with_dictionary = """--- version: '2.0' name: wb workflows: wf1: tasks: fail_task: action: std.echo output='mock' retry: count: 3 delay: 1 break-on: <% task().result['key-1'] = 15 %> """ wb_service.create_workbook_v2(wf_retry_break_on_with_dictionary) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) fail_task_ex = wf_ex.task_executions[0] self.assertEqual(states.ERROR, fail_task_ex.state) self.assertEqual( {}, fail_task_ex.runtime_context["retry_task_policy"] ) def test_fail_on_true_condition(self): retry_wb = """--- version: '2.0' name: wb workflows: wf1: tasks: task1: action: std.echo output=4 fail-on: <% task(task1).result <= 4 %> """ wb_service.create_workbook_v2(retry_wb) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(task_ex.state, states.ERROR, "Check task state") def test_fail_on_false_condition(self): retry_wb = """--- version: '2.0' name: wb workflows: wf1: tasks: task1: action: std.echo output=4 fail-on: <% task(task1).result != 4 %> """ wb_service.create_workbook_v2(retry_wb) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(task_ex.state, states.SUCCESS, "Check task state") def test_fail_on_true_condition_task_defaults(self): retry_wb = """--- version: '2.0' name: wb workflows: wf1: task-defaults: fail-on: <% task().result <= 4 %> tasks: task1: action: std.echo output=4 """ wb_service.create_workbook_v2(retry_wb) # Start workflow. 
wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(task_ex.state, states.ERROR, "Check task state") @mock.patch.object( std_actions.EchoAction, 'run', mock.Mock(side_effect=[1, 2, 3, 4]) ) def test_fail_on_with_retry(self): retry_wb = """--- version: '2.0' name: wb workflows: wf1: tasks: task1: action: std.echo output="mocked" fail-on: <% task(task1).result <= 2 %> retry: count: 3 delay: 0 """ wb_service.create_workbook_v2(retry_wb) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(task_ex.state, states.SUCCESS, "Check task state") self.assertEqual( 2, task_ex.runtime_context['retry_task_policy']['retry_no'] ) @mock.patch.object( std_actions.EchoAction, 'run', mock.Mock(side_effect=[1, 2, 3, 4]) ) def test_fail_on_with_retry_and_with_items(self): retry_wb = """--- version: '2.0' name: wb workflows: wf1: tasks: task1: with-items: x in [1, 2] action: std.echo output="mocked" fail-on: <% not task(task1).result.contains(4) %> retry: count: 3 delay: 0 """ wb_service.create_workbook_v2(retry_wb) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(task_ex.state, states.SUCCESS, "Check task state") self.assertEqual( 1, task_ex.runtime_context['retry_task_policy']['retry_no'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_profiler.py0000644000175000017500000000516400000000000024415 0ustar00coreycorey00000000000000# Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg import osprofiler from mistral import context from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') cfg.CONF.set_default('enabled', True, group='profiler') cfg.CONF.set_default('hmac_keys', 'foobar', group='profiler') cfg.CONF.set_default('profiler_log_name', 'profile_trace', group='profiler') class EngineProfilerTest(base.EngineTestCase): def setUp(self): super(EngineProfilerTest, self).setUp() # Configure the profiler. self.mock_profiler_log_func = mock.Mock(return_value=None) osprofiler.notifier.set(self.mock_profiler_log_func) self.ctx_serializer = context.RpcContextSerializer() def test_profile_trace(self): wf_def = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Peace!" 
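
# Editor's note: an illustrative helper, not part of the original test
# module. The retry tests above all assert against the same structure: the
# retry policy records its progress under the 'retry_task_policy' key of a
# task execution's runtime_context, and 'retry_no' counts the retries that
# actually ran (the dict stays empty when the policy never fires). A small
# accessor like this captures that shape; 'task_ex' stands for any task
# execution model object.
def _retries_performed(task_ex):
    # An empty policy dict means no retry happened at all.
    return task_ex.runtime_context.get(
        'retry_task_policy', {}
    ).get('retry_no', 0)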
""" wf_service.create_workflows(wf_def) wf_ex = self.engine_client.start_workflow('wf') self.assertIsNotNone(wf_ex) self.assertEqual(states.RUNNING, wf_ex['state']) self.await_workflow_success(wf_ex['id']) self.assertGreater(self.mock_profiler_log_func.call_count, 0) def test_no_profile_trace(self): self.override_config('enabled', False, 'profiler') wf_def = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Peace!" """ wf_service.create_workflows(wf_def) wf_ex = self.engine_client.start_workflow('wf') self.assertIsNotNone(wf_ex) self.assertEqual(states.RUNNING, wf_ex['state']) self.await_workflow_success(wf_ex['id']) self.assertEqual(self.mock_profiler_log_func.call_count, 0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_race_condition.py0000644000175000017500000001356300000000000025555 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import corolocal from eventlet import semaphore from oslo_config import cfg import testtools from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit import base as test_base from mistral.tests.unit.engine import base from mistral.workflow import states from mistral_lib import actions as actions_base # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WF_LONG_ACTION = """ --- version: '2.0' wf: type: direct description: | The idea is to use action that runs longer than engine.start_workflow() method. And we need to check that engine handles this situation. output: result: <% $.result %> tasks: task1: action: test.block publish: result: <% task(task1).result %> """ WF_SHORT_ACTION = """ --- version: '2.0' wf: type: direct description: | The idea is to use action that runs faster than engine.start_workflow(). And we need to check that engine handles this situation as well. This was a situation previously that led to a race condition in engine, method on_action_complete() was called while DB transaction in start_workflow() was still active (not committed yet). To emulate a short action we use a workflow with two start tasks so they run both in parallel on the first engine iteration when we call method start_workflow(). First task has a short action that just returns a predefined result and the second task blocks until the test explicitly unblocks it. So the first action will always end before start_workflow() method ends. 
output: result: <% $.result %> tasks: task1: action: std.echo output=1 publish: result: <% task(task1).result %> task2: action: test.block """ ACTION_SEMAPHORE = None TEST_SEMAPHORE = None class BlockingAction(actions_base.Action): def __init__(self): pass @staticmethod def unblock_test(): TEST_SEMAPHORE.release() @staticmethod def wait_for_test(): ACTION_SEMAPHORE.acquire() def run(self, context): self.unblock_test() self.wait_for_test() print('Action completed [eventlet_id=%s]' % corolocal.get_ident()) return 'test' def test(self): pass class EngineActionRaceConditionTest(base.EngineTestCase): def setUp(self): super(EngineActionRaceConditionTest, self).setUp() global ACTION_SEMAPHORE global TEST_SEMAPHORE ACTION_SEMAPHORE = semaphore.Semaphore(1) TEST_SEMAPHORE = semaphore.Semaphore(0) test_base.register_action_class('test.block', BlockingAction) @staticmethod def block_action(): ACTION_SEMAPHORE.acquire() @staticmethod def unblock_action(): ACTION_SEMAPHORE.release() @staticmethod def wait_for_action(): TEST_SEMAPHORE.acquire() def test_long_action(self): wf_service.create_workflows(WF_LONG_ACTION) self.block_action() wf_ex = self.engine.start_workflow('wf') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(states.RUNNING, task_execs[0].state) self.wait_for_action() with db_api.transaction(): # Here's the point when the action is blocked but already running. # Do the same check again, it should always pass. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(states.RUNNING, task_execs[0].state) self.unblock_action() self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output self.assertDictEqual({'result': 'test'}, wf_output) # TODO(rakhmerov): Should periodically fail now because of poor # transaction isolation support in SQLite. Requires more research # to understand all the details. It's not reproducible on MySql. @testtools.skip('Skip until we know how to fix it with SQLite.') def test_short_action(self): wf_service.create_workflows(WF_SHORT_ACTION) self.block_action() wf_ex = self.engine.start_workflow('wf') wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) task_execs = wf_ex.task_executions task1_ex = self._assert_single_item(task_execs, name='task1') task2_ex = self._assert_single_item( task_execs, name='task2', state=states.RUNNING ) self.await_task_success(task1_ex.id, timeout=10) self.unblock_action() self.await_task_success(task2_ex.id) self.await_workflow_success(wf_ex.id) task1_ex = db_api.get_task_execution(task1_ex.id) task1_action_ex = db_api.get_action_executions( task_execution_id=task1_ex.id )[0] self.assertEqual(1, task1_action_ex.output['result']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_reverse_workflow.py0000644000175000017500000001221500000000000026173 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
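
# Editor's note: a minimal sketch, not part of the original module, of the
# two-semaphore handshake that BlockingAction and the tests above rely on.
# One semaphore lets the test observe that the action has started; the
# other lets the test decide when the action may finish. 'payload' is a
# hypothetical stand-in for the real action body.
def _handshake_sketch():
    action_sem = semaphore.Semaphore(0)  # the test releases, the action acquires
    test_sem = semaphore.Semaphore(0)    # the action releases, the test acquires

    def action_side(payload):
        test_sem.release()    # tell the test we are running
        action_sem.acquire()  # block until the test unblocks us
        return payload

    # Test side (conceptually): call test_sem.acquire() to wait for the
    # action to start, then action_sem.release() to let it complete.
    return action_side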

# ===== mistral-10.0.0.0b3/mistral/tests/unit/engine/test_reverse_workflow.py =====

# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg

from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.services import workbooks as wb_service
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import states

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')

WORKBOOK = """
---
version: '2.0'

name: my_wb

workflows:
  wf1:
    type: reverse

    input:
      - param1
      - param2

    tasks:
      task1:
        action: std.echo output=<% $.param1 %>
        publish:
          result1: <% task(task1).result %>

      task2:
        action: std.echo output="<% $.result1 %> & <% $.param2 %>"
        publish:
          result2: <% task(task2).result %>
        requires: [task1]

      task3:
        action: std.noop

      task4:
        action: std.noop
        requires: task3
"""


class ReverseWorkflowEngineTest(base.EngineTestCase):
    def setUp(self):
        super(ReverseWorkflowEngineTest, self).setUp()

        wb_service.create_workbook_v2(WORKBOOK)

    def test_start_task1(self):
        wf_input = {
            'param1': 'a',
            'param2': 'b'
        }

        wf_ex = self.engine.start_workflow(
            'my_wb.wf1',
            wf_input=wf_input,
            task_name='task1'
        )

        # Execution 1.
        self.assertIsNotNone(wf_ex)
        self.assertDictEqual(wf_input, wf_ex.input)
        self.assertDictEqual(
            {
                'task_name': 'task1',
                'namespace': '',
                'env': {}
            },
            wf_ex.params
        )

        # Wait till workflow 'wf1' is completed.
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            task_execs = wf_ex.task_executions

            self.assertEqual(1, len(task_execs))
            self.assertEqual(1, len(db_api.get_task_executions()))

            task_ex = self._assert_single_item(
                task_execs,
                name='task1',
                state=states.SUCCESS
            )

            self.assertDictEqual({'result1': 'a'}, task_ex.published)

    def test_start_task2(self):
        wf_input = {
            'param1': 'a',
            'param2': 'b'
        }

        wf_ex = self.engine.start_workflow(
            'my_wb.wf1',
            wf_input=wf_input,
            task_name='task2'
        )

        # Execution 1.
        self.assertIsNotNone(wf_ex)
        self.assertDictEqual(wf_input, wf_ex.input)
        self.assertDictEqual(
            {
                'task_name': 'task2',
                'namespace': '',
                'env': {}
            },
            wf_ex.params
        )

        # Wait till workflow 'wf1' is completed.
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            task_execs = wf_ex.task_executions

            self.assertEqual(2, len(task_execs))
            self.assertEqual(2, len(db_api.get_task_executions()))

            task1_ex = self._assert_single_item(
                task_execs,
                name='task1',
                state=states.SUCCESS
            )

            self.assertDictEqual({'result1': 'a'}, task1_ex.published)

            task2_ex = self._assert_single_item(
                task_execs,
                name='task2',
                state=states.SUCCESS
            )

            self.assertDictEqual({'result2': 'a & b'}, task2_ex.published)

    def test_one_line_requires_syntax(self):
        wf_input = {'param1': 'a', 'param2': 'b'}

        wf_ex = self.engine.start_workflow(
            'my_wb.wf1',
            wf_input=wf_input,
            task_name='task4'
        )

        self.await_workflow_success(wf_ex.id)

        tasks = db_api.get_task_executions()

        self.assertEqual(2, len(tasks))

        self._assert_single_item(tasks, name='task4', state=states.SUCCESS)
        self._assert_single_item(tasks, name='task3', state=states.SUCCESS)

    def test_inconsistent_task_names(self):
        wf_text = """
        version: '2.0'

        wf:
          type: reverse

          tasks:
            task2:
              action: std.noop

            task3:
              action: std.noop
              requires: [task1]
        """

        exception = self.assertRaises(
            exc.InvalidModelException,
            wf_service.create_workflows,
            wf_text
        )

        self.assertIn("Task 'task1' not found", str(exception))
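
# Editor's note: an illustrative sketch, not part of the original module.
# In a reverse workflow the caller names a target task and the engine runs
# only that task plus its transitive 'requires' dependencies, which is why
# test_start_task1 sees one task execution and test_start_task2 sees two.
# A hypothetical dependency walk over a {task: [required_tasks]} mapping:
def _tasks_to_run(requires, target):
    needed, stack = set(), [target]

    while stack:
        task = stack.pop()

        if task not in needed:
            needed.add(task)
            stack.extend(requires.get(task, []))

    return needed

# Example: _tasks_to_run({'task2': ['task1'], 'task4': ['task3']}, 'task2')
# returns {'task1', 'task2'}.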

# ===== mistral-10.0.0.0b3/mistral/tests/unit/engine/test_reverse_workflow_rerun.py =====

# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from oslo_config import cfg

from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.services import workbooks as wb_service
from mistral.tests.unit.engine import base
from mistral.workflow import states

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')

SIMPLE_WORKBOOK = """
---
version: '2.0'

name: wb1

workflows:
  wf1:
    type: reverse
    tasks:
      t1:
        action: std.echo output="Task 1"
      t2:
        action: std.echo output="Task 2"
        requires:
          - t1
      t3:
        action: std.echo output="Task 3"
        requires:
          - t2
"""

SIMPLE_WORKBOOK_DIFF_ENV_VAR = """
---
version: '2.0'

name: wb1

workflows:
  wf1:
    type: reverse
    tasks:
      t1:
        action: std.echo output="Task 1"
      t2:
        action: std.echo output=<% env().var1 %>
        requires:
          - t1
      t3:
        action: std.echo output=<% env().var2 %>
        requires:
          - t2
"""


class ReverseWorkflowRerunTest(base.EngineTestCase):

    @mock.patch.object(
        std_actions.EchoAction,
        'run',
        mock.MagicMock(
            side_effect=[
                'Task 1',               # Mock task1 success for initial run.
                exc.ActionException(),  # Mock task2 exception for initial run.
                'Task 2',               # Mock task2 success for rerun.
                'Task 3'                # Mock task3 success.
            ]
        )
    )
    def test_rerun(self):
        wb_service.create_workbook_v2(SIMPLE_WORKBOOK)

        # Run workflow and fail task.
        wf_ex = self.engine.start_workflow('wb1.wf1', task_name='t3')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            task_execs = wf_ex.task_executions

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertIsNotNone(wf_ex.state_info)
        self.assertEqual(2, len(task_execs))

        task_1_ex = self._assert_single_item(task_execs, name='t1')
        task_2_ex = self._assert_single_item(task_execs, name='t2')

        self.assertEqual(states.SUCCESS, task_1_ex.state)
        self.assertEqual(states.ERROR, task_2_ex.state)
        self.assertIsNotNone(task_2_ex.state_info)

        # Resume workflow and re-run failed task.
        self.engine.rerun_workflow(task_2_ex.id)

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(states.RUNNING, wf_ex.state)
        self.assertIsNone(wf_ex.state_info)

        # Wait for the workflow to succeed.
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            task_execs = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertIsNone(wf_ex.state_info)
        self.assertEqual(3, len(task_execs))

        task_1_ex = self._assert_single_item(task_execs, name='t1')
        task_2_ex = self._assert_single_item(task_execs, name='t2')
        task_3_ex = self._assert_single_item(task_execs, name='t3')

        # Check action executions of task 1.
        self.assertEqual(states.SUCCESS, task_1_ex.state)

        task_1_action_exs = db_api.get_action_executions(
            task_execution_id=task_1_ex.id
        )

        self.assertEqual(1, len(task_1_action_exs))
        self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)

        # Check action executions of task 2.
        self.assertEqual(states.SUCCESS, task_2_ex.state)
        self.assertIsNone(task_2_ex.state_info)

        task_2_action_exs = db_api.get_action_executions(
            task_execution_id=task_2_ex.id
        )

        self.assertEqual(2, len(task_2_action_exs))

        # Check there is exactly 1 action in SUCCESS and 1 in ERROR state.
        # Order doesn't matter.
        self.assertEqual(
            1,
            len([act_ex for act_ex in task_2_action_exs
                 if act_ex.state == states.SUCCESS])
        )
        self.assertEqual(
            1,
            len([act_ex for act_ex in task_2_action_exs
                 if act_ex.state == states.ERROR])
        )

        # Check action executions of task 3.
        self.assertEqual(states.SUCCESS, task_3_ex.state)

        task_3_action_exs = db_api.get_action_executions(
            task_execution_id=task_3_ex.id
        )

        self.assertEqual(1, len(task_3_action_exs))
        self.assertEqual(states.SUCCESS, task_3_action_exs[0].state)

    @mock.patch.object(
        std_actions.EchoAction,
        'run',
        mock.MagicMock(
            side_effect=[
                'Task 1',               # Mock task1 success for initial run.
                exc.ActionException(),  # Mock task2 exception for initial run.
                'Task 2',               # Mock task2 success for rerun.
                'Task 3'                # Mock task3 success.
            ]
        )
    )
    def test_rerun_diff_env_vars(self):
        wb_service.create_workbook_v2(SIMPLE_WORKBOOK_DIFF_ENV_VAR)

        # Initial environment variables for the workflow execution.
        env = {
            'var1': 'fee fi fo fum',
            'var2': 'foobar'
        }

        # Run workflow and fail task.
        wf_ex = self.engine.start_workflow(
            'wb1.wf1',
            task_name='t3',
            env=env
        )

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            task_execs = wf_ex.task_executions

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertIsNotNone(wf_ex.state_info)
        self.assertEqual(2, len(task_execs))
        self.assertDictEqual(env, wf_ex.params['env'])

        task_1_ex = self._assert_single_item(task_execs, name='t1')
        task_2_ex = self._assert_single_item(task_execs, name='t2')

        self.assertEqual(states.SUCCESS, task_1_ex.state)
        self.assertEqual(states.ERROR, task_2_ex.state)
        self.assertIsNotNone(task_2_ex.state_info)

        # Update env in workflow execution with the following.
        updated_env = {
            'var1': 'Task 2',
            'var2': 'Task 3'
        }

        # Resume workflow and re-run failed task.
        self.engine.rerun_workflow(task_2_ex.id, env=updated_env)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertEqual(states.RUNNING, wf_ex.state)
            self.assertIsNone(wf_ex.state_info)
            self.assertDictEqual(updated_env, wf_ex.params['env'])

        # Wait for the workflow to succeed.
        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            task_execs = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertIsNone(wf_ex.state_info)
        self.assertEqual(3, len(task_execs))

        task_1_ex = self._assert_single_item(task_execs, name='t1')
        task_2_ex = self._assert_single_item(task_execs, name='t2')
        task_3_ex = self._assert_single_item(task_execs, name='t3')

        # Check action executions of task 1.
        self.assertEqual(states.SUCCESS, task_1_ex.state)

        task_1_action_exs = db_api.get_action_executions(
            task_execution_id=task_1_ex.id
        )

        self.assertEqual(1, len(task_1_action_exs))
        self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
        self.assertDictEqual(
            {'output': 'Task 1'},
            task_1_action_exs[0].input
        )

        # Check action executions of task 2.
        self.assertEqual(states.SUCCESS, task_2_ex.state)
        self.assertIsNone(task_2_ex.state_info)

        task_2_action_exs = db_api.get_action_executions(
            task_execution_id=task_2_ex.id
        )

        self.assertEqual(2, len(task_2_action_exs))

        # Assert that one action ex is in error and one in success state.
        self.assertIn(
            task_2_action_exs[0].state,
            [states.ERROR, states.SUCCESS]
        )
        self.assertIn(
            task_2_action_exs[1].state,
            [states.ERROR, states.SUCCESS]
        )
        self.assertNotEqual(
            task_2_action_exs[0].state,
            task_2_action_exs[1].state
        )

        # Assert that one action ex got the first env and one the second.
        self.assertIn(
            task_2_action_exs[0].input['output'],
            [env['var1'], updated_env['var1']]
        )
        self.assertIn(
            task_2_action_exs[1].input['output'],
            [env['var1'], updated_env['var1']]
        )
        self.assertNotEqual(
            task_2_action_exs[0].input,
            task_2_action_exs[1].input
        )

        # Check action executions of task 3.
        self.assertEqual(states.SUCCESS, task_3_ex.state)

        task_3_action_exs = db_api.get_action_executions(
            task_execution_id=task_3_ex.id
        )

        self.assertEqual(1, len(task_3_action_exs))
        self.assertEqual(states.SUCCESS, task_3_action_exs[0].state)
        self.assertDictEqual(
            {'output': updated_env['var2']},
            task_3_action_exs[0].input
        )

    @mock.patch.object(
        std_actions.EchoAction,
        'run',
        mock.MagicMock(
            side_effect=[
                'Task 1',              # Mock task1 success for initial run.
                exc.ActionException()  # Mock task2 exception for initial run.
            ]
        )
    )
    def test_rerun_from_prev_step(self):
        wb_service.create_workbook_v2(SIMPLE_WORKBOOK)

        # Run workflow and fail task.
        wf_ex = self.engine.start_workflow('wb1.wf1', task_name='t3')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            task_execs = wf_ex.task_executions

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertIsNotNone(wf_ex.state_info)
        self.assertEqual(2, len(task_execs))

        task_1_ex = self._assert_single_item(task_execs, name='t1')
        task_2_ex = self._assert_single_item(task_execs, name='t2')

        self.assertEqual(states.SUCCESS, task_1_ex.state)
        self.assertEqual(states.ERROR, task_2_ex.state)
        self.assertIsNotNone(task_2_ex.state_info)

        # Try to re-run the workflow from a task that already succeeded;
        # this is not supported and must raise.
        e = self.assertRaises(
            exc.MistralError,
            self.engine.rerun_workflow,
            task_1_ex.id
        )

        self.assertIn('not supported', str(e))
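
# Editor's note: an illustrative wrapper, not part of the original module.
# The rerun contract these tests exercise, in short: rerun_workflow() may
# only target the task that actually failed (or was cancelled); targeting
# an already-succeeded upstream task raises exc.MistralError, and an
# optional env= replaces the execution's environment for the re-run.
def _rerun_failed_task(engine, failed_task_ex_id, env=None):
    # Thin wrapper around the call pattern used in the tests above;
    # 'engine' stands for any engine client with the same interface.
    return engine.rerun_workflow(failed_task_ex_id, env=env)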

# ===== mistral-10.0.0.0b3/mistral/tests/unit/engine/test_reverse_workflow_rerun_cancelled.py =====

# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from oslo_config import cfg

from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workbooks as wb_service
from mistral.tests.unit.engine import base
from mistral.workflow import states
from mistral_lib import actions as ml_actions

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')


class ReverseWorkflowRerunCancelledTest(base.EngineTestCase):

    @mock.patch.object(
        std_actions.EchoAction,
        'run',
        mock.MagicMock(
            side_effect=[
                'Task 2',  # Mock task2 success.
                'Task 3'   # Mock task3 success.
            ]
        )
    )
    def test_rerun_cancelled_task(self):
        wb_def = """
        version: '2.0'

        name: wb1

        workflows:
          wf1:
            type: reverse
            tasks:
              t1:
                action: std.async_noop
              t2:
                action: std.echo output="Task 2"
                requires:
                  - t1
              t3:
                action: std.echo output="Task 3"
                requires:
                  - t2
        """
        wb_service.create_workbook_v2(wb_def)

        wf1_ex = self.engine.start_workflow('wb1.wf1', task_name='t3')

        self.await_workflow_state(wf1_ex.id, states.RUNNING)

        with db_api.transaction():
            wf1_execs = db_api.get_workflow_executions()

            wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1')
            wf1_t1_ex = self._assert_single_item(
                wf1_ex.task_executions,
                name='t1'
            )

        wf1_t1_action_exs = db_api.get_action_executions(
            task_execution_id=wf1_t1_ex.id
        )

        self.assertEqual(1, len(wf1_t1_action_exs))
        self.assertEqual(states.RUNNING, wf1_t1_action_exs[0].state)

        # Cancel action execution for task.
        self.engine.on_action_complete(
            wf1_t1_action_exs[0].id,
            ml_actions.Result(cancel=True)
        )

        self.await_workflow_cancelled(wf1_ex.id)

        with db_api.transaction():
            wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

            wf1_t1_ex = self._assert_single_item(
                wf1_ex.task_executions,
                name='t1'
            )

        self.await_task_cancelled(wf1_t1_ex.id)

        with db_api.transaction():
            wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

            wf1_t1_ex = self._assert_single_item(
                wf1_ex.task_executions,
                name='t1'
            )

            self.assertEqual(states.CANCELLED, wf1_ex.state)
            self.assertEqual("Cancelled tasks: t1", wf1_ex.state_info)
            self.assertEqual(1, len(wf1_ex.task_executions))
            self.assertEqual(states.CANCELLED, wf1_t1_ex.state)
            self.assertIsNone(wf1_t1_ex.state_info)

        # Resume workflow and re-run cancelled task.
        self.engine.rerun_workflow(wf1_t1_ex.id)

        with db_api.transaction():
            wf1_ex = db_api.get_workflow_execution(wf1_ex.id)
            wf1_task_execs = wf1_ex.task_executions

            self.assertEqual(states.RUNNING, wf1_ex.state)
            self.assertIsNone(wf1_ex.state_info)

            # Mark async action execution complete.
            wf1_t1_ex = self._assert_single_item(wf1_task_execs, name='t1')

        wf1_t1_action_exs = db_api.get_action_executions(
            task_execution_id=wf1_t1_ex.id
        )

        self.assertEqual(states.RUNNING, wf1_t1_ex.state)
        self.assertEqual(2, len(wf1_t1_action_exs))

        # Check there is exactly 1 action in RUNNING and 1 in CANCELLED
        # state. Order doesn't matter.
        self._assert_single_item(wf1_t1_action_exs, state=states.CANCELLED)

        running_execution = self._assert_single_item(
            wf1_t1_action_exs,
            state=states.RUNNING
        )

        self.engine.on_action_complete(
            running_execution.id,
            ml_actions.Result(data={'foo': 'bar'})
        )

        # Wait for the workflow to succeed.
        self.await_workflow_success(wf1_ex.id)

        with db_api.transaction():
            wf1_ex = db_api.get_workflow_execution(wf1_ex.id)
            wf1_task_execs = wf1_ex.task_executions

            self.assertEqual(states.SUCCESS, wf1_ex.state)
            self.assertIsNone(wf1_ex.state_info)
            self.assertEqual(3, len(wf1_task_execs))

            wf1_t1_ex = self._assert_single_item(wf1_task_execs, name='t1')
            wf1_t2_ex = self._assert_single_item(wf1_task_execs, name='t2')
            wf1_t3_ex = self._assert_single_item(wf1_task_execs, name='t3')

        # Check action executions of task 1.
        self.assertEqual(states.SUCCESS, wf1_t1_ex.state)
        self.assertIsNone(wf1_t1_ex.state_info)

        wf1_t1_action_exs = db_api.get_action_executions(
            task_execution_id=wf1_t1_ex.id
        )

        self.assertEqual(2, len(wf1_t1_action_exs))

        # Check there is exactly 1 action in SUCCESS and 1 in CANCELLED
        # state. Order doesn't matter.
        self._assert_single_item(wf1_t1_action_exs, state=states.SUCCESS)
        self._assert_single_item(wf1_t1_action_exs, state=states.CANCELLED)

        # Check action executions of task 2.
        self.assertEqual(states.SUCCESS, wf1_t2_ex.state)

        wf1_t2_action_exs = db_api.get_action_executions(
            task_execution_id=wf1_t2_ex.id
        )

        self.assertEqual(1, len(wf1_t2_action_exs))
        self.assertEqual(states.SUCCESS, wf1_t2_action_exs[0].state)

        # Check action executions of task 3.
        self.assertEqual(states.SUCCESS, wf1_t3_ex.state)

        wf1_t3_action_exs = db_api.get_action_executions(
            task_execution_id=wf1_t3_ex.id
        )

        self.assertEqual(1, len(wf1_t3_action_exs))
        self.assertEqual(states.SUCCESS, wf1_t3_action_exs[0].state)
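
# Editor's note: an illustrative sketch, not part of the original module.
# The test above drives an async action through its whole lifecycle via
# on_action_complete() and mistral_lib Result objects. The three outcomes
# used across these files look like this:
def _example_results():
    return [
        ml_actions.Result(data={'foo': 'bar'}),  # marks the action SUCCESS
        ml_actions.Result(error='boom'),         # marks the action ERROR
        ml_actions.Result(cancel=True),          # marks the action CANCELLED
    ]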

# ===== mistral-10.0.0.0b3/mistral/tests/unit/engine/test_run_action.py =====

# Copyright 2015 - Mirantis, Inc.
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from oslo_config import cfg

from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral.services import actions
from mistral.tests.unit.engine import base
from mistral.workflow import states
from mistral_lib import actions as ml_actions

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')


class RunActionEngineTest(base.EngineTestCase):

    @classmethod
    def heavy_init(cls):
        super(RunActionEngineTest, cls).heavy_init()

        action = """---
        version: '2.0'

        concat:
          base: std.echo
          base-input:
            output: <% $.left %><% $.right %>
          input:
            - left
            - right

        concat3:
          base: concat
          base-input:
            left: <% $.left %><% $.center %>
            right: <% $.right %>
          input:
            - left
            - center
            - right

        concat4:
          base: concat3
          base-input:
            left: <% $.left %>
            center: <% $.center_left %><% $.center_right %>
            right: <% $.right %>
          input:
            - left
            - center_left
            - center_right
            - right

        missing_base:
          base: wrong
          input:
            - some_input

        nested_missing_base:
          base: missing_base
          input:
            - some_input

        loop_action:
          base: loop_action
          base-input:
            output: <% $.output %>
          input:
            - output

        level2_loop_action:
          base: loop_action
          base-input:
            output: <% $.output %>
          input:
            - output
        """
        actions.create_actions(action)

    def test_run_action_sync(self):
        # Start action and see the result.
        action_ex = self.engine.start_action('std.echo', {'output': 'Hello!'})

        self.assertEqual('Hello!', action_ex.output['result'])
        self.assertEqual(states.SUCCESS, action_ex.state)

    def test_run_action_with_namespace(self):
        namespace = 'test_ns'

        action_text = """---
        version: '2.0'

        concat1:
          base: std.echo
          base-input:
            output: <% $.left %><% $.right %>
          input:
            - left
            - right

        concat2:
          base: concat1
          base-input:
            left: <% $.left %><% $.center %>
            right: <% $.right %>
          input:
            - left
            - center
            - right
        """
        actions.create_actions(action_text, namespace=namespace)

        self.assertRaises(
            exc.InvalidActionException,
            self.engine.start_action,
            'concat1',
            {
                'left': 'Hello, ',
                'right': 'John Doe!'
            },
            save_result=True,
            namespace=''
        )

        action_ex = self.engine.start_action(
            'concat1',
            {
                'left': 'Hello, ',
                'right': 'John Doe!'
            },
            save_result=True,
            namespace=namespace
        )

        self.assertEqual(namespace, action_ex.workflow_namespace)

        self.await_action_success(action_ex.id)

        with db_api.transaction():
            action_ex = db_api.get_action_execution(action_ex.id)

            self.assertEqual(states.SUCCESS, action_ex.state)
            self.assertEqual(
                {'result': u'Hello, John Doe!'},
                action_ex.output
            )

        action_ex = self.engine.start_action(
            'concat2',
            {
                'left': 'Hello, ',
                'center': 'John',
                'right': ' Doe!'
            },
            save_result=True,
            namespace=namespace
        )

        self.assertEqual(namespace, action_ex.workflow_namespace)

        self.await_action_success(action_ex.id)

        with db_api.transaction():
            action_ex = db_api.get_action_execution(action_ex.id)

            self.assertEqual(states.SUCCESS, action_ex.state)
            self.assertEqual('Hello, John Doe!', action_ex.output['result'])

    def test_run_action_with_invalid_namespace(self):
        # This test covers the case where no action with the requested
        # name exists in the given namespace: if an action with the same
        # name exists in the default namespace, that action is run instead.
        action_ex = self.engine.start_action(
            'concat',
            {'left': 'Hello, ', 'right': 'John Doe!'},
            save_result=True,
            namespace='namespace'
        )

        self.assertIsNotNone(action_ex)

    @mock.patch.object(
        std_actions.EchoAction,
        'run',
        mock.Mock(side_effect=exc.ActionException("some error"))
    )
    def test_run_action_error(self):
        # Start action and see the result.
        action_ex = self.engine.start_action('std.echo', {'output': 'Hello!'})

        self.assertIsNotNone(action_ex.output)
        self.assertIn('some error', action_ex.output['result'])
        self.assertEqual(states.ERROR, action_ex.state)

    def test_run_action_save_result(self):
        # Start action.
        action_ex = self.engine.start_action(
            'std.echo',
            {'output': 'Hello!'},
            save_result=True
        )

        self.await_action_success(action_ex.id)

        with db_api.transaction():
            action_ex = db_api.get_action_execution(action_ex.id)

            self.assertEqual(states.SUCCESS, action_ex.state)
            self.assertEqual({'result': 'Hello!'}, action_ex.output)

    def test_run_action_run_sync(self):
        # Start action.
        action_ex = self.engine.start_action(
            'std.echo',
            {'output': 'Hello!'},
            run_sync=True
        )

        self.assertEqual('Hello!', action_ex.output['result'])
        self.assertEqual(states.SUCCESS, action_ex.state)

    def test_run_action_save_result_and_run_sync(self):
        # Start action.
        action_ex = self.engine.start_action(
            'std.echo',
            {'output': 'Hello!'},
            save_result=True,
            run_sync=True
        )

        self.assertEqual('Hello!', action_ex.output['result'])
        self.assertEqual(states.SUCCESS, action_ex.state)

        with db_api.transaction():
            action_ex = db_api.get_action_execution(action_ex.id)

            self.assertEqual(states.SUCCESS, action_ex.state)
            self.assertEqual({'result': 'Hello!'}, action_ex.output)

    def test_run_action_run_sync_error(self):
        # Start action.
        self.assertRaises(
            exc.InputException,
            self.engine.start_action,
            'std.async_noop',
            {},
            run_sync=True
        )

    def test_run_action_async(self):
        action_ex = self.engine.start_action('std.async_noop', {})

        self.await_action_state(action_ex.id, states.RUNNING)

        action_ex = db_api.get_action_execution(action_ex.id)

        self.assertEqual(states.RUNNING, action_ex.state)

    @mock.patch.object(
        std_actions.AsyncNoOpAction, 'run',
        mock.MagicMock(side_effect=exc.ActionException('Invoke failed.')))
    def test_run_action_async_invoke_failure(self):
        action_ex = self.engine.start_action('std.async_noop', {})

        self.await_action_error(action_ex.id)

        with db_api.transaction():
            action_ex = db_api.get_action_execution(action_ex.id)

            self.assertEqual(states.ERROR, action_ex.state)
            self.assertIn(
                'Invoke failed.',
                action_ex.output.get('result', '')
            )

    @mock.patch.object(
        std_actions.AsyncNoOpAction, 'run',
        mock.MagicMock(return_value=ml_actions.Result(error='Invoke erred.')))
    def test_run_action_async_invoke_with_error(self):
        action_ex = self.engine.start_action('std.async_noop', {})

        self.await_action_error(action_ex.id)

        with db_api.transaction():
            action_ex = db_api.get_action_execution(action_ex.id)

            self.assertEqual(states.ERROR, action_ex.state)
            self.assertIn(
                'Invoke erred.',
                action_ex.output.get('result', '')
            )

    def test_run_action_adhoc(self):
        # Start action and see the result.
        action_ex = self.engine.start_action(
            'concat',
            {'left': 'Hello, ', 'right': 'John Doe!'}
        )

        self.assertEqual('Hello, John Doe!', action_ex.output['result'])

    def test_run_level_two_action_adhoc(self):
        # Start action and see the result.
        action_ex = self.engine.start_action(
            'concat3',
            {'left': 'Hello, ', 'center': 'John', 'right': ' Doe!'}
        )

        self.assertEqual('Hello, John Doe!', action_ex.output['result'])

    def test_run_level_three_action_adhoc(self):
        # Start action and see the result.
        action_ex = self.engine.start_action(
            'concat4',
            {
                'left': 'Hello, ',
                'center_left': 'John',
                'center_right': ' Doe',
                'right': '!'
            }
        )

        self.assertEqual('Hello, John Doe!', action_ex.output['result'])

    def test_run_action_with_missing_base(self):
        # Start action and see the result.
        self.assertRaises(
            exc.InvalidActionException,
            self.engine.start_action,
            'missing_base',
            {'some_input': 'Hi'}
        )

    def test_run_action_with_missing_nested_base(self):
        # Start action and see the result.
        self.assertRaises(
            exc.InvalidActionException,
            self.engine.start_action,
            'nested_missing_base',
            {'some_input': 'Hi'}
        )

    def test_run_loop_action(self):
        # Start action and see the result.
        self.assertRaises(
            ValueError,
            self.engine.start_action,
            'loop_action',
            {'output': 'Hello'}
        )

    def test_run_level_two_loop_action(self):
        # Start action and see the result.
        self.assertRaises(
            ValueError,
            self.engine.start_action,
            'level2_loop_action',
            {'output': 'Hello'}
        )

    def test_run_action_wrong_input(self):
        # Start action and see the result.
        exception = self.assertRaises(
            exc.InputException,
            self.engine.start_action,
            'std.http',
            {'url': 'Hello, ', 'metod': 'John Doe!'}
        )

        self.assertIn('std.http', str(exception))

    def test_adhoc_action_wrong_input(self):
        # Start action and see the result.
        exception = self.assertRaises(
            exc.InputException,
            self.engine.start_action,
            'concat',
            {'left': 'Hello, ', 'ri': 'John Doe!'}
        )

        self.assertIn('concat', str(exception))

    # TODO(rakhmerov): This is an example of a bad test. It pins to
    # implementation details too much and prevents us from refactoring
    # easily. When writing tests we should make assertions about
    # consequences, not about how the internal machinery works, i.e. we
    # need to follow the "black box" testing paradigm.
    @mock.patch('mistral.engine.actions.resolve_action_definition')
    @mock.patch('mistral.engine.utils.validate_input')
    @mock.patch('mistral.services.action_manager.get_action_class')
    @mock.patch('mistral.engine.actions.PythonAction.run')
    def test_run_action_with_kwargs_input(self, run_mock, class_mock,
                                          validate_mock, def_mock):
        action_def = models.ActionDefinition()

        action_def.update({
            'name': 'fake_action',
            'action_class': '',
            'attributes': {},
            'description': '',
            'input': '**kwargs',
            'is_system': True,
            'scope': 'public'
        })

        def_mock.return_value = action_def
        run_mock.return_value = ml_actions.Result(data='Hello')

        class_ret = mock.MagicMock()
        class_mock.return_value = class_ret

        self.engine.start_action('fake_action', {'input': 'Hello'})

        self.assertEqual(1, def_mock.call_count)
        def_mock.assert_called_with('fake_action', namespace='')

        self.assertEqual(0, validate_mock.call_count)

        class_ret.assert_called_once_with(input='Hello')

        run_mock.assert_called_once_with(
            {'input': 'Hello'},
            None,
            save=False,
            timeout=None
        )
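
# Editor's note: an illustrative summary, not part of the original module.
# The engine.start_action() calls above vary three orthogonal options:
# save_result (persist the output even for synchronous runs), run_sync
# (fail fast for actions that cannot complete synchronously, e.g.
# std.async_noop), and namespace (ad-hoc action lookup falls back to the
# default namespace when no namespaced match exists). A hedged wrapper
# showing the call shape:
def _start_echo(engine, text, **kwargs):
    # kwargs may include save_result=True, run_sync=True, namespace='...'.
    return engine.start_action('std.echo', {'output': text}, **kwargs)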
wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(len(tasks), 2) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertEqual(task1.state, states.SUCCESS) self.assertEqual(task2.state, states.SUCCESS) @mock.patch.object(r_exe.RemoteExecutor, 'run_action', MOCK_RUN_AT_TARGET) def test_safe_rerun_false(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.noop safe-rerun: false on-success: - task2 on-error: - task3 task2: action: std.noop safe-rerun: true task3: action: std.noop safe-rerun: true """ # Note: because every task has the redelivered flag set to true in the # mock function (_run_at_target), task2 and task3 have to set # safe-rerun to true. wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(len(tasks), 2) task1 = self._assert_single_item(tasks, name='task1') task3 = self._assert_single_item(tasks, name='task3') self.assertEqual(task1.state, states.ERROR) self.assertEqual(task3.state, states.SUCCESS) @mock.patch.object(r_exe.RemoteExecutor, 'run_action', MOCK_RUN_AT_TARGET) def test_safe_rerun_with_items(self): wf_text = """--- version: '2.0' wf: tasks: task1: with-items: i in [1, 2, 3] action: std.echo output=<% $.i %> safe-rerun: true publish: result: <% task(task1).result %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(len(tasks), 1) task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(task1.state, states.SUCCESS) result = data_flow.get_task_execution_result(task1) self.assertIn(1, result) self.assertIn(2, result) self.assertIn(3, result) @mock.patch.object(r_exe.RemoteExecutor, 'run_action', MOCK_RUN_AT_TARGET) def test_safe_rerun_in_task_defaults(self): wf_text = """--- version: '2.0' wf: task-defaults: safe-rerun: true tasks: task1: safe-rerun: false on-error: - task2 task2: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(len(tasks), 2) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertEqual(task1.state, states.ERROR) self.assertEqual(task2.state, states.SUCCESS) @mock.patch.object(r_exe.RemoteExecutor, 'run_action', MOCK_RUN_AT_TARGET) def test_default_value_of_safe_rerun(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(len(tasks), 1) task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(task1.state, states.ERROR) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/mistral/tests/unit/engine/test_set_state.py0000644000175000017500000000362000000000000024561 0ustar00coreycorey00000000000000# Copyright 2017 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.engine import workflows from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class TestSetState(base.EngineTestCase): def test_set_state(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.echo output="Echo" on-success: - task2 task2: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) # The state in db is SUCCESS, but wf_ex still contains outdated info. self.assertEqual("RUNNING", wf_ex.state) wf = workflows.Workflow(wf_ex) # Trying to change the status of a succeeded execution. There is no # error, only a warning message that the state has been changed in db. wf.set_state("ERROR") with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual("SUCCESS", wf_ex.state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_state_info.py0000644000175000017500000001342100000000000024721 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from mistral.actions import std_actions from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class ExecutionStateInfoTest(base.EngineTestCase): def test_state_info(self): workflow = """--- version: '2.0' test_wf: type: direct tasks: task1: action: std.fail task2: action: std.noop """ wf_service.create_workflows(workflow) # Start workflow. wf_ex = self.engine.start_workflow('test_wf') self.await_workflow_error(wf_ex.id) # Note: We need to reread execution to access related tasks.
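# On failure the engine aggregates a summary of the failed tasks into the # workflow execution's state_info field; the assertions below rely on # that summary.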
wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertIn("error in tasks: task1", wf_ex.state_info) def test_state_info_two_failed_branches(self): workflow = """--- version: '2.0' test_wf: type: direct tasks: task1: action: std.fail task2: action: std.fail """ wf_service.create_workflows(workflow) # Start workflow. wf_ex = self.engine.start_workflow('test_wf') self.await_workflow_error(wf_ex.id) # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertIn("error in tasks: task1, task2", wf_ex.state_info) def test_state_info_with_policies(self): workflow = """--- version: '2.0' test_wf: type: direct tasks: task1: action: std.fail wait-after: 1 task2: action: std.noop wait-after: 3 """ wf_service.create_workflows(workflow) # Start workflow. wf_ex = self.engine.start_workflow('test_wf') self.await_workflow_error(wf_ex.id) # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertIn("error in tasks: task1", wf_ex.state_info) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ exc.ActionException(), # Mock task1 exception for initial run. 'Task 1.1', # Mock task1 success for initial run. exc.ActionException(), # Mock task1 exception for initial run. 'Task 1.0', # Mock task1 success for rerun. 'Task 1.2' # Mock task1 success for rerun. ] ) ) def test_state_info_with_items(self): workflow = """--- version: '2.0' wf: type: direct tasks: t1: with-items: i in <% list(range(0, 3)) %> action: std.echo output="Task 1.<% $.i %>" """ wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) task_1_ex = self._assert_single_item(task_execs, name='t1') self.assertEqual(states.ERROR, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(3, len(task_1_action_exs)) error_actions = [ action_ex for action_ex in task_1_action_exs if action_ex.state == states.ERROR ] self.assertEqual(2, len(error_actions)) success_actions = [ action_ex for action_ex in task_1_action_exs if action_ex.state == states.SUCCESS ] self.assertEqual(1, len(success_actions)) for action_ex in error_actions: self.assertIn(action_ex.id, wf_ex.state_info) for action_ex in success_actions: self.assertNotIn(action_ex.id, wf_ex.state_info) def test_state_info_with_json(self): workflow = """--- version: "2.0" wf_state_info: type: direct tasks: main_task: action: std.test_dict input: size: 1 key_prefix: "abc" val: "pqr" on-success: - fail msg="<% task().result %>" """ wf_service.create_workflows(workflow) # Start workflow. wf_ex = self.engine.start_workflow('wf_state_info') self.await_workflow_error(wf_ex.id) # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertIn('{"abc0": "pqr"}', wf_ex.state_info) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_subworkflows.py0000644000175000017500000003402500000000000025340 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from mistral.actions import std_actions from mistral import context as auth_context from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WB1 = """ --- version: '2.0' name: wb1 workflows: wf1: type: reverse input: - param1 - param2 output: final_result: <% $.final_result %> tasks: task1: action: std.echo output=<% $.param1 %> publish: result1: <% task(task1).result %> task2: action: std.echo output="'<% $.param1 %> & <% $.param2 %>'" publish: final_result: <% task(task2).result %> requires: [task1] wf2: type: direct output: slogan: <% $.slogan %> tasks: task1: workflow: wf1 param1='Bonnie' param2='Clyde' task_name='task2' publish: slogan: "<% task(task1).result.final_result %> is a cool movie!" """ WB2 = """ --- version: '2.0' name: wb2 workflows: wf1: type: direct tasks: task1: workflow: wf2 wf2: type: direct output: var1: <% $.does_not_exist %> tasks: task1: action: std.noop """ WB3 = """ --- version: '2.0' name: wb3 workflows: wf1: input: - wf_name output: sub_wf_out: <% $.sub_wf_out %> tasks: task1: workflow: <% $.wf_name %> publish: sub_wf_out: <% task(task1).result.sub_wf_out %> wf2: output: sub_wf_out: wf2_out tasks: task1: action: std.noop """ WB4 = """ --- version: '2.0' name: wb4 workflows: wf1: input: - wf_name - inp output: sub_wf_out: <% $.sub_wf_out %> tasks: task1: workflow: <% $.wf_name %> input: <% $.inp %> publish: sub_wf_out: <% task(task1).result.sub_wf_out %> wf2: input: - inp output: sub_wf_out: <% $.inp %> tasks: task1: action: std.noop """ WB5 = """ --- version: '2.0' name: wb5 workflows: wf1: input: - wf_name - inp output: sub_wf_out: '{{ _.sub_wf_out }}' tasks: task1: workflow: '{{ _.wf_name }}' input: '{{ _.inp }}' publish: sub_wf_out: '{{ task("task1").result.sub_wf_out }}' wf2: input: - inp output: sub_wf_out: '{{ _.inp }}' tasks: task1: action: std.noop """ WB6 = """ --- version: '2.0' name: wb6 workflows: wf1: tasks: task1: workflow: wf2 wf2: tasks: task1: workflow: wf3 wf3: tasks: task1: action: std.noop """ class SubworkflowsTest(base.EngineTestCase): def setUp(self): super(SubworkflowsTest, self).setUp() wb_service.create_workbook_v2(WB1) wb_service.create_workbook_v2(WB2) wb_service.create_workbook_v2(WB3) wb_service.create_workbook_v2(WB4) wb_service.create_workbook_v2(WB5) wb_service.create_workbook_v2(WB6) def test_subworkflow_success(self): wf2_ex = self.engine.start_workflow('wb1.wf2') project_id = auth_context.ctx().project_id # Execution of 'wf2'. 
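# start_workflow() returns the top-level execution right away; the # 'wb1.wf1' subworkflow execution is created asynchronously, hence the # polling via self._await() further below.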
self.assertEqual(project_id, wf2_ex.project_id) self.assertIsNotNone(wf2_ex) self.assertDictEqual({}, wf2_ex.input) self.assertDictEqual({'namespace': '', 'env': {}}, wf2_ex.params) self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() self.assertEqual(2, len(wf_execs)) # Execution of 'wf1'. wf1_ex = self._assert_single_item(wf_execs, name='wb1.wf1') wf2_ex = self._assert_single_item(wf_execs, name='wb1.wf2') self.assertEqual(project_id, wf1_ex.project_id) self.assertIsNotNone(wf1_ex.task_execution_id) self.assertDictContainsSubset( { 'task_name': 'task2', 'task_execution_id': wf1_ex.task_execution_id }, wf1_ex.params ) self.assertDictEqual( { 'param1': 'Bonnie', 'param2': 'Clyde' }, wf1_ex.input ) # Wait till workflow 'wf1' is completed. self.await_workflow_success(wf1_ex.id) with db_api.transaction(): wf1_ex = db_api.get_workflow_execution(wf1_ex.id) wf1_output = wf1_ex.output self.assertDictEqual( {'final_result': "'Bonnie & Clyde'"}, wf1_output ) # Wait till workflow 'wf2' is completed. self.await_workflow_success(wf2_ex.id, timeout=4) with db_api.transaction(): wf2_ex = db_api.get_workflow_execution(wf2_ex.id) wf2_output = wf2_ex.output self.assertDictEqual( {'slogan': "'Bonnie & Clyde' is a cool movie!"}, wf2_output ) # Check project_id in tasks. wf1_task_execs = db_api.get_task_executions( workflow_execution_id=wf1_ex.id ) wf2_task_execs = db_api.get_task_executions( workflow_execution_id=wf2_ex.id ) wf1_task1_ex = self._assert_single_item(wf1_task_execs, name='task1') wf2_task1_ex = self._assert_single_item(wf2_task_execs, name='task1') wf1_task2_ex = self._assert_single_item(wf1_task_execs, name='task2') self.assertEqual(project_id, wf2_task1_ex.project_id) self.assertEqual(project_id, wf1_task1_ex.project_id) self.assertEqual(project_id, wf1_task2_ex.project_id) @mock.patch.object(std_actions.EchoAction, 'run', mock.MagicMock(side_effect=exc.ActionException)) def test_subworkflow_error(self): self.engine.start_workflow('wb1.wf2') self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5) wf_execs = db_api.get_workflow_executions() self.assertEqual(2, len(wf_execs)) wf1_ex = self._assert_single_item(wf_execs, name='wb1.wf1') wf2_ex = self._assert_single_item(wf_execs, name='wb1.wf2') # Wait till workflow 'wf1' is completed. self.await_workflow_error(wf1_ex.id) # Wait till workflow 'wf2' is completed, its state must be ERROR. self.await_workflow_error(wf2_ex.id) def test_subworkflow_yaql_error(self): wf_ex = self.engine.start_workflow('wb2.wf1') self.await_workflow_error(wf_ex.id) wf_execs = db_api.get_workflow_executions() self.assertEqual(2, len(wf_execs)) wf2_ex = self._assert_single_item(wf_execs, name='wb2.wf2') self.assertEqual(states.ERROR, wf2_ex.state) self.assertIn('Can not evaluate YAQL expression', wf2_ex.state_info) # Ensure error message is bubbled up to the main workflow. wf1_ex = self._assert_single_item(wf_execs, name='wb2.wf1') self.assertEqual(states.ERROR, wf1_ex.state) self.assertIn('Can not evaluate YAQL expression', wf1_ex.state_info) def test_subworkflow_environment_inheritance(self): env = {'key1': 'abc'} wf2_ex = self.engine.start_workflow('wb1.wf2', env=env) # Execution of 'wf2'.
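# The environment passed to start_workflow() is expected to appear # unchanged in the 'env' key of the execution params checked below.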
self.assertIsNotNone(wf2_ex) self.assertDictEqual({}, wf2_ex.input) self.assertDictEqual( {'env': env, 'namespace': ''}, wf2_ex.params ) self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() self.assertEqual(2, len(wf_execs)) # Execution of 'wf1'. wf1_ex = self._assert_single_item(wf_execs, name='wb1.wf1') wf2_ex = self._assert_single_item(wf_execs, name='wb1.wf2') self.assertIsNotNone(wf1_ex.task_execution_id) self.assertDictContainsSubset({}, wf1_ex.params) # Wait till workflow 'wf1' is completed. self.await_workflow_success(wf1_ex.id) # Wait till workflow 'wf2' is completed. self.await_workflow_success(wf2_ex.id) def test_dynamic_subworkflow_wf2(self): ex = self.engine.start_workflow('wb3.wf1', wf_input={'wf_name': 'wf2'}) self.await_workflow_success(ex.id) with db_api.transaction(): ex = db_api.get_workflow_execution(ex.id) self.assertEqual({'sub_wf_out': 'wf2_out'}, ex.output) def test_dynamic_subworkflow_call_failure(self): ex = self.engine.start_workflow( 'wb3.wf1', wf_input={'wf_name': 'not_existing_wf'} ) self.await_workflow_error(ex.id) with db_api.transaction(): ex = db_api.get_workflow_execution(ex.id) self.assertIn('not_existing_wf', ex.state_info) def test_dynamic_subworkflow_with_generic_input(self): self._test_dynamic_workflow_with_dict_param('wb4.wf1') def test_dynamic_subworkflow_with_jinja(self): self._test_dynamic_workflow_with_dict_param('wb5.wf1') def test_string_workflow_input_failure(self): ex = self.engine.start_workflow( 'wb4.wf1', wf_input={'wf_name': 'wf2', 'inp': 'invalid_string_input'} ) self.await_workflow_error(ex.id) with db_api.transaction(): ex = db_api.get_workflow_execution(ex.id) self.assertIn('invalid_string_input', ex.state_info) def _test_dynamic_workflow_with_dict_param(self, wf_identifier): ex = self.engine.start_workflow( wf_identifier, wf_input={'wf_name': 'wf2', 'inp': {'inp': 'abc'}} ) self.await_workflow_success(ex.id) with db_api.transaction(): ex = db_api.get_workflow_execution(ex.id) self.assertEqual({'sub_wf_out': 'abc'}, ex.output) def test_subworkflow_root_execution_id(self): self.engine.start_workflow('wb6.wf1') self._await(lambda: len(db_api.get_workflow_executions()) == 3, 0.5, 5) wf_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf_execs, name='wb6.wf1') wf2_ex = self._assert_single_item(wf_execs, name='wb6.wf2') wf3_ex = self._assert_single_item(wf_execs, name='wb6.wf3') self.assertEqual(3, len(wf_execs)) # Wait till workflow 'wf1' is completed (and all the sub-workflows # will be completed also). 
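# Once everything is finished, root_execution_id must point from each # nested execution ('wf2', 'wf3') back to the top-level 'wf1' execution, # while 'wf1' itself has no root (checked below).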
self.await_workflow_success(wf1_ex.id) with db_api.transaction(): wf1_ex = db_api.get_workflow_execution(wf1_ex.id) wf2_ex = db_api.get_workflow_execution(wf2_ex.id) wf3_ex = db_api.get_workflow_execution(wf3_ex.id) self.assertIsNone(wf1_ex.root_execution_id, None) self.assertEqual(wf2_ex.root_execution_id, wf1_ex.id) self.assertEqual(wf2_ex.root_execution, wf1_ex) self.assertEqual(wf3_ex.root_execution_id, wf1_ex.id) self.assertEqual(wf3_ex.root_execution, wf1_ex) def test_cascade_delete(self): wf_text = """ version: 2.0 wf: tasks: task1: workflow: sub_wf1 task2: workflow: sub_wf2 sub_wf1: tasks: task1: action: std.noop sub_wf2: tasks: task1: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) self.assertEqual(3, len(db_api.get_workflow_executions())) self.assertEqual(4, len(db_api.get_task_executions())) self.assertEqual(2, len(db_api.get_action_executions())) # Now delete the root workflow execution and make sure that # all dependent objects are deleted as well. db_api.delete_workflow_execution(wf_ex.id) self.assertEqual(0, len(db_api.get_workflow_executions())) self.assertEqual(0, len(db_api.get_task_executions())) self.assertEqual(0, len(db_api.get_action_executions())) def test_cascade_delete_deep(self): wf_text = """ version: 2.0 wf: input: - level tasks: initial: action: std.noop on-success: - recurse: <% $.level > 0 %> recurse: workflow: wf input: level: <% $.level - 1 %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf', wf_input={"level": 7}) self.await_workflow_success(wf_ex.id) self.assertEqual(8, len(db_api.get_workflow_executions())) # Now delete the root workflow execution and make sure that # all dependent objects are deleted as well. db_api.delete_workflow_execution(wf_ex.id) self.assertEqual(0, len(db_api.get_workflow_executions())) self.assertEqual(0, len(db_api.get_task_executions())) self.assertEqual(0, len(db_api.get_action_executions())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_subworkflows_pause_resume.py0000644000175000017500000023432600000000000030123 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
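# The tests in this module exercise cascading pause/resume between a parent # workflow and its subworkflows. A minimal sketch of the engine calls they # drive (assuming a workflow execution id wf_ex_id): # self.engine.pause_workflow(wf_ex_id) -- pause cascades to subworkflows # self.engine.resume_workflow(wf_ex_id) -- resume brings them back to RUNNING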
from mistral.db.v2 import api as db_api from mistral.services import workbooks as wb_service from mistral.tests.unit.engine import base from mistral.workflow import states from mistral_lib import actions as ml_actions class SubworkflowPauseResumeTest(base.EngineTestCase): def test_pause_resume_cascade_down_to_subworkflow(self): wb_text = """ version: '2.0' name: wb workflows: wf1: tasks: task1: workflow: wf2 on-success: task3 task2: workflow: wf3 on-success: task3 task3: join: all wf2: tasks: task1: action: std.async_noop on-success: task2 task2: action: std.noop wf3: tasks: task1: action: std.async_noop on-success: task2 task2: action: std.noop """ wb_service.create_workbook_v2(wb_text) # Start workflow execution. wf_1_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_state(wf_1_ex.id, states.RUNNING) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_execs = wf_1_ex.task_executions wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_execs = wf_2_ex.task_executions wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_execs = wf_3_ex.task_executions wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.RUNNING, wf_1_ex.state) self.assertEqual(2, len(wf_1_task_execs)) self.assertEqual(states.RUNNING, wf_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_1_task_2_ex.state) self.assertEqual(1, len(wf_1_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state) self.assertEqual(wf_1_task_1_action_exs[0].id, wf_2_ex.id) self.assertEqual(1, len(wf_1_task_2_action_exs)) self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state) self.assertEqual(wf_1_task_2_action_exs[0].id, wf_3_ex.id) self.assertEqual(states.RUNNING, wf_2_ex.state) self.assertEqual(1, len(wf_2_task_execs)) self.assertEqual(states.RUNNING, wf_2_task_1_ex.state) self.assertEqual(1, len(wf_2_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_3_ex.state) self.assertEqual(1, len(wf_3_task_execs)) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(1, len(wf_3_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) # Pause the main workflow. self.engine.pause_workflow(wf_1_ex.id) self.await_workflow_paused(wf_1_ex.id) self.await_workflow_paused(wf_2_ex.id) self.await_workflow_paused(wf_3_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. 
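# Naming convention below: wf_1_task_1_action_exs are the "action" # executions of task1 in wf1; for a subworkflow task such an action # execution is itself a workflow execution (their ids are compared # further below).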
wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.PAUSED, wf_2_ex.state) self.assertEqual(states.RUNNING, wf_2_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_2_ex.state) self.assertEqual(states.PAUSED, wf_1_ex.state) # Resume the main workflow. self.engine.resume_workflow(wf_1_ex.id) self.await_workflow_running(wf_1_ex.id) self.await_workflow_running(wf_2_ex.id) self.await_workflow_running(wf_3_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.RUNNING, wf_2_ex.state) self.assertEqual(states.RUNNING, wf_2_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state) self.assertEqual(states.RUNNING, wf_1_task_2_ex.state) self.assertEqual(states.RUNNING, wf_1_ex.state) # Complete action executions of the subworkflows. 
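# std.async_noop stays in RUNNING until a result is delivered from # outside; on_action_complete() below plays the role of that external # completion signal.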
self.engine.on_action_complete( wf_2_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.engine.on_action_complete( wf_3_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.await_workflow_success(wf_2_ex.id) self.await_workflow_success(wf_3_ex.id) self.await_workflow_success(wf_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_execs = wf_1_ex.task_executions wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions wf_1_task_3_ex = self._assert_single_item( wf_1_ex.task_executions, name='task3' ) # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_execs = wf_2_ex.task_executions wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_2_ex = self._assert_single_item( wf_2_ex.task_executions, name='task2' ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_execs = wf_3_ex.task_executions wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_2_ex = self._assert_single_item( wf_3_ex.task_executions, name='task2' ) self.assertEqual(states.SUCCESS, wf_1_ex.state) self.assertEqual(3, len(wf_1_task_execs)) self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_3_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_2_ex.state) self.assertEqual(2, len(wf_2_task_execs)) self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_2_ex.state) self.assertEqual(states.SUCCESS, wf_3_ex.state) self.assertEqual(2, len(wf_3_task_execs)) self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_3_task_2_ex.state) def test_pause_resume_cascade_up_from_subworkflow(self): wb_text = """ version: '2.0' name: wb workflows: wf1: tasks: task1: workflow: wf2 on-success: task3 task2: workflow: wf3 on-success: task3 task3: join: all wf2: tasks: task1: action: std.async_noop on-success: task2 task2: action: std.noop wf3: tasks: task1: action: std.async_noop on-success: task2 task2: action: std.noop """ wb_service.create_workbook_v2(wb_text) # Start workflow execution. wf_1_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_state(wf_1_ex.id, states.RUNNING) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_execs = wf_1_ex.task_executions wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. 
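# Both subworkflows should now be in SUCCESS, and task3 ('join: all') in # the parent should have run once both branches finished.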
wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_execs = wf_2_ex.task_executions wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_execs = wf_3_ex.task_executions wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.RUNNING, wf_1_ex.state) self.assertEqual(2, len(wf_1_task_execs)) self.assertEqual(states.RUNNING, wf_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_1_task_2_ex.state) self.assertEqual(1, len(wf_1_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state) self.assertEqual(wf_1_task_1_action_exs[0].id, wf_2_ex.id) self.assertEqual(1, len(wf_1_task_2_action_exs)) self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state) self.assertEqual(wf_1_task_2_action_exs[0].id, wf_3_ex.id) self.assertEqual(states.RUNNING, wf_2_ex.state) self.assertEqual(1, len(wf_2_task_execs)) self.assertEqual(states.RUNNING, wf_2_task_1_ex.state) self.assertEqual(1, len(wf_2_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_3_ex.state) self.assertEqual(1, len(wf_3_task_execs)) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(1, len(wf_3_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) # Pause the subworkflow. self.engine.pause_workflow(wf_2_ex.id) self.await_workflow_paused(wf_2_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.PAUSED, wf_2_ex.state) self.assertEqual(states.RUNNING, wf_2_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_2_ex.state) self.assertEqual(states.PAUSED, wf_1_ex.state) # Resume the 1st subworkflow. self.engine.resume_workflow(wf_2_ex.id) self.await_workflow_running(wf_2_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. 
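# Only 'wf2' has been resumed: 'wf3' and therefore the parent are # expected to stay PAUSED until 'wf3' is resumed too (verified below).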
wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.RUNNING, wf_2_ex.state) self.assertEqual(states.RUNNING, wf_2_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_1_task_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_2_ex.state) self.assertEqual(states.PAUSED, wf_1_ex.state) # Complete action execution of 1st subworkflow. self.engine.on_action_complete( wf_2_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.await_workflow_success(wf_2_ex.id) self.await_task_success(wf_1_task_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.SUCCESS, wf_2_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_2_ex.state) self.assertEqual(states.PAUSED, wf_1_ex.state) # Resume the 2nd subworkflow. self.engine.resume_workflow(wf_3_ex.id) self.await_workflow_running(wf_3_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. 
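# 'wf2' has now completed, so task1 in the parent succeeds, but the # parent itself remains PAUSED while 'wf3' is still paused (verified # below).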
wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.SUCCESS, wf_2_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state) self.assertEqual(states.RUNNING, wf_1_task_2_ex.state) self.assertEqual(states.RUNNING, wf_1_ex.state) # Complete action execution of 2nd subworkflow. self.engine.on_action_complete( wf_3_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.await_workflow_success(wf_3_ex.id) self.await_workflow_success(wf_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_execs = wf_1_ex.task_executions wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions wf_1_task_3_ex = self._assert_single_item( wf_1_ex.task_executions, name='task3' ) # Get objects for the subworkflow executions. 
wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_execs = wf_2_ex.task_executions wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_2_ex = self._assert_single_item( wf_2_ex.task_executions, name='task2' ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_execs = wf_3_ex.task_executions wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_2_ex = self._assert_single_item( wf_3_ex.task_executions, name='task2' ) self.assertEqual(states.SUCCESS, wf_1_ex.state) self.assertEqual(3, len(wf_1_task_execs)) self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_3_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_2_ex.state) self.assertEqual(2, len(wf_2_task_execs)) self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_2_ex.state) self.assertEqual(states.SUCCESS, wf_3_ex.state) self.assertEqual(2, len(wf_3_task_execs)) self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_3_task_2_ex.state) def test_pause_resume_cascade_down_to_with_items_subworkflows(self): wb_text = """ version: '2.0' name: wb workflows: wf1: tasks: task1: with-items: i in <% range(3) %> workflow: wf2 on-success: task3 task2: workflow: wf3 on-success: task3 task3: join: all wf2: tasks: task1: action: std.async_noop on-success: task2 task2: action: std.noop wf3: tasks: task1: action: std.async_noop on-success: task2 task2: action: std.noop """ wb_service.create_workbook_v2(wb_text) # Start workflow execution. wf_1_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_state(wf_1_ex.id, states.RUNNING) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_execs = wf_1_ex.task_executions wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = sorted( wf_1_task_1_ex.executions, key=lambda x: x['runtime_context']['index'] ) wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the with-items subworkflow executions. wf_2_ex_1 = db_api.get_workflow_execution( wf_1_task_1_action_exs[0].id ) wf_2_ex_1_task_execs = wf_2_ex_1.task_executions wf_2_ex_1_task_1_ex = self._assert_single_item( wf_2_ex_1.task_executions, name='task1' ) wf_2_ex_1_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_1_task_1_ex.id ) wf_2_ex_2 = db_api.get_workflow_execution( wf_1_task_1_action_exs[1].id ) wf_2_ex_2_task_execs = wf_2_ex_2.task_executions wf_2_ex_2_task_1_ex = self._assert_single_item( wf_2_ex_2.task_executions, name='task1' ) wf_2_ex_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_2_task_1_ex.id ) wf_2_ex_3 = db_api.get_workflow_execution( wf_1_task_1_action_exs[2].id ) wf_2_ex_3_task_execs = wf_2_ex_3.task_executions wf_2_ex_3_task_1_ex = self._assert_single_item( wf_2_ex_3.task_executions, name='task1' ) wf_2_ex_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_3_task_1_ex.id ) # Get objects for the wf3 subworkflow execution. 
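# Unlike task1, whose three with-items executions were sorted above by # runtime_context index so that wf_2_ex_1..3 line up with items 0..2, # task2 spawns a single 'wf3' execution.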
wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_execs = wf_3_ex.task_executions wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) # Check state of parent workflow execution. self.assertEqual(states.RUNNING, wf_1_ex.state) self.assertEqual(2, len(wf_1_task_execs)) self.assertEqual(states.RUNNING, wf_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_1_task_2_ex.state) self.assertEqual(3, len(wf_1_task_1_action_exs)) # Check state of wf2 (1) subworkflow execution. self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state) self.assertEqual(wf_1_task_1_action_exs[0].id, wf_2_ex_1.id) self.assertEqual(states.RUNNING, wf_2_ex_1.state) self.assertEqual(1, len(wf_2_ex_1_task_execs)) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state) self.assertEqual(1, len(wf_2_ex_1_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state) # Check state of wf2 (2) subworkflow execution. self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[1].state) self.assertEqual(wf_1_task_1_action_exs[1].id, wf_2_ex_2.id) self.assertEqual(states.RUNNING, wf_2_ex_2.state) self.assertEqual(1, len(wf_2_ex_2_task_execs)) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state) self.assertEqual(1, len(wf_2_ex_2_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state) # Check state of wf2 (3) subworkflow execution. self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[2].state) self.assertEqual(wf_1_task_1_action_exs[2].id, wf_2_ex_3.id) self.assertEqual(states.RUNNING, wf_2_ex_3.state) self.assertEqual(1, len(wf_2_ex_3_task_execs)) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state) self.assertEqual(1, len(wf_2_ex_3_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state) # Check state of wf3 subworkflow execution. self.assertEqual(1, len(wf_1_task_2_action_exs)) self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state) self.assertEqual(wf_1_task_2_action_exs[0].id, wf_3_ex.id) self.assertEqual(states.RUNNING, wf_3_ex.state) self.assertEqual(1, len(wf_3_task_execs)) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(1, len(wf_3_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) # Pause the main workflow. self.engine.pause_workflow(wf_1_ex.id) self.await_workflow_paused(wf_2_ex_1.id) self.await_workflow_paused(wf_2_ex_2.id) self.await_workflow_paused(wf_2_ex_3.id) self.await_workflow_paused(wf_3_ex.id) self.await_task_paused(wf_1_task_1_ex.id) self.await_task_paused(wf_1_task_2_ex.id) self.await_workflow_paused(wf_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = sorted( wf_1_task_1_ex.executions, key=lambda x: x['runtime_context']['index'] ) wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the with-items subworkflow executions. 
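# Pausing the parent cascades down to every with-items execution and to # 'wf3'; note in the checks below that the innermost task and action # executions remain RUNNING even though their workflow executions are # PAUSED.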
wf_2_ex_1 = db_api.get_workflow_execution( wf_1_task_1_action_exs[0].id ) wf_2_ex_1_task_1_ex = self._assert_single_item( wf_2_ex_1.task_executions, name='task1' ) wf_2_ex_1_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_1_task_1_ex.id ) wf_2_ex_2 = db_api.get_workflow_execution( wf_1_task_1_action_exs[1].id ) wf_2_ex_2_task_1_ex = self._assert_single_item( wf_2_ex_2.task_executions, name='task1' ) wf_2_ex_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_2_task_1_ex.id ) wf_2_ex_3 = db_api.get_workflow_execution( wf_1_task_1_action_exs[2].id ) wf_2_ex_3_task_1_ex = self._assert_single_item( wf_2_ex_3.task_executions, name='task1' ) wf_2_ex_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_3_task_1_ex.id ) # Get objects for the wf3 subworkflow execution. wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) # Check state of parent workflow execution. self.assertEqual(states.PAUSED, wf_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_2_ex.state) # Check state of wf2 (1) subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_2_ex_1.state) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state) # Check state of wf2 (2) subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[1].state) self.assertEqual(states.PAUSED, wf_2_ex_2.state) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state) # Check state of wf2 (3) subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[2].state) self.assertEqual(states.PAUSED, wf_2_ex_3.state) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state) # Check state of wf3 subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state) self.assertEqual(states.PAUSED, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) # Resume the main workflow. self.engine.resume_workflow(wf_1_ex.id) self.await_workflow_running(wf_2_ex_1.id) self.await_workflow_running(wf_2_ex_2.id) self.await_workflow_running(wf_2_ex_3.id) self.await_workflow_running(wf_3_ex.id) self.await_task_running(wf_1_task_1_ex.id) self.await_task_running(wf_1_task_2_ex.id) self.await_workflow_running(wf_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = sorted( wf_1_task_1_ex.executions, key=lambda x: x['runtime_context']['index'] ) wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the with-items subworkflow executions. 
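# After resuming the parent, every execution in the tree is expected to # be back in RUNNING state (verified below).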
wf_2_ex_1 = db_api.get_workflow_execution( wf_1_task_1_action_exs[0].id ) wf_2_ex_1_task_1_ex = self._assert_single_item( wf_2_ex_1.task_executions, name='task1' ) wf_2_ex_1_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_1_task_1_ex.id ) wf_2_ex_2 = db_api.get_workflow_execution( wf_1_task_1_action_exs[1].id ) wf_2_ex_2_task_1_ex = self._assert_single_item( wf_2_ex_2.task_executions, name='task1' ) wf_2_ex_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_2_task_1_ex.id ) wf_2_ex_3 = db_api.get_workflow_execution( wf_1_task_1_action_exs[2].id ) wf_2_ex_3_task_1_ex = self._assert_single_item( wf_2_ex_3.task_executions, name='task1' ) wf_2_ex_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_3_task_1_ex.id ) # Get objects for the wf3 subworkflow execution. wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) # Check state of parent workflow execution. self.assertEqual(states.RUNNING, wf_1_ex.state) self.assertEqual(states.RUNNING, wf_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_1_task_2_ex.state) # Check state of wf2 (1) subworkflow execution. self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_2_ex_1.state) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state) # Check state of wf2 (2) subworkflow execution. self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[1].state) self.assertEqual(states.RUNNING, wf_2_ex_2.state) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state) # Check state of wf2 (3) subworkflow execution. self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[2].state) self.assertEqual(states.RUNNING, wf_2_ex_3.state) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state) # Check state of wf3 subworkflow execution. self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state) self.assertEqual(states.RUNNING, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) # Complete action execution of subworkflows. self.engine.on_action_complete( wf_2_ex_1_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.engine.on_action_complete( wf_2_ex_2_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.engine.on_action_complete( wf_2_ex_3_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.engine.on_action_complete( wf_3_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.await_workflow_success(wf_2_ex_1.id) self.await_workflow_success(wf_2_ex_2.id) self.await_workflow_success(wf_2_ex_3.id) self.await_workflow_success(wf_3_ex.id) self.await_task_success(wf_1_task_1_ex.id) self.await_task_success(wf_1_task_2_ex.id) self.await_workflow_success(wf_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. 
wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = sorted( wf_1_task_1_ex.executions, key=lambda x: x['runtime_context']['index'] ) wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the with-items subworkflow executions. wf_2_ex_1 = db_api.get_workflow_execution( wf_1_task_1_action_exs[0].id ) wf_2_ex_1_task_1_ex = self._assert_single_item( wf_2_ex_1.task_executions, name='task1' ) wf_2_ex_1_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_1_task_1_ex.id ) wf_2_ex_2 = db_api.get_workflow_execution( wf_1_task_1_action_exs[1].id ) wf_2_ex_2_task_1_ex = self._assert_single_item( wf_2_ex_2.task_executions, name='task1' ) wf_2_ex_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_2_task_1_ex.id ) wf_2_ex_3 = db_api.get_workflow_execution( wf_1_task_1_action_exs[2].id ) wf_2_ex_3_task_1_ex = self._assert_single_item( wf_2_ex_3.task_executions, name='task1' ) wf_2_ex_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_3_task_1_ex.id ) # Get objects for the wf3 subworkflow execution. wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) # Check state of parent workflow execution. self.assertEqual(states.SUCCESS, wf_1_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state) # Check state of wf2 (1) subworkflow execution. self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_2_ex_1.state) self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_action_exs[0].state) # Check state of wf2 (2) subworkflow execution. self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[1].state) self.assertEqual(states.SUCCESS, wf_2_ex_2.state) self.assertEqual(states.SUCCESS, wf_2_ex_2_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_ex_2_task_1_action_exs[0].state) # Check state of wf2 (3) subworkflow execution. self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[2].state) self.assertEqual(states.SUCCESS, wf_2_ex_3.state) self.assertEqual(states.SUCCESS, wf_2_ex_3_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_ex_3_task_1_action_exs[0].state) # Check state of wf3 subworkflow execution. self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_3_ex.state) self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_3_task_1_action_exs[0].state) def test_pause_resume_cascade_up_from_with_items_subworkflow(self): wb_text = """ version: '2.0' name: wb workflows: wf1: tasks: task1: with-items: i in <% range(3) %> workflow: wf2 on-success: task3 task2: workflow: wf3 on-success: task3 task3: join: all wf2: tasks: task1: action: std.async_noop on-success: task2 task2: action: std.noop wf3: tasks: task1: action: std.async_noop on-success: task2 task2: action: std.noop """ wb_service.create_workbook_v2(wb_text) # Start workflow execution. 
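# This scenario pauses only one of the three with-items subworkflow # executions and expects the pause to cascade up to the parent and # across to the sibling executions.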
wf_1_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_state(wf_1_ex.id, states.RUNNING) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_execs = wf_1_ex.task_executions wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = sorted( wf_1_task_1_ex.executions, key=lambda x: x['runtime_context']['index'] ) wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the with-items subworkflow executions. wf_2_ex_1 = db_api.get_workflow_execution( wf_1_task_1_action_exs[0].id ) wf_2_ex_1_task_execs = wf_2_ex_1.task_executions wf_2_ex_1_task_1_ex = self._assert_single_item( wf_2_ex_1.task_executions, name='task1' ) wf_2_ex_1_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_1_task_1_ex.id ) wf_2_ex_2 = db_api.get_workflow_execution( wf_1_task_1_action_exs[1].id ) wf_2_ex_2_task_execs = wf_2_ex_2.task_executions wf_2_ex_2_task_1_ex = self._assert_single_item( wf_2_ex_2.task_executions, name='task1' ) wf_2_ex_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_2_task_1_ex.id ) wf_2_ex_3 = db_api.get_workflow_execution( wf_1_task_1_action_exs[2].id ) wf_2_ex_3_task_execs = wf_2_ex_3.task_executions wf_2_ex_3_task_1_ex = self._assert_single_item( wf_2_ex_3.task_executions, name='task1' ) wf_2_ex_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_3_task_1_ex.id ) # Get objects for the wf3 subworkflow execution. wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_execs = wf_3_ex.task_executions wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) # Check state of parent workflow execution. self.assertEqual(states.RUNNING, wf_1_ex.state) self.assertEqual(2, len(wf_1_task_execs)) self.assertEqual(states.RUNNING, wf_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_1_task_2_ex.state) self.assertEqual(3, len(wf_1_task_1_action_exs)) # Check state of wf2 (1) subworkflow execution. self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state) self.assertEqual(wf_1_task_1_action_exs[0].id, wf_2_ex_1.id) self.assertEqual(states.RUNNING, wf_2_ex_1.state) self.assertEqual(1, len(wf_2_ex_1_task_execs)) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state) self.assertEqual(1, len(wf_2_ex_1_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state) # Check state of wf2 (2) subworkflow execution. self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[1].state) self.assertEqual(wf_1_task_1_action_exs[1].id, wf_2_ex_2.id) self.assertEqual(states.RUNNING, wf_2_ex_2.state) self.assertEqual(1, len(wf_2_ex_2_task_execs)) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state) self.assertEqual(1, len(wf_2_ex_2_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state) # Check state of wf2 (3) subworkflow execution. 
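# Each with-items item spawns one action execution whose id is also
# the id of the subworkflow execution it started, which is why
# wf_2_ex_1..wf_2_ex_3 are fetched by wf_1_task_1_action_exs[N].id
# above and why the id equality is asserted below. A sketch of the
# ordering helper this relies on (name illustrative):
def _by_item_index(action_exs):
    # runtime_context['index'] records which with-items item an
    # action execution belongs to.
    return sorted(
        action_exs,
        key=lambda a: a['runtime_context']['index']
    )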
self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[2].state) self.assertEqual(wf_1_task_1_action_exs[2].id, wf_2_ex_3.id) self.assertEqual(states.RUNNING, wf_2_ex_3.state) self.assertEqual(1, len(wf_2_ex_3_task_execs)) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state) self.assertEqual(1, len(wf_2_ex_3_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state) # Check state of wf3 subworkflow execution. self.assertEqual(1, len(wf_1_task_2_action_exs)) self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state) self.assertEqual(wf_1_task_2_action_exs[0].id, wf_3_ex.id) self.assertEqual(states.RUNNING, wf_3_ex.state) self.assertEqual(1, len(wf_3_task_execs)) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(1, len(wf_3_task_1_action_exs)) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) # Pause one of the subworkflows in the with-items task. self.engine.pause_workflow(wf_2_ex_1.id) self.await_workflow_paused(wf_2_ex_1.id) self.await_workflow_paused(wf_2_ex_2.id) self.await_workflow_paused(wf_2_ex_3.id) self.await_workflow_paused(wf_3_ex.id) self.await_task_paused(wf_1_task_1_ex.id) self.await_task_paused(wf_1_task_2_ex.id) self.await_workflow_paused(wf_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = sorted( wf_1_task_1_ex.executions, key=lambda x: x['runtime_context']['index'] ) wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the with-items subworkflow executions. wf_2_ex_1 = db_api.get_workflow_execution( wf_1_task_1_action_exs[0].id ) wf_2_ex_1_task_1_ex = self._assert_single_item( wf_2_ex_1.task_executions, name='task1' ) wf_2_ex_1_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_1_task_1_ex.id ) wf_2_ex_2 = db_api.get_workflow_execution( wf_1_task_1_action_exs[1].id ) wf_2_ex_2_task_1_ex = self._assert_single_item( wf_2_ex_2.task_executions, name='task1' ) wf_2_ex_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_2_task_1_ex.id ) wf_2_ex_3 = db_api.get_workflow_execution( wf_1_task_1_action_exs[2].id ) wf_2_ex_3_task_1_ex = self._assert_single_item( wf_2_ex_3.task_executions, name='task1' ) wf_2_ex_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_3_task_1_ex.id ) # Get objects for the wf3 subworkflow execution. wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) # Check state of parent workflow execution. self.assertEqual(states.PAUSED, wf_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_2_ex.state) # Check state of wf2 (1) subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_2_ex_1.state) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state) # Check state of wf2 (2) subworkflow execution. 
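# Pausing a single with-items subworkflow cascades up to the parent
# workflow and from there across the sibling subworkflows, which is
# what the PAUSED assertions in this block verify. The round trip in
# miniature (a sketch; the ids are the ones bound earlier in this
# test):
def _pause_and_await_cascade(sub_ex_id, parent_ex_id):
    self.engine.pause_workflow(sub_ex_id)
    # The parent only reaches PAUSED once the cascade has settled.
    self.await_workflow_paused(parent_ex_id)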
self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[1].state) self.assertEqual(states.PAUSED, wf_2_ex_2.state) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state) # Check state of wf2 (3) subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[2].state) self.assertEqual(states.PAUSED, wf_2_ex_3.state) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state) # Check state of wf3 subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state) self.assertEqual(states.PAUSED, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) # NOTE(rakhmerov): Since cascade pausing is not atomic we need # to make sure that all internal operations related to pausing # one of workflow executions 'wb.wf2' are completed. So we have # to look if any "_on_action_update" calls are scheduled. def _predicate(): return all( [ '_on_action_update' not in c.target_method_name for c in db_api.get_delayed_calls() ] ) self._await(_predicate) # Resume one of the subworkflows in the with-items task. self.engine.resume_workflow(wf_2_ex_1.id) self.await_workflow_running(wf_2_ex_1.id) self.await_workflow_paused(wf_2_ex_2.id) self.await_workflow_paused(wf_2_ex_3.id) self.await_workflow_paused(wf_3_ex.id) self.await_task_paused(wf_1_task_1_ex.id) self.await_task_paused(wf_1_task_2_ex.id) self.await_workflow_paused(wf_1_ex.id) # Complete action execution of the subworkflow that is resumed. self.engine.on_action_complete( wf_2_ex_1_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.await_workflow_success(wf_2_ex_1.id) self.await_workflow_paused(wf_2_ex_2.id) self.await_workflow_paused(wf_2_ex_3.id) self.await_workflow_paused(wf_3_ex.id) self.await_task_paused(wf_1_task_1_ex.id) self.await_task_paused(wf_1_task_2_ex.id) self.await_workflow_paused(wf_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = sorted( wf_1_task_1_ex.executions, key=lambda x: x['runtime_context']['index'] ) wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the with-items subworkflow executions. wf_2_ex_1 = db_api.get_workflow_execution( wf_1_task_1_action_exs[0].id ) wf_2_ex_1_task_1_ex = self._assert_single_item( wf_2_ex_1.task_executions, name='task1' ) wf_2_ex_1_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_1_task_1_ex.id ) wf_2_ex_2 = db_api.get_workflow_execution( wf_1_task_1_action_exs[1].id ) wf_2_ex_2_task_1_ex = self._assert_single_item( wf_2_ex_2.task_executions, name='task1' ) wf_2_ex_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_2_task_1_ex.id ) wf_2_ex_3 = db_api.get_workflow_execution( wf_1_task_1_action_exs[2].id ) wf_2_ex_3_task_1_ex = self._assert_single_item( wf_2_ex_3.task_executions, name='task1' ) wf_2_ex_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_3_task_1_ex.id ) # Get objects for the wf3 subworkflow execution. 
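# The _predicate above generalizes to "wait until no delayed call of
# a given kind is pending", which is how the test rides out the
# non-atomic cascade. A sketch of that generalization (helper name
# illustrative; db_api.get_delayed_calls is the same API used above):
def _no_pending_calls(method_name):
    return all(
        method_name not in c.target_method_name
        for c in db_api.get_delayed_calls()
    )
# Usage mirrors the code above:
#     self._await(lambda: _no_pending_calls('_on_action_update'))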
wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) # Check state of parent workflow execution. self.assertEqual(states.PAUSED, wf_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_2_ex.state) # Check state of wf2 (1) subworkflow execution. self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_2_ex_1.state) self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_action_exs[0].state) # Check state of wf2 (2) subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[1].state) self.assertEqual(states.PAUSED, wf_2_ex_2.state) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state) # Check state of wf2 (3) subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[2].state) self.assertEqual(states.PAUSED, wf_2_ex_3.state) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state) # Check state of wf3 subworkflow execution. self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state) self.assertEqual(states.PAUSED, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) # Resume one of the remaining subworkflows. self.engine.resume_workflow(wf_2_ex_2.id) self.engine.resume_workflow(wf_2_ex_3.id) self.engine.resume_workflow(wf_3_ex.id) self.await_workflow_running(wf_2_ex_2.id) self.await_workflow_running(wf_2_ex_3.id) self.await_workflow_running(wf_3_ex.id) self.await_task_running(wf_1_task_1_ex.id) self.await_task_running(wf_1_task_2_ex.id) self.await_workflow_running(wf_1_ex.id) # Complete action executions of the remaining subworkflows. self.engine.on_action_complete( wf_2_ex_2_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.engine.on_action_complete( wf_2_ex_3_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.engine.on_action_complete( wf_3_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.await_workflow_success(wf_2_ex_1.id) self.await_workflow_success(wf_2_ex_2.id) self.await_workflow_success(wf_2_ex_3.id) self.await_workflow_success(wf_3_ex.id) self.await_task_success(wf_1_task_1_ex.id) self.await_task_success(wf_1_task_2_ex.id) self.await_workflow_success(wf_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = sorted( wf_1_task_1_ex.executions, key=lambda x: x['runtime_context']['index'] ) wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the with-items subworkflow executions. 
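# The parent execution transitions back to RUNNING only after every
# remaining paused subworkflow has been resumed, hence the three
# resume_workflow calls above. The same step written as a loop
# (a sketch over the ids bound earlier):
def _resume_all(ex_ids):
    for ex_id in ex_ids:
        self.engine.resume_workflow(ex_id)
        self.await_workflow_running(ex_id)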
wf_2_ex_1 = db_api.get_workflow_execution( wf_1_task_1_action_exs[0].id ) wf_2_ex_1_task_1_ex = self._assert_single_item( wf_2_ex_1.task_executions, name='task1' ) wf_2_ex_1_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_1_task_1_ex.id ) wf_2_ex_2 = db_api.get_workflow_execution( wf_1_task_1_action_exs[1].id ) wf_2_ex_2_task_1_ex = self._assert_single_item( wf_2_ex_2.task_executions, name='task1' ) wf_2_ex_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_2_task_1_ex.id ) wf_2_ex_3 = db_api.get_workflow_execution( wf_1_task_1_action_exs[2].id ) wf_2_ex_3_task_1_ex = self._assert_single_item( wf_2_ex_3.task_executions, name='task1' ) wf_2_ex_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_ex_3_task_1_ex.id ) # Get objects for the wf3 subworkflow execution. wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) # Check state of parent workflow execution. self.assertEqual(states.SUCCESS, wf_1_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state) # Check state of wf2 (1) subworkflow execution. self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_2_ex_1.state) self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_action_exs[0].state) # Check state of wf2 (2) subworkflow execution. self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[1].state) self.assertEqual(states.SUCCESS, wf_2_ex_2.state) self.assertEqual(states.SUCCESS, wf_2_ex_2_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_ex_2_task_1_action_exs[0].state) # Check state of wf2 (3) subworkflow execution. self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[2].state) self.assertEqual(states.SUCCESS, wf_2_ex_3.state) self.assertEqual(states.SUCCESS, wf_2_ex_3_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_ex_3_task_1_action_exs[0].state) # Check state of wf3 subworkflow execution. self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_3_ex.state) self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_3_task_1_action_exs[0].state) def test_pause_resume_cascade_up_from_subworkflow_pause_before(self): wb_text = """ version: '2.0' name: wb workflows: wf1: tasks: task1: workflow: wf2 on-success: task3 task2: workflow: wf3 on-success: task3 task3: join: all wf2: tasks: task1: action: std.noop on-success: task2 task2: pause-before: true action: std.async_noop wf3: tasks: task1: action: std.async_noop on-success: task2 task2: action: std.noop """ wb_service.create_workbook_v2(wb_text) # Start workflow execution. wf_1_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_state(wf_1_ex.id, states.PAUSED) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. 
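# 'pause-before: true' on wf2's task2 makes the engine pause wf2
# before that task is ever scheduled, so the assertions below expect
# task2 in IDLE with zero action executions. A minimal standalone
# workflow showing just the attribute (a sketch, not used by this
# test):
PAUSE_BEFORE_WF = """
version: '2.0'
wf:
  tasks:
    task1:
      pause-before: true
      action: std.noop
"""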
wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_2_task_2_ex = self._assert_single_item( wf_2_ex.task_executions, name='task2' ) wf_2_task_2_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_2_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.PAUSED, wf_2_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state) self.assertEqual(states.IDLE, wf_2_task_2_ex.state) self.assertEqual(0, len(wf_2_task_2_action_exs)) self.assertEqual(states.PAUSED, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_1_ex.state) self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state) self.assertEqual(states.PAUSED, wf_1_task_2_ex.state) self.assertEqual(states.PAUSED, wf_1_ex.state) # Resume the main workflow. self.engine.resume_workflow(wf_1_ex.id) self.await_workflow_running(wf_1_ex.id) self.await_workflow_running(wf_2_ex.id) self.await_workflow_running(wf_3_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_2_task_2_ex = self._assert_single_item( wf_2_ex.task_executions, name='task2' ) wf_2_task_2_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_2_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) self.assertEqual(states.RUNNING, wf_2_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_2_task_2_ex.state) self.assertEqual(states.RUNNING, wf_2_task_2_action_exs[0].state) self.assertEqual(states.RUNNING, wf_3_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_ex.state) self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state) self.assertEqual(states.RUNNING, wf_1_task_1_ex.state) self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state) self.assertEqual(states.RUNNING, wf_1_task_2_ex.state) self.assertEqual(states.RUNNING, wf_1_ex.state) # Complete action executions of the subworkflows. 
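# on_action_complete takes a mistral_lib Result; besides the success
# payload used below, error and cancellation outcomes follow the same
# shape (Result(cancel=True) is exercised in test_task_cancel.py
# later in this suite). The three variants side by side (payload
# values are illustrative):
def _sample_results():
    return [
        ml_actions.Result(data={'result': 'foobar'}),  # success
        ml_actions.Result(error='boom'),               # failure
        ml_actions.Result(cancel=True),                # cancellation
    ]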
self.engine.on_action_complete( wf_2_task_2_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.engine.on_action_complete( wf_3_task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.await_workflow_success(wf_2_ex.id) self.await_workflow_success(wf_3_ex.id) self.await_workflow_success(wf_1_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() # Get objects for the parent workflow execution. wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1') wf_1_task_execs = wf_1_ex.task_executions wf_1_task_1_ex = self._assert_single_item( wf_1_ex.task_executions, name='task1' ) wf_1_task_1_action_exs = wf_1_task_1_ex.executions wf_1_task_2_ex = self._assert_single_item( wf_1_ex.task_executions, name='task2' ) wf_1_task_2_action_exs = wf_1_task_2_ex.executions wf_1_task_3_ex = self._assert_single_item( wf_1_ex.task_executions, name='task3' ) # Get objects for the subworkflow executions. wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2') wf_2_task_execs = wf_2_ex.task_executions wf_2_task_1_ex = self._assert_single_item( wf_2_ex.task_executions, name='task1' ) wf_2_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_1_ex.id ) wf_2_task_2_ex = self._assert_single_item( wf_2_ex.task_executions, name='task2' ) wf_2_task_2_action_exs = db_api.get_action_executions( task_execution_id=wf_2_task_2_ex.id ) wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3') wf_3_task_execs = wf_3_ex.task_executions wf_3_task_1_ex = self._assert_single_item( wf_3_ex.task_executions, name='task1' ) wf_3_task_1_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_1_ex.id ) wf_3_task_2_ex = self._assert_single_item( wf_3_ex.task_executions, name='task2' ) wf_3_task_2_action_exs = db_api.get_action_executions( task_execution_id=wf_3_task_2_ex.id ) self.assertEqual(states.SUCCESS, wf_1_ex.state) self.assertEqual(3, len(wf_1_task_execs)) self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_3_ex.state) self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_2_ex.state) self.assertEqual(2, len(wf_2_task_execs)) self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_2_ex.state) self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_2_task_2_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_3_ex.state) self.assertEqual(2, len(wf_3_task_execs)) self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state) self.assertEqual(states.SUCCESS, wf_3_task_2_ex.state) self.assertEqual(states.SUCCESS, wf_3_task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, wf_3_task_2_action_exs[0].state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_task_cancel.py0000644000175000017500000002467700000000000025054 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import testtools from mistral.actions import std_actions from mistral.db.v2 import api as db_api from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states from mistral_lib import actions as ml_actions class TaskCancelTest(base.EngineTestCase): def test_cancel_action_execution(self): workflow = """ version: '2.0' wf: tasks: task1: action: std.async_noop on-success: - task2 on-error: - task3 on-complete: - task4 task2: action: std.noop task3: action: std.noop task4: action: std.noop """ wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf') self.await_workflow_state(wf_ex.id, states.RUNNING) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wf') task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(cancel=True) ) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) self.await_task_cancelled(task_1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item(task_execs, name='task1') task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled tasks: task1", wf_ex.state_info) self.assertEqual(1, len(task_execs)) self.assertEqual(states.CANCELLED, task_1_ex.state) self.assertIsNone(task_1_ex.state_info) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.CANCELLED, task_1_action_exs[0].state) self.assertIsNone(task_1_action_exs[0].state_info) def test_cancel_child_workflow_action_execution(self): workbook = """ version: '2.0' name: wb workflows: wf: tasks: taskx: workflow: subwf subwf: tasks: task1: action: std.async_noop on-success: - task2 on-error: - task3 on-complete: - task4 task2: action: std.noop task3: action: std.noop task4: action: std.noop """ wb_service.create_workbook_v2(workbook) wf_ex = self.engine.start_workflow('wb.wf') self.await_workflow_state(wf_ex.id, states.RUNNING) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_ex = self._assert_single_item(wf_execs, name='wb.subwf') task_1_ex = self._assert_single_item( subwf_ex.task_executions, name='task1' ) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) self.engine.on_action_complete( 
task_1_action_exs[0].id, ml_actions.Result(cancel=True) ) self.await_workflow_cancelled(subwf_ex.id) self.await_task_cancelled(task_ex.id) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_ex = self._assert_single_item(wf_execs, name='wb.subwf') subwf_task_execs = subwf_ex.task_executions self.assertEqual(states.CANCELLED, subwf_ex.state) self.assertEqual("Cancelled tasks: task1", subwf_ex.state_info) self.assertEqual(1, len(subwf_task_execs)) self.assertEqual(states.CANCELLED, task_ex.state) self.assertEqual("Cancelled tasks: task1", task_ex.state_info) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled tasks: taskx", wf_ex.state_info) def test_cancel_action_execution_with_task_retry(self): workflow = """ version: '2.0' wf: tasks: task1: action: std.async_noop retry: count: 3 delay: 0 on-success: - task2 task2: action: std.noop """ wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf') self.await_workflow_state(wf_ex.id, states.RUNNING) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wf') task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(cancel=True) ) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) self.await_task_cancelled(task_1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item(task_execs, name='task1') task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled tasks: task1", wf_ex.state_info) self.assertEqual(1, len(task_execs)) self.assertEqual(states.CANCELLED, task_1_ex.state) self.assertIsNone(task_1_ex.state_info) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.CANCELLED, task_1_action_exs[0].state) self.assertIsNone(task_1_action_exs[0].state_info) @testtools.skip('Restore concurrency support.') @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 2' # Mock task2 success. 
] ) ) def test_cancel_with_items_concurrency(self): wb_def = """ version: '2.0' name: wb1 workflows: wf1: tasks: t1: with-items: i in <% list(range(0, 4)) %> action: std.async_noop concurrency: 2 on-success: - t2 t2: action: std.echo output="Task 2" """ wb_service.create_workbook_v2(wb_def) wf1_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_state(wf1_ex.id, states.RUNNING) with db_api.transaction(): wf1_execs = db_api.get_workflow_executions() wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1') wf1_t1_ex = self._assert_single_item( wf1_ex.task_executions, name='t1' ) wf1_t1_action_exs = db_api.get_action_executions( task_execution_id=wf1_t1_ex.id ) self.assertEqual(2, len(wf1_t1_action_exs)) self.assertEqual(states.RUNNING, wf1_t1_action_exs[0].state) self.assertEqual(states.RUNNING, wf1_t1_action_exs[1].state) # Cancel action execution for task. for wf1_t1_action_ex in wf1_t1_action_exs: self.engine.on_action_complete( wf1_t1_action_ex.id, ml_actions.Result(cancel=True) ) self.await_task_cancelled(wf1_t1_ex.id) self.await_workflow_cancelled(wf1_ex.id) wf1_t1_action_exs = db_api.get_action_executions( task_execution_id=wf1_t1_ex.id ) self.assertEqual(2, len(wf1_t1_action_exs)) self.assertEqual(states.CANCELLED, wf1_t1_action_exs[0].state) self.assertEqual(states.CANCELLED, wf1_t1_action_exs[1].state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_task_defaults.py0000644000175000017500000001506500000000000025425 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime as dt import mock from oslo_config import cfg import requests from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class TaskDefaultsDirectWorkflowEngineTest(base.EngineTestCase): @mock.patch.object( requests, 'request', mock.MagicMock(side_effect=Exception()) ) def test_task_defaults_on_error(self): wf_text = """--- version: '2.0' wf: type: direct task-defaults: on-error: - task3 tasks: task1: description: That should lead to transition to task3. action: std.http url="http://some_url" on-success: - task2 task2: action: std.echo output="Morpheus" task3: action: std.echo output="output" """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
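# task_executions is a lazily loaded relationship, so it has to be
# read while the transaction (and its DB session) is still open;
# touching it on a detached object would typically fail. The pattern
# condensed into a helper (a sketch using the same calls as below):
def _task_names(wf_ex_id):
    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex_id)
        # Access the relationship before the session closes.
        return [t.name for t in wf_ex.task_executions]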
wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='task1') task3 = self._assert_single_item(tasks, name='task3') self.assertEqual(2, len(tasks)) self.assertEqual(states.ERROR, task1.state) self.assertEqual(states.SUCCESS, task3.state) class TaskDefaultsReverseWorkflowEngineTest(base.EngineTestCase): def test_task_defaults_retry_policy(self): wf_text = """--- version: '2.0' wf: type: reverse task-defaults: retry: count: 2 delay: 1 tasks: task1: action: std.fail task2: action: std.echo output=2 requires: [task1] """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf', task_name='task2') self.await_workflow_error(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(1, len(tasks)) task1 = self._assert_single_item( tasks, name='task1', state=states.ERROR ) self.assertGreater( task1.runtime_context['retry_task_policy']['retry_no'], 0 ) def test_task_defaults_timeout_policy(self): wf_text = """--- version: '2.0' wf: type: reverse task-defaults: timeout: 1 tasks: task1: action: std.async_noop task2: action: std.echo output=2 requires: [task1] """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf', task_name='task2') self.await_workflow_error(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(1, len(tasks)) self._assert_single_item(tasks, name='task1', state=states.ERROR) task_ex = db_api.get_task_execution(tasks[0].id) self.assertIn("Task timed out", task_ex.state_info) def test_task_defaults_wait_policies(self): wf_text = """--- version: '2.0' wf: type: reverse task-defaults: wait-before: 1 wait-after: 1 tasks: task1: action: std.echo output=1 """ wf_service.create_workflows(wf_text) time_before = dt.datetime.utcnow() # Start workflow. wf_ex = self.engine.start_workflow('wf', task_name='task1') self.await_workflow_success(wf_ex.id) # Workflow must work at least 2 seconds (1+1). self.assertGreater( (dt.datetime.utcnow() - time_before).total_seconds(), 2 ) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(1, len(tasks)) self._assert_single_item(tasks, name='task1', state=states.SUCCESS) def test_task_defaults_requires(self): wf_text = """--- version: '2.0' wf: type: reverse task-defaults: requires: [always_do] tasks: task1: action: std.echo output=1 task2: action: std.echo output=1 requires: [task1] always_do: action: std.echo output="Do something" """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf', task_name='task2') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
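# Retry progress is tracked on the task execution itself under
# runtime_context['retry_task_policy']['retry_no'], as the retry
# policy test above asserts. A small accessor for that bookkeeping
# (helper name illustrative; assumes the policy has already fired,
# which is when the key is present):
def _retry_no(task_ex):
    return task_ex.runtime_context['retry_task_policy']['retry_no']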
wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(3, len(tasks)) self._assert_single_item(tasks, name='task1', state=states.SUCCESS) self._assert_single_item(tasks, name='task2', state=states.SUCCESS) self._assert_single_item(tasks, name='always_do', state=states.SUCCESS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_task_pause_resume.py0000644000175000017500000002473300000000000026315 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states from mistral_lib import actions as ml_actions class TaskPauseResumeTest(base.EngineTestCase): def test_pause_resume_action_ex(self): workflow = """ version: '2.0' wf: tasks: task1: action: std.async_noop on-success: - task2 task2: action: std.noop """ wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf') self.await_workflow_state(wf_ex.id, states.RUNNING) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wf') task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_execs)) self.assertEqual(states.RUNNING, task_1_ex.state) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) # Pause the action execution of task 1. self.engine.on_action_update(task_1_action_exs[0].id, states.PAUSED) self.await_task_paused(task_1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(1, len(task_execs)) self.assertEqual(states.PAUSED, task_1_ex.state) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.PAUSED, task_1_action_exs[0].state) # Resume the action execution of task 1. 
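# A single action execution can be driven through PAUSED and back to
# RUNNING with on_action_update, and the owning task and workflow
# follow its state, as this test shows. The round trip in one place
# (a sketch over the ids bound above):
def _pause_resume_action(action_ex_id, task_ex_id):
    self.engine.on_action_update(action_ex_id, states.PAUSED)
    self.await_task_paused(task_ex_id)
    self.engine.on_action_update(action_ex_id, states.RUNNING)
    self.await_task_running(task_ex_id)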
self.engine.on_action_update(task_1_action_exs[0].id, states.RUNNING) self.await_task_running(task_1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_execs)) self.assertEqual(states.RUNNING, task_1_ex.state) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) # Complete action execution of task 1. self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) # Wait for the workflow execution to complete. self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item(task_execs, name='task1') task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) task_2_ex = self._assert_single_item(task_execs, name='task2') self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(2, len(task_execs)) self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, task_2_ex.state) def test_pause_resume_action_ex_with_items_task(self): workflow = """ version: '2.0' wf: tasks: task1: with-items: i in <% range(3) %> action: std.async_noop on-success: - task2 task2: action: std.noop """ wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf') self.await_workflow_state(wf_ex.id, states.RUNNING) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wf') task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task_1_action_exs = sorted( db_api.get_action_executions(task_execution_id=task_1_ex.id), key=lambda x: x['runtime_context']['index'] ) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_execs)) self.assertEqual(states.RUNNING, task_1_ex.state) self.assertEqual(3, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) self.assertEqual(states.RUNNING, task_1_action_exs[1].state) self.assertEqual(states.RUNNING, task_1_action_exs[2].state) # Pause the 1st action execution of task 1. self.engine.on_action_update(task_1_action_exs[0].id, states.PAUSED) self.await_task_paused(task_1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task_1_action_exs = sorted( db_api.get_action_executions(task_execution_id=task_1_ex.id), key=lambda x: x['runtime_context']['index'] ) self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(1, len(task_execs)) self.assertEqual(states.PAUSED, task_1_ex.state) self.assertEqual(3, len(task_1_action_exs)) self.assertEqual(states.PAUSED, task_1_action_exs[0].state) self.assertEqual(states.RUNNING, task_1_action_exs[1].state) self.assertEqual(states.RUNNING, task_1_action_exs[2].state) # Complete 2nd and 3rd action executions of task 1. 
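# With with-items, the task stays PAUSED while its other items keep
# running and even complete; only resuming the paused item returns
# the task to RUNNING, which the assertions below confirm. A check
# for "some item is still paused" (helper name illustrative):
def _has_paused_item(task_ex_id):
    return any(
        a.state == states.PAUSED
        for a in db_api.get_action_executions(
            task_execution_id=task_ex_id
        )
    )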
self.engine.on_action_complete( task_1_action_exs[1].id, ml_actions.Result(data={'result': 'two'}) ) self.engine.on_action_complete( task_1_action_exs[2].id, ml_actions.Result(data={'result': 'three'}) ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task_1_action_exs = sorted( db_api.get_action_executions(task_execution_id=task_1_ex.id), key=lambda x: x['runtime_context']['index'] ) self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(1, len(task_execs)) self.assertEqual(states.PAUSED, task_1_ex.state) self.assertEqual(3, len(task_1_action_exs)) self.assertEqual(states.PAUSED, task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, task_1_action_exs[1].state) self.assertEqual(states.SUCCESS, task_1_action_exs[2].state) # Resume the 1st action execution of task 1. self.engine.on_action_update(task_1_action_exs[0].id, states.RUNNING) self.await_task_running(task_1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task_1_action_exs = sorted( db_api.get_action_executions(task_execution_id=task_1_ex.id), key=lambda x: x['runtime_context']['index'] ) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_execs)) self.assertEqual(states.RUNNING, task_1_ex.state) self.assertEqual(3, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, task_1_action_exs[1].state) self.assertEqual(states.SUCCESS, task_1_action_exs[2].state) # Complete the 1st action execution of task 1. self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) # Wait for the workflow execution to complete. self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item(task_execs, name='task1') task_1_action_exs = sorted( db_api.get_action_executions(task_execution_id=task_1_ex.id), key=lambda x: x['runtime_context']['index'] ) task_2_ex = self._assert_single_item(task_execs, name='task2') self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(2, len(task_execs)) self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertEqual(3, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) self.assertEqual(states.SUCCESS, task_1_action_exs[1].state) self.assertEqual(states.SUCCESS, task_1_action_exs[2].state) self.assertEqual(states.SUCCESS, task_2_ex.state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_task_publish.py0000644000175000017500000000536700000000000025270 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_config import cfg from mistral.actions import std_actions from mistral.db.v2 import api as db_api from mistral.services import workbooks as wb_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') SIMPLE_WORKBOOK = """ --- version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t1: action: std.echo output="Task 1" publish: v1: <% $.t1.get($foobar) %> on-success: - t2 t2: action: std.echo output="Task 2" on-success: - t3 t3: action: std.echo output="Task 3" """ class TaskPublishTest(base.EngineTestCase): @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1', # Mock task1 success. 'Task 2', # Mock task2 success. 'Task 3' # Mock task3 success. ] ) ) def test_publish_failure(self): wb_service.create_workbook_v2(SIMPLE_WORKBOOK) # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') # Task 1 should have failed. self.assertEqual(states.ERROR, task_1_ex.state) self.assertIn('Can not evaluate YAQL expression', task_1_ex.state_info) # Action execution of task 1 should have succeeded. task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_task_started_finished_at.py0000644000175000017500000001253300000000000027616 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet from mistral.tests.unit.engine import base from mistral.db.v2 import api as db_api from mistral.services import workflows as wf_service class TaskStartedFinishedAtTest(base.EngineTestCase): def setUp(self): super(TaskStartedFinishedAtTest, self).setUp() def test_started_finished_fields_updated_after_rerun(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.fail wait-before: 2 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) task_ex = self._extract_task_ex(wf_ex.id) started_1st, finished_1st = self._get_started_finished(task_ex) # Make sure to rerun the workflow after a certain delay so that # times for the first run are different from times in the second run. 
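# rerun_workflow gives the failed task a fresh run, so both
# timestamps are expected to move; comparing the pairs captured by
# _get_started_finished before and after the rerun is enough to
# prove it. A sketch of that comparison (helper name illustrative):
def _assert_times_moved(before, after):
    # 'before' / 'after' are (started_at, finished_at) pairs.
    self.assertNotEqual(before[0], after[0])
    self.assertNotEqual(before[1], after[1])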
eventlet.sleep(1) wf_ex = self.engine.rerun_workflow(task_ex.id) self.await_workflow_error(wf_ex.id) task_ex = self._extract_task_ex(wf_ex.id) started_2nd, finished_2nd = self._get_started_finished(task_ex) self.assertNotEqual(started_1st, started_2nd) self.assertNotEqual(finished_1st, finished_2nd) def test_correct_duration_in_case_of_join_all(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.sleep seconds=1 on-success: join_task task2: action: std.sleep seconds=2 on-success: join_task task3: action: std.sleep seconds=3 on-success: join_task join_task: join: all action: std.sleep seconds=1 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) task1_ex = self._extract_task_ex(wf_ex.id, 'task1') task2_ex = self._extract_task_ex(wf_ex.id, 'task2') task3_ex = self._extract_task_ex(wf_ex.id, 'task3') join_task_ex = self._extract_task_ex(wf_ex.id, 'join_task') self._check_started_after(join_task_ex, task1_ex) self._check_started_after(join_task_ex, task2_ex) self._check_started_after(join_task_ex, task3_ex) def test_retries_do_not_update_created_at(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.fail retry: delay: 1 count: 5 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) task_ex = self._extract_task_ex(wf_ex.id) created_at = task_ex.created_at started_at = self._get_started_finished(task_ex)[0] self.assertEqual(created_at, started_at) def test_wait_before_after_are_included_to_duration(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop wait-before: 1 wait-after: 2 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) task_ex = self._extract_task_ex(wf_ex.id) started, finished = self._get_started_finished(task_ex) duration = self._get_task_duration(started, finished) self._check_duration_more_than(duration, 1) def _extract_task_ex(self, wf_ex_id, name='task1'): with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex_id) task_execs = wf_ex.task_executions return self._assert_single_item(task_execs, name=name) def _get_started_finished(self, task_ex): started_at = task_ex.started_at finished_at = task_ex.finished_at self.assertIsNotNone(started_at) self.assertIsNotNone(finished_at) return started_at, finished_at def _get_task_duration(self, start_time, finish_time): return (finish_time - start_time).total_seconds() def _check_started_after(self, task_ex1, task_ex2): first_finished = self._get_started_finished(task_ex2)[1] second_started = self._get_started_finished(task_ex1)[0] delta = self._get_task_duration(first_finished, second_started) self.assertTrue( delta >= 0, "Expected {} was started after {} was finished".format( task_ex1.name, task_ex2.name) ) def _check_duration_more_than(self, duration, time): self.assertTrue( time < duration, "Expected duration {} was more than {}".format(duration, time) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_tasks_function.py0000644000175000017500000003002100000000000025613 0ustar00coreycorey00000000000000# Copyright 2016 - Nokia, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.services import workbooks as wb_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WORKBOOK_WITH_EXPRESSIONS = """ --- version: '2.0' name: wb workflows: test_tasks_function: input: - wf1_wx_id - wf2_wx_id - wf3_wx_id - wf4_wx_id - wf5_wx_id tasks: main_task: action: std.noop publish: all_tasks_yaql: <% tasks() %> all_tasks_jinja: "{{ tasks() }}" wf1_tasks_yaql: <% tasks($.wf1_wx_id) %> wf1_tasks_jinja: "{{ tasks(_.wf1_wx_id) }}" wf1_recursive_tasks_yaql: <% tasks($.wf1_wx_id, true) %> wf1_recursive_tasks_jinja: "{{ tasks(_.wf1_wx_id, true) }}" wf1_recursive_error_tasks_yaql: <% tasks($.wf1_wx_id, true, ERROR) %> wf1_recursive_error_tasks_jinja: "{{ tasks(_.wf1_wx_id, True, 'ERROR') }}" wf1_not_recursive_error_tasks_yaql: <% tasks($.wf1_wx_id, false, ERROR) %> wf1_not_recursive_error_tasks_jinja: "{{ tasks(_.wf1_wx_id, False, 'ERROR') }}" wf1_recursive_success_flat_tasks_yaql: <% tasks($.wf1_wx_id, true, SUCCESS, true) %> wf1_recursive_success_flat_tasks_jinja: "{{ tasks(_.wf1_wx_id, True, 'SUCCESS', True) }}" wf2_recursive_tasks_yaql: <% tasks($.wf2_wx_id, true) %> wf2_recursive_tasks_jinja: "{{ tasks(_.wf2_wx_id, true) }}" wf3_recursive_error_tasks_yaql: <% tasks($.wf3_wx_id, true, ERROR) %> wf3_recursive_error_tasks_jinja: "{{ tasks(_.wf3_wx_id, True, 'ERROR') }}" wf3_recursive_error_flat_tasks_yaql: <% tasks($.wf3_wx_id, true, ERROR, true) %> wf3_recursive_error_flat_tasks_jinja: "{{ tasks(_.wf3_wx_id, True, 'ERROR', True) }}" wf4_recursive_error_flat_tasks_yaql: <% tasks($.wf4_wx_id, true, ERROR, true) %> wf4_recursive_error_flat_tasks_jinja: "{{ tasks(_.wf4_wx_id, True, 'ERROR', True) }}" wf5_recursive_error_flat_tasks_yaql: <% tasks($.wf5_wx_id, true, ERROR, true) %> wf5_recursive_error_flat_tasks_jinja: "{{ tasks(_.wf5_wx_id, True, 'ERROR', True) }}" wf1_top_lvl: tasks: top_lvl_wf1_task_1: workflow: wf1_second_lvl top_lvl_wf1_task_2: action: std.noop wf1_second_lvl: tasks: second_lvl_wf1_task_1: workflow: wf1_third_lvl_fail on-error: - second_lvl_wf1_task_2 second_lvl_wf1_task_2: action: std.noop second_lvl_wf1_task_3: action: std.noop wf1_third_lvl_fail: tasks: third_lvl_wf1_task_1: action: std.noop on-success: - third_lvl_wf1_task_2_fail third_lvl_wf1_task_2_fail: action: std.fail third_lvl_wf1_task_3: action: std.noop wf2_top_lvl: tasks: top_lvl_wf2_task_1: action: std.noop top_lvl_wf2_task_2: action: std.noop wf3_top_lvl: tasks: top_lvl_wf3_task_1_fail: workflow: wf3_second_lvl_fail top_lvl_wf3_task_2_fail: action: std.fail wf3_second_lvl_fail: tasks: second_lvl_wf3_task_1_fail: workflow: wf3_third_lvl_fail second_lvl_wf3_task_2: action: std.noop second_lvl_wf3_task_3: action: std.noop wf3_third_lvl_fail: tasks: third_lvl_wf3_task_1: action: std.noop on-success: - third_lvl_wf3_task_2 third_lvl_wf3_task_2: action: std.noop third_lvl_wf3_task_3_fail: action: std.fail wf4_top_lvl: tasks: 
top_lvl_wf4_task_1: workflow: wf4_second_lvl publish: raise_error: <% $.invalid_yaql_expression %> wf4_second_lvl: tasks: second_lvl_wf4_task_1: action: std.noop wf5_top_lvl: tasks: top_lvl_wf5_task_1: workflow: wf4_second_lvl input: raise_error: <% $.invalid_yaql_expression2 %> wf5_second_lvl: tasks: second_lvl_wf5_task_1: workflow: wf5_third_lvl wf5_third_lvl: tasks: third_lvl_wf5_task_1: action: std.noop """ class TasksFunctionTest(base.EngineTestCase): def _assert_published_tasks(self, task, published_key, expected_tasks_count=None, expected_tasks_names=None): published = task.published[published_key] self.assertIsNotNone( published, "there is a problem with publishing '{}'".format(published_key) ) published_names = [t['name'] for t in published] if expected_tasks_names: for e in expected_tasks_names: self.assertIn(e, published_names) if not expected_tasks_count: expected_tasks_count = len(expected_tasks_names) if expected_tasks_count: self.assertEqual(expected_tasks_count, len(published)) def test_tasks_function(self): wb_service.create_workbook_v2(WORKBOOK_WITH_EXPRESSIONS) # Start helping workflow executions. wf1_ex = self.engine.start_workflow('wb.wf1_top_lvl') wf2_ex = self.engine.start_workflow('wb.wf2_top_lvl') wf3_ex = self.engine.start_workflow('wb.wf3_top_lvl') wf4_ex = self.engine.start_workflow('wb.wf4_top_lvl') wf5_ex = self.engine.start_workflow('wb.wf5_top_lvl') self.await_workflow_success(wf1_ex.id) self.await_workflow_success(wf2_ex.id) self.await_workflow_error(wf3_ex.id) self.await_workflow_error(wf4_ex.id) self.await_workflow_error(wf5_ex.id) # Start test workflow execution wf_ex = self.engine.start_workflow( 'wb.test_tasks_function', wf_input={ 'wf1_wx_id': wf1_ex.id, 'wf2_wx_id': wf2_ex.id, 'wf3_wx_id': wf3_ex.id, 'wf4_wx_id': wf4_ex.id, 'wf5_wx_id': wf5_ex.id } ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(1, len(task_execs)) main_task = task_execs[0] self._assert_published_tasks(main_task, 'all_tasks_yaql', 22) self._assert_published_tasks(main_task, 'all_tasks_jinja', 22) self._assert_published_tasks( main_task, 'wf1_tasks_yaql', 2, ['top_lvl_wf1_task_1', 'top_lvl_wf1_task_2'] ) self._assert_published_tasks( main_task, 'wf1_tasks_jinja', 2, ['top_lvl_wf1_task_1', 'top_lvl_wf1_task_2'] ) self._assert_published_tasks( main_task, 'wf1_recursive_tasks_yaql', 8, [ 'top_lvl_wf1_task_1', 'top_lvl_wf1_task_2', 'second_lvl_wf1_task_3', 'second_lvl_wf1_task_1', 'second_lvl_wf1_task_2', 'third_lvl_wf1_task_3', 'third_lvl_wf1_task_1', 'third_lvl_wf1_task_2_fail' ] ) self._assert_published_tasks( main_task, 'wf1_recursive_tasks_jinja', 8, [ 'top_lvl_wf1_task_1', 'top_lvl_wf1_task_2', 'second_lvl_wf1_task_3', 'second_lvl_wf1_task_1', 'second_lvl_wf1_task_2', 'third_lvl_wf1_task_3', 'third_lvl_wf1_task_1', 'third_lvl_wf1_task_2_fail' ] ) self._assert_published_tasks( main_task, 'wf1_recursive_error_tasks_yaql', 2, ['second_lvl_wf1_task_1', 'third_lvl_wf1_task_2_fail'] ) self._assert_published_tasks( main_task, 'wf1_recursive_error_tasks_jinja', 2, ['second_lvl_wf1_task_1', 'third_lvl_wf1_task_2_fail'] ) self._assert_published_tasks( main_task, 'wf1_not_recursive_error_tasks_yaql', 0 ) self._assert_published_tasks( main_task, 'wf1_not_recursive_error_tasks_jinja', 0 ) self._assert_published_tasks( main_task, 'wf1_recursive_success_flat_tasks_yaql', 5, [ 'top_lvl_wf1_task_2', 'second_lvl_wf1_task_3', 
'second_lvl_wf1_task_2', 'third_lvl_wf1_task_3', 'third_lvl_wf1_task_1' ] ) self._assert_published_tasks( main_task, 'wf1_recursive_success_flat_tasks_jinja', 5, [ 'top_lvl_wf1_task_2', 'second_lvl_wf1_task_3', 'second_lvl_wf1_task_2', 'third_lvl_wf1_task_3', 'third_lvl_wf1_task_1' ] ) self._assert_published_tasks( main_task, 'wf2_recursive_tasks_yaql', 2, ['top_lvl_wf2_task_2', 'top_lvl_wf2_task_1'] ) self._assert_published_tasks( main_task, 'wf2_recursive_tasks_jinja', 2, ['top_lvl_wf2_task_2', 'top_lvl_wf2_task_1'] ) self._assert_published_tasks( main_task, 'wf3_recursive_error_tasks_yaql', 4, [ 'top_lvl_wf3_task_1_fail', 'top_lvl_wf3_task_2_fail', 'second_lvl_wf3_task_1_fail', 'third_lvl_wf3_task_3_fail' ] ) self._assert_published_tasks( main_task, 'wf3_recursive_error_tasks_jinja', 4, [ 'top_lvl_wf3_task_1_fail', 'top_lvl_wf3_task_2_fail', 'second_lvl_wf3_task_1_fail', 'third_lvl_wf3_task_3_fail' ] ) self._assert_published_tasks( main_task, 'wf3_recursive_error_flat_tasks_yaql', 2, ['top_lvl_wf3_task_2_fail', 'third_lvl_wf3_task_3_fail'] ) self._assert_published_tasks( main_task, 'wf3_recursive_error_flat_tasks_jinja', 2, ['top_lvl_wf3_task_2_fail', 'third_lvl_wf3_task_3_fail'] ) self._assert_published_tasks( main_task, 'wf4_recursive_error_flat_tasks_yaql', 1, ['top_lvl_wf4_task_1'] ) self._assert_published_tasks( main_task, 'wf4_recursive_error_flat_tasks_jinja', 1, ['top_lvl_wf4_task_1'] ) self._assert_published_tasks( main_task, 'wf5_recursive_error_flat_tasks_yaql', 1, ['top_lvl_wf5_task_1'] ) self._assert_published_tasks( main_task, 'wf5_recursive_error_flat_tasks_jinja', 1, ['top_lvl_wf5_task_1'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_with_items.py0000644000175000017500000011327300000000000024750 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import mock from oslo_config import cfg from mistral.actions import std_actions from mistral import config from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit import base as test_base from mistral.tests.unit.engine import base from mistral.workflow import data_flow from mistral.workflow import states from mistral_lib import actions as actions_base from mistral_lib import utils # TODO(nmakhotkin) Need to write more tests. # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. 
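# For illustration only (not part of the original module): a minimal sketch
# contrasting the two oslo.config knobs the comment above refers to, under
# the assumption of standard oslo.config behavior: set_default() rewrites
# the option's baseline value for the whole process, while set_override()
# is typically cleared by test fixtures between cases.
#
#     from oslo_config import cfg
#
#     cfg.CONF.set_default('auth_enable', False, group='pecan')   # sticks
#     cfg.CONF.set_override('auth_enable', False, group='pecan')  # may be reset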
cfg.CONF.set_default('auth_enable', False, group=config.PECAN_GROUP) cfg.CONF.set_default('max_missed_heartbeats', 0, group=config.ACTION_HEARTBEAT_GROUP) WB = """ --- version: "2.0" name: wb workflows: wf: input: - names_info tasks: task1: with-items: name_info in <% $.names_info %> action: std.echo output=<% $.name_info.name %> publish: result: <% task(task1).result[0] %> """ WB_WITH_STATIC_VAR = """ --- version: "2.0" name: wb workflows: wf: input: - names_info - greeting tasks: task1: with-items: name_info in <% $.names_info %> action: std.echo output="<% $.greeting %>, <% $.name_info.name %>!" publish: result: <% task(task1).result %> """ WB_MULTI_ARRAY = """ --- version: "2.0" name: wb workflows: wf: input: - arrayI - arrayJ tasks: task1: with-items: - itemX in <% $.arrayI %> - itemY in <% $.arrayJ %> action: std.echo output="<% $.itemX %> <% $.itemY %>" publish: result: <% task(task1).result %> """ WB_ACTION_CONTEXT = """ --- version: "2.0" name: wb workflows: wf: input: - items tasks: task1: with-items: item in <% $.items %> action: std.async_noop """ WF_INPUT = { 'names_info': [ {'name': 'John'}, {'name': 'Ivan'}, {'name': 'Mistral'} ] } WF_INPUT_ONE_ITEM = { 'names_info': [ {'name': 'Guy'} ] } class RandomSleepEchoAction(actions_base.Action): def __init__(self, output): self.output = output def run(self, context): utils.random_sleep(1) return self.output def test(self): utils.random_sleep(1) class WithItemsEngineTest(base.EngineTestCase): @staticmethod def _get_incomplete_action(task_ex_id): with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex_id) return [e for e in task_ex.executions if not e.accepted][0] @staticmethod def _get_running_actions_count(task_ex_id): with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex_id) return len( [e for e in task_ex.executions if e.state == states.RUNNING] ) @staticmethod def _action_result_equals(action_ex_id, output): with db_api.transaction(): a_ex = db_api.get_action_execution(action_ex_id) return a_ex.output == output def test_with_items_simple(self): wb_service.create_workbook_v2(WB) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf', wf_input=WF_INPUT) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task1_ex = self._assert_single_item(task_execs, name='task1') with_items_ctx = task1_ex.runtime_context['with_items'] self.assertEqual(3, with_items_ctx['count']) # Since we know that we can receive results in random order, # check is not depend on order of items. with db_api.transaction(): task1_ex = db_api.get_task_execution(task1_ex.id) result = data_flow.get_task_execution_result(task1_ex) self.assertIsInstance(result, list) self.assertIn('John', result) self.assertIn('Ivan', result) self.assertIn('Mistral', result) published = task1_ex.published self.assertIn(published['result'], ['John', 'Ivan', 'Mistral']) self.assertEqual(1, len(task_execs)) self.assertEqual(states.SUCCESS, task1_ex.state) def test_with_items_fail(self): wf_text = """--- version: "2.0" wf: type: direct tasks: task1: with-items: i in [1, 2, 3] action: std.fail on-error: task2 task2: action: std.echo output="With-items failed" """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertEqual(2, len(wf_ex.task_executions))

    def test_with_items_yaql_fail(self):
        wf_text = """---
        version: "2.0"

        wf:
          type: direct

          tasks:
            task1:
              with-items: i in <% $.foobar %>
              action: std.noop
        """

        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_error(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            task1 = self._assert_single_item(tasks, name='task1')

            result = data_flow.get_task_execution_result(task1)

            self.assertEqual(states.ERROR, task1.state)
            self.assertIsInstance(result, list)
            self.assertListEqual(result, [])

    def test_with_items_sub_workflow_fail(self):
        wb_text = """---
        version: "2.0"

        name: wb1

        workflows:
          wf:
            type: direct

            tasks:
              task1:
                with-items: i in [1, 2, 3]
                workflow: subwf
                on-error: task2

              task2:
                action: std.echo output="With-items failed"

          subwf:
            type: direct

            tasks:
              fail-task:
                action: std.fail
        """

        wb_service.create_workbook_v2(wb_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb1.wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            self.assertEqual(2, len(wf_ex.task_executions))

    def test_with_items_static_var(self):
        wb_service.create_workbook_v2(WB_WITH_STATIC_VAR)

        wf_input = copy.deepcopy(WF_INPUT)
        wf_input.update({'greeting': 'Hello'})

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf', wf_input=wf_input)

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            tasks = wf_ex.task_executions

            task1 = self._assert_single_item(tasks, name='task1')

            result = data_flow.get_task_execution_result(task1)

            self.assertIsInstance(result, list)
            self.assertIn('Hello, John!', result)
            self.assertIn('Hello, Ivan!', result)
            self.assertIn('Hello, Mistral!', result)

            self.assertEqual(1, len(tasks))
            self.assertEqual(states.SUCCESS, task1.state)

    def test_with_items_multi_array(self):
        wb_service.create_workbook_v2(WB_MULTI_ARRAY)

        wf_input = {'arrayI': ['a', 'b', 'c'], 'arrayJ': [1, 2, 3]}

        # Start workflow.
        wf_ex = self.engine.start_workflow('wb.wf', wf_input=wf_input)

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            task1_ex = self._assert_single_item(task_execs, name='task1')

            # Since we know that we can receive results in random order,
            # the check does not depend on the order of items.
            result = data_flow.get_task_execution_result(task1_ex)

            self.assertIsInstance(result, list)
            self.assertIn('a 1', result)
            self.assertIn('b 2', result)
            self.assertIn('c 3', result)

            self.assertEqual(1, len(task_execs))
            self.assertEqual(states.SUCCESS, task1_ex.state)

    def test_with_items_action_context(self):
        # TODO(rakhmerov): Seems like the name of the test is not valid
        # anymore since there's nothing related to action context in it.
        # We need to revisit and refactor the entire module.
        wb_service.create_workbook_v2(WB_ACTION_CONTEXT)

        # Start workflow.
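        # std.async_noop leaves each of the three action executions in the
        # RUNNING state, so the code below can drive them to completion by
        # hand via engine.on_action_complete(), one Result per item.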
wf_ex = self.engine.start_workflow( 'wb.wf', wf_input={'items': [1, 2, 3]} ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] act_exs = task_ex.executions self.engine.on_action_complete( act_exs[0].id, actions_base.Result("Ivan") ) self.engine.on_action_complete( act_exs[1].id, actions_base.Result("John") ) self.engine.on_action_complete( act_exs[2].id, actions_base.Result("Mistral") ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex.id) result = data_flow.get_task_execution_result(task_ex) self.assertIsInstance(result, list) self.assertIn('John', result) self.assertIn('Ivan', result) self.assertIn('Mistral', result) self.assertEqual(states.SUCCESS, task_ex.state) def test_with_items_empty_list(self): wb_text = """--- version: "2.0" name: wb1 workflows: with_items: type: direct input: - names_info tasks: task1: with-items: name_info in <% $.names_info %> action: std.echo output=<% $.name_info.name %> on-success: - task2 task2: action: std.echo output="Hi!" """ wb_service.create_workbook_v2(wb_text) # Start workflow. wf_input = {'names_info': []} wf_ex = self.engine.start_workflow('wb1.with_items', wf_input=wf_input) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task1_ex = self._assert_single_item(task_execs, name='task1') task2_ex = self._assert_single_item(task_execs, name='task2') self.assertEqual(2, len(task_execs)) self.assertEqual(states.SUCCESS, task1_ex.state) self.assertEqual(states.SUCCESS, task2_ex.state) def test_with_items_plain_list(self): wb_text = """--- version: "2.0" name: wb1 workflows: with_items: type: direct tasks: task1: with-items: i in [1, 2, 3] action: std.echo output=<% $.i %> """ wb_service.create_workbook_v2(wb_text) # Start workflow. wf_ex = self.engine.start_workflow('wb1.with_items') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task1_ex = self._assert_single_item( wf_ex.task_executions, name='task1', state=states.SUCCESS ) result = data_flow.get_task_execution_result(task1_ex) # Since we know that we can receive results in random order, # check is not depend on order of items. self.assertIn(1, result) self.assertIn(2, result) self.assertIn(3, result) def test_with_items_plain_list_wrong(self): wb_text = """--- version: "2.0" name: wb1 workflows: with_items: type: direct tasks: task1: with-items: i in [1,,3] action: std.echo output=<% $.i %> """ exception = self.assertRaises( exc.InvalidModelException, wb_service.create_workbook_v2, wb_text ) self.assertIn("Invalid array in 'with-items'", str(exception)) def test_with_items_results_order(self): wb_text = """--- version: "2.0" name: wb1 workflows: with_items: type: direct tasks: task1: with-items: i in [1, 2, 3] action: sleep_echo output=<% $.i %> publish: one_two_three: <% task(task1).result %> """ # Register random sleep action in the DB. test_base.register_action_class('sleep_echo', RandomSleepEchoAction) wb_service.create_workbook_v2(wb_text) # Start workflow. wf_ex = self.engine.start_workflow('wb1.with_items') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task1_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) published = task1_ex.published # Now we can check order of results explicitly. self.assertEqual([1, 2, 3], published['one_two_three']) def test_with_items_results_one_item_as_list(self): wb_service.create_workbook_v2(WB) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf', wf_input=WF_INPUT_ONE_ITEM) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task1_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) result = data_flow.get_task_execution_result(task1_ex) self.assertIsInstance(result, list) self.assertIn('Guy', result) self.assertIn(task1_ex.published['result'], ['Guy']) def test_with_items_concurrency_1(self): wf_with_concurrency_1 = """--- version: "2.0" wf: input: - names: ["John", "Ivan", "Mistral"] tasks: task1: action: std.async_noop with-items: name in <% $.names %> concurrency: 1 """ wf_service.create_workflows(wf_with_concurrency_1) # Start workflow. wf_ex = self.engine.start_workflow('wf') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) # Also initialize lazy collections. task_ex = wf_ex.task_executions[0] self.assertEqual(1, self._get_running_actions_count(task_ex.id)) # 1st iteration complete. action_ex_id = self._get_incomplete_action(task_ex.id).id self.engine.on_action_complete( action_ex_id, actions_base.Result("John") ) # Wait till the delayed on_action_complete is processed. self._await( lambda: self._action_result_equals(action_ex_id, {'result': 'John'}) ) self._await(lambda: self._get_running_actions_count(task_ex.id) == 1) # 2nd iteration complete. action_ex_id = self._get_incomplete_action(task_ex.id).id self.engine.on_action_complete( action_ex_id, actions_base.Result("Ivan") ) self._await( lambda: self._action_result_equals(action_ex_id, {'result': 'Ivan'}) ) self._await(lambda: self._get_running_actions_count(task_ex.id) == 1) # 3rd iteration complete. action_ex_id = self._get_incomplete_action(task_ex.id).id self.engine.on_action_complete( action_ex_id, actions_base.Result("Mistral") ) self._await( lambda: self._action_result_equals(action_ex_id, {'result': 'Mistral'}) ) task_ex = db_api.get_task_execution(task_ex.id) self.await_workflow_success(wf_ex.id) # Since we know that we can receive results in random order, # the check does not depend on order of items. with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex.id) result = data_flow.get_task_execution_result(task_ex) self.assertIsInstance(result, list) self.assertIn('John', result) self.assertIn('Ivan', result) self.assertIn('Mistral', result) self.assertEqual(states.SUCCESS, task_ex.state) def test_with_items_concurrency_yaql(self): # TODO(rakhmerov): This test passes even with broken 'concurrency'. # The idea of the test is not fully clear. wf_text = """--- version: "2.0" wf: type: direct input: - names: ["John", "Ivan", "Mistral"] - concurrency tasks: task1: action: std.echo output=<% $.name %> with-items: name in <% $.names %> concurrency: <% $.concurrency %> """ wf_service.create_workflows(wf_text) # Start workflow. 
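        # 'concurrency' is itself a YAQL expression here, so its value is
        # taken from the workflow input at run time. A non-integer value
        # (e.g. the string '2') fails ConcurrencyPolicy validation, which is
        # exactly what test_with_items_concurrency_yaql_wrong_type verifies
        # below.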
wf_ex = self.engine.start_workflow('wf', wf_input={'concurrency': 2}) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.SUCCESS, task_ex.state) result = data_flow.get_task_execution_result(task_ex) self.assertIsInstance(result, list) # Since we know that we can receive results in random order, # the check does not depend on order of items. self.assertIn('John', result) self.assertIn('Ivan', result) self.assertIn('Mistral', result) def test_with_items_concurrency_yaql_wrong_type(self): wf_with_concurrency_yaql = """--- version: "2.0" wf: type: direct input: - names: ["John", "Ivan", "Mistral"] - concurrency tasks: task1: action: std.echo output=<% $.name %> with-items: name in <% $.names %> concurrency: <% $.concurrency %> """ wf_service.create_workflows(wf_with_concurrency_yaql) # Start workflow. wf_ex = self.engine.start_workflow('wf', wf_input={'concurrency': '2'}) self.assertIn( 'Invalid data type in ConcurrencyPolicy', wf_ex.state_info ) self.assertEqual(states.ERROR, wf_ex.state) def test_with_items_concurrency_2(self): wf_with_concurrency_2 = """--- version: "2.0" wf: type: direct input: - names: ["John", "Ivan", "Mistral", "Hello"] tasks: task1: action: std.async_noop with-items: name in <% $.names %> concurrency: 2 """ wf_service.create_workflows(wf_with_concurrency_2) # Start workflow. wf_ex = self.engine.start_workflow('wf') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(2, self._get_running_actions_count(task_ex.id)) # 1st iteration complete. action_ex_id = self._get_incomplete_action(task_ex.id).id self.engine.on_action_complete( action_ex_id, actions_base.Result("John") ) # Wait till the delayed on_action_complete is processed. self._await( lambda: self._action_result_equals(action_ex_id, {'result': 'John'}) ) self._await(lambda: self._get_running_actions_count(task_ex.id) == 2) # 2nd iteration complete. action_ex_id = self._get_incomplete_action(task_ex.id).id self.engine.on_action_complete( action_ex_id, actions_base.Result("Ivan") ) self._await( lambda: self._action_result_equals(action_ex_id, {'result': 'Ivan'}) ) self._await(lambda: self._get_running_actions_count(task_ex.id) == 2) # 3rd iteration complete. action_ex_id = self._get_incomplete_action(task_ex.id).id self.engine.on_action_complete( action_ex_id, actions_base.Result("Mistral") ) self._await( lambda: self._action_result_equals(action_ex_id, {'result': 'Mistral'}) ) incomplete_action = self._get_incomplete_action(task_ex.id) # 4th iteration complete. self.engine.on_action_complete( incomplete_action.id, actions_base.Result("Hello") ) self._await( lambda: self._action_result_equals( incomplete_action.id, {'result': 'Hello'} ) ) task_ex = db_api.get_task_execution(task_ex.id) self.await_workflow_success(wf_ex.id) # Since we know that we can receive results in random order, # check is not depend on order of items. 
with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex.id) result = data_flow.get_task_execution_result(task_ex) self.assertIsInstance(result, list) self.assertIn('John', result) self.assertIn('Ivan', result) self.assertIn('Mistral', result) self.assertIn('Hello', result) self.assertEqual(states.SUCCESS, task_ex.state) def test_with_items_concurrency_2_fail(self): wf_with_concurrency_2_fail = """--- version: "2.0" concurrency_test_fail: type: direct tasks: task1: with-items: i in [1, 2, 3, 4] action: std.fail concurrency: 2 on-error: task2 task2: action: std.echo output="With-items failed" """ wf_service.create_workflows(wf_with_concurrency_2_fail) # Start workflow. wf_ex = self.engine.start_workflow('concurrency_test_fail') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(2, len(task_exs)) task_2 = self._assert_single_item(task_exs, name='task2') with db_api.transaction(): task_2 = db_api.get_task_execution(task_2.id) result = data_flow.get_task_execution_result(task_2) self.assertEqual('With-items failed', result) def test_with_items_concurrency_3(self): wf_with_concurrency_3 = """--- version: "2.0" concurrency_test: type: direct input: - names: ["John", "Ivan", "Mistral"] tasks: task1: action: std.async_noop with-items: name in <% $.names %> concurrency: 3 """ wf_service.create_workflows(wf_with_concurrency_3) # Start workflow. wf_ex = self.engine.start_workflow('concurrency_test') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(3, self._get_running_actions_count(task_ex.id)) # 1st iteration complete. action_ex_id = self._get_incomplete_action(task_ex.id).id self.engine.on_action_complete( action_ex_id, actions_base.Result("John") ) # Wait till the delayed on_action_complete is processed. self._await( lambda: self._action_result_equals(action_ex_id, {'result': 'John'}) ) incomplete_action = self._get_incomplete_action(task_ex.id) # 2nd iteration complete. self.engine.on_action_complete( incomplete_action.id, actions_base.Result("Ivan") ) self._await( lambda: self._action_result_equals( incomplete_action.id, {'result': 'Ivan'} ) ) incomplete_action = self._get_incomplete_action(task_ex.id) # 3rd iteration complete. self.engine.on_action_complete( incomplete_action.id, actions_base.Result("Mistral") ) self._await( lambda: self._action_result_equals( incomplete_action.id, {'result': 'Mistral'} ) ) task_ex = db_api.get_task_execution(task_ex.id) self.await_workflow_success(wf_ex.id) with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex.id) self.assertEqual(states.SUCCESS, task_ex.state) # Since we know that we can receive results in random order, # check is not depend on order of items. result = data_flow.get_task_execution_result(task_ex) self.assertIsInstance(result, list) self.assertIn('John', result) self.assertIn('Ivan', result) self.assertIn('Mistral', result) def test_with_items_concurrency_gt_list_length(self): # TODO(rakhmerov): This test passes even with disabled 'concurrency' # support. Make sure it's valid. wf_definition = """--- version: "2.0" concurrency_test: type: direct input: - names: ["John", "Ivan"] tasks: task1: with-items: name in <% $.names %> action: std.echo output=<% $.name %> concurrency: 3 """ wf_service.create_workflows(wf_definition) # Start workflow. 
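        # With only two items and concurrency: 3, the effective concurrency
        # is capped by the list length, so both items run at once and the
        # task still completes normally. (Illustrative note; see also the
        # TODO above about this test passing even without 'concurrency'
        # support.)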
wf_ex = self.engine.start_workflow('concurrency_test') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) result = data_flow.get_task_execution_result(task_ex) self.assertIsInstance(result, list) self.assertIn('John', result) self.assertIn('Ivan', result) def test_with_items_retry_policy(self): wf_text = """--- version: "2.0" with_items_retry: tasks: task1: with-items: i in [1, 2] action: std.fail retry: count: 1 delay: 1 on-error: task2 task2: action: std.echo output="With-items failed" """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('with_items_retry') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(2, len(task_execs)) task1_ex = self._assert_single_item(task_execs, name='task1') task1_executions = task1_ex.executions self.assertEqual( 1, task1_ex.runtime_context['retry_task_policy']['retry_no'] ) self.assertEqual(4, len(task1_executions)) self._assert_multiple_items(task1_executions, 2, accepted=True) def test_with_items_concurrency_retry_policy(self): wf_text = """--- version: "2.0" wf: tasks: task1: with-items: i in [1, 2] action: std.fail retry: count: 2 delay: 1 concurrency: 2 on-error: task2 task2: action: std.echo output="With-items failed" """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(2, len(task_execs)) task1_ex = self._assert_single_item(task_execs, name='task1') with db_api.transaction(): task1_ex = db_api.get_task_execution(task1_ex.id) task1_execs = task1_ex.executions self.assertEqual(6, len(task1_execs)) self._assert_multiple_items(task1_execs, 2, accepted=True) def test_with_items_env(self): wf_text = """--- version: "2.0" wf: tasks: task1: with-items: i in [1, 2, 3, 4] action: std.echo output="<% $.i %>.<% env().name %>" """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow('wf', env={'name': 'Mistral'}) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task1 = self._assert_single_item( wf_ex.task_executions, name='task1' ) result = data_flow.get_task_execution_result(task1) self.assertEqual( [ "1.Mistral", "2.Mistral", "3.Mistral", "4.Mistral" ], result ) self.assertEqual(states.SUCCESS, task1.state) def test_with_items_env_in_with_items_expression(self): wf_text = """--- version: "2.0" wf: tasks: task1: with-items: env_param in <% env().input_array %> action: std.echo output=<% $.env_param %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', env={'input_array': ['1', '2', '33']} ) self.await_workflow_success(wf_ex.id, timeout=10) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
wf_ex = db_api.get_workflow_execution(wf_ex.id) task1 = self._assert_single_item( wf_ex.task_executions, name='task1' ) result = data_flow.get_task_execution_result(task1) self.assertListEqual(['1', '2', '33'], result) self.assertEqual(states.SUCCESS, task1.state) def test_with_items_two_tasks_second_starts_on_success(self): wb_text = """--- version: "2.0" name: wb1 workflows: with_items: type: direct tasks: task1: with-items: i in [1, 2] action: std.echo output=<% $.i %> on-success: task2 task2: with-items: i in [3, 4] action: std.echo output=<% $.i %> """ wb_service.create_workbook_v2(wb_text) # Start workflow. wf_ex = self.engine.start_workflow('wb1.with_items') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task1_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) task2_ex = self._assert_single_item( task_execs, name='task2', state=states.SUCCESS ) with db_api.transaction(): task1_ex = db_api.get_task_execution(task1_ex.id) task2_ex = db_api.get_task_execution(task2_ex.id) result_task1 = data_flow.get_task_execution_result(task1_ex) result_task2 = data_flow.get_task_execution_result(task2_ex) # Since we know that we can receive results in random order, # check is not depend on order of items. self.assertIn(1, result_task1) self.assertIn(2, result_task1) self.assertIn(3, result_task2) self.assertIn(4, result_task2) def test_with_items_subflow_concurrency_gt_list_length(self): wb_text = """--- version: "2.0" name: wb1 workflows: main: type: direct input: - names tasks: task1: with-items: name in <% $.names %> workflow: subflow1 name=<% $.name %> concurrency: 3 subflow1: type: direct input: - name output: result: <% task(task1).result %> tasks: task1: action: std.echo output=<% $.name %> """ wb_service.create_workbook_v2(wb_text) # Start workflow. names = ["Peter", "Susan", "Edmund", "Lucy", "Aslan", "Caspian"] wf_ex = self.engine.start_workflow( 'wb1.main', wf_input={'names': names} ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) with db_api.transaction(): task_ex = db_api.get_task_execution(task_ex.id) task_result = data_flow.get_task_execution_result(task_ex) result = [item['result'] for item in task_result] self.assertListEqual(sorted(result), sorted(names)) @mock.patch.object(std_actions.HTTPAction, 'run') def test_with_items_and_adhoc_action(self, mock_http_action): mock_http_action.return_value = '' wb_text = """--- version: "2.0" name: test actions: http: input: - url: http://www.example.com - method: GET - timeout: 10 output: <% $.content %> base: std.http base-input: url: <% $.url %> method: <% $.method %> timeout: <% $.timeout %> workflows: with_items_default_bug: description: Re-create the with-items bug with default values type: direct tasks: get_pages: with-items: page in <% range(0, 1) %> action: test.http input: url: http://www.example.com method: GET on-success: - well_done well_done: action: std.echo output="Well done" """ wb_service.create_workbook_v2(wb_text) # Start workflow. wf_ex = self.engine.start_workflow('test.with_items_default_bug') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
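        # The ad-hoc action 'test.http' wraps std.http: 'base-input' maps the
        # declared inputs (url, method, timeout) onto the base action, and
        # any input left unset falls back to the defaults declared above
        # (e.g. timeout: 10). Mocking HTTPAction.run lets the test assert the
        # wrapper is invoked exactly once without doing real network I/O.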
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_execs = wf_ex.task_executions

            task1_ex = self._assert_single_item(task_execs, name='get_pages')
            task2_ex = self._assert_single_item(task_execs, name='well_done')

            self.assertEqual(2, len(task_execs))
            self.assertEqual(states.SUCCESS, task1_ex.state)
            self.assertEqual(states.SUCCESS, task2_ex.state)
            self.assertEqual(1, mock_http_action.call_count)

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/mistral/tests/unit/engine/test_with_items_task.py0000644000175000017500000000417400000000000025771 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mistral.db.v2.sqlalchemy import models
from mistral.engine import tasks
from mistral.tests.unit import base
from mistral.workflow import states


# TODO(rakhmerov): This test is a legacy of the previous 'with-items'
# implementation when most of its logic was in the with_items.py module.
# It makes sense to add more tests for various methods of WithItemsTask.

class WithItemsTaskTest(base.BaseTest):
    @staticmethod
    def get_action_ex(accepted, state, index):
        return models.ActionExecution(
            accepted=accepted,
            state=state,
            runtime_context={'index': index}
        )

    def test_get_next_indices(self):
        # Task execution for running 6 items with concurrency=3.
        task_ex = models.TaskExecution(
            spec={
                'action': 'myaction'
            },
            runtime_context={
                'with_items': {
                    'capacity': 3,
                    'count': 6
                }
            },
            action_executions=[],
            workflow_executions=[]
        )

        task = tasks.WithItemsTask(None, None, None, {}, task_ex)

        # Set 3 items: 2 success and 1 error unaccepted.
        task_ex.action_executions += [
            self.get_action_ex(True, states.SUCCESS, 0),
            self.get_action_ex(True, states.SUCCESS, 1),
            self.get_action_ex(False, states.ERROR, 2)
        ]

        # Then call _get_next_indexes() and expect [2, 3, 4].
        indexes = task._get_next_indexes()

        self.assertListEqual([2, 3, 4], indexes)

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0
mistral-10.0.0.0b3/mistral/tests/unit/engine/test_workflow_cancel.py0000644000175000017500000004463100000000000025754 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
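# For illustration only (not part of the original archive): a minimal,
# self-contained sketch of the with-items index selection exercised by
# WithItemsTaskTest above. The rule assumed here is inferred from that
# test's fixture (6 items, capacity 3, items 0-1 accepted, item 2
# unaccepted, expected [2, 3, 4]): unaccepted items are re-run first, then
# the remaining capacity is filled with indices that have not started yet.
def _sketch_next_indexes(count, capacity, accepted, unaccepted):
    started = set(accepted) | set(unaccepted)

    # Unaccepted (e.g. failed but not yet accepted) items go first.
    result = sorted(unaccepted)

    # Fill the remaining capacity with not-yet-started indices, in order.
    for i in range(count):
        if len(result) >= capacity:
            break
        if i not in started:
            result.append(i)

    return result[:capacity]


# Matches the expectation in test_get_next_indices above.
assert _sketch_next_indexes(6, 3, accepted=[0, 1], unaccepted=[2]) == [2, 3, 4]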
from mistral.db.v2 import api as db_api from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states class WorkflowCancelTest(base.EngineTestCase): def test_cancel_workflow(self): workflow = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 3 """ wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf') self.engine.stop_workflow( wf_ex.id, states.CANCELLED, "Cancelled by user." ) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item(task_execs, name='task1') self.await_task_success(task_1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled by user.", wf_ex.state_info) self.assertEqual(1, len(task_execs)) self.assertEqual(states.SUCCESS, task_1_ex.state) def test_cancel_workflow_if_definition_deleted(self): workflow = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="foo" wait-before: 5 """ wf = wf_service.create_workflows(workflow)[0] wf_ex = self.engine.start_workflow('wf') with db_api.transaction(): db_api.delete_workflow_definition(wf.id) self.engine.stop_workflow( wf_ex.id, states.CANCELLED, "Cancelled by user." ) self.await_workflow_cancelled(wf_ex.id) def test_cancel_paused_workflow(self): workflow = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 3 """ wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf') self.engine.pause_workflow(wf_ex.id) self.await_workflow_paused(wf_ex.id) self.engine.stop_workflow( wf_ex.id, states.CANCELLED, "Cancelled by user." ) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item(task_execs, name='task1') self.await_task_success(task_1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item( task_execs, name='task1' ) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled by user.", wf_ex.state_info) self.assertEqual(1, len(task_execs)) self.assertEqual(states.SUCCESS, task_1_ex.state) def test_cancel_completed_workflow(self): workflow = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Echo" """ wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) self.engine.stop_workflow( wf_ex.id, states.CANCELLED, "Cancelled by user." 
) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(1, len(task_execs)) self.assertEqual(states.SUCCESS, task_1_ex.state) def test_cancel_parent_workflow(self): workbook = """ version: '2.0' name: wb workflows: wf: type: direct tasks: taskx: workflow: subwf subwf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 2 """ wb_service.create_workbook_v2(workbook) wf_ex = self.engine.start_workflow('wb.wf') self.engine.stop_workflow( wf_ex.id, states.CANCELLED, "Cancelled by user." ) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='taskx') self.await_task_cancelled(task_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='taskx') subwf_execs = db_api.get_workflow_executions( task_execution_id=task_ex.id ) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled by user.", wf_ex.state_info) self.assertEqual(states.CANCELLED, task_ex.state) self.assertEqual("Cancelled by user.", task_ex.state_info) self.assertEqual(1, len(subwf_execs)) self.assertEqual(states.CANCELLED, subwf_execs[0].state) self.assertEqual("Cancelled by user.", subwf_execs[0].state_info) def test_cancel_child_workflow(self): workbook = """ version: '2.0' name: wb workflows: wf: type: direct tasks: taskx: workflow: subwf subwf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 3 """ wb_service.create_workbook_v2(workbook) self.engine.start_workflow('wb.wf') with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_ex = self._assert_single_item(wf_execs, name='wb.subwf') self.engine.stop_workflow( subwf_ex.id, states.CANCELLED, "Cancelled by user." ) self.await_workflow_cancelled(subwf_ex.id) self.await_task_cancelled(task_ex.id) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_ex = self._assert_single_item(wf_execs, name='wb.subwf') self.assertEqual(states.CANCELLED, subwf_ex.state) self.assertEqual("Cancelled by user.", subwf_ex.state_info) self.assertEqual(states.CANCELLED, task_ex.state) self.assertIn("Cancelled by user.", task_ex.state_info) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled tasks: taskx", wf_ex.state_info) def test_cancel_with_items_parent_workflow(self): workbook = """ version: '2.0' name: wb workflows: wf: type: direct tasks: taskx: with-items: i in [1, 2] workflow: subwf subwf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 1 """ wb_service.create_workbook_v2(workbook) wf_ex = self.engine.start_workflow('wb.wf') self.engine.stop_workflow( wf_ex.id, states.CANCELLED, "Cancelled by user." 
) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='taskx') self.await_workflow_cancelled(wf_ex.id) self.await_task_cancelled(task_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_exs = self._assert_multiple_items( wf_execs, 2, name='wb.subwf' ) self.assertEqual(states.CANCELLED, subwf_exs[0].state) self.assertEqual("Cancelled by user.", subwf_exs[0].state_info) self.assertEqual(states.CANCELLED, subwf_exs[1].state) self.assertEqual("Cancelled by user.", subwf_exs[1].state_info) self.assertEqual(states.CANCELLED, task_ex.state) self.assertIn("cancelled", task_ex.state_info) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled by user.", wf_ex.state_info) def test_cancel_with_items_child_workflow(self): workbook = """ version: '2.0' name: wb workflows: wf: type: direct tasks: taskx: with-items: i in [1, 2] workflow: subwf subwf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 1 """ wb_service.create_workbook_v2(workbook) self.engine.start_workflow('wb.wf') with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_exs = self._assert_multiple_items( wf_execs, 2, name='wb.subwf' ) self.engine.stop_workflow( subwf_exs[0].id, states.CANCELLED, "Cancelled by user." ) self.await_workflow_cancelled(subwf_exs[0].id) self.await_workflow_success(subwf_exs[1].id) self.await_task_cancelled(task_ex.id) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_exs = self._assert_multiple_items( wf_execs, 2, name='wb.subwf' ) self.assertEqual(states.CANCELLED, subwf_exs[0].state) self.assertEqual("Cancelled by user.", subwf_exs[0].state_info) self.assertEqual(states.SUCCESS, subwf_exs[1].state) self.assertIsNone(subwf_exs[1].state_info) self.assertEqual(states.CANCELLED, task_ex.state) self.assertIn("cancelled", task_ex.state_info) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled tasks: taskx", wf_ex.state_info) def test_cancel_then_fail_with_items_child_workflow(self): workbook = """ version: '2.0' name: wb workflows: wf: type: direct tasks: taskx: with-items: i in [1, 2] workflow: subwf subwf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 1 """ wb_service.create_workbook_v2(workbook) self.engine.start_workflow('wb.wf') with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_exs = self._assert_multiple_items( wf_execs, 2, name='wb.subwf' ) self.engine.stop_workflow( subwf_exs[0].id, states.CANCELLED, "Cancelled by user." ) self.engine.stop_workflow( subwf_exs[1].id, states.ERROR, "Failed by user." 
) self.await_workflow_cancelled(subwf_exs[0].id) self.await_workflow_error(subwf_exs[1].id) self.await_task_cancelled(task_ex.id) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_exs = self._assert_multiple_items( wf_execs, 2, name='wb.subwf' ) self.assertEqual(states.CANCELLED, subwf_exs[0].state) self.assertEqual("Cancelled by user.", subwf_exs[0].state_info) self.assertEqual(states.ERROR, subwf_exs[1].state) self.assertEqual("Failed by user.", subwf_exs[1].state_info) self.assertEqual(states.CANCELLED, task_ex.state) self.assertIn("cancelled", task_ex.state_info) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled tasks: taskx", wf_ex.state_info) def test_fail_then_cancel_with_items_child_workflow(self): workbook = """ version: '2.0' name: wb workflows: wf: type: direct tasks: taskx: with-items: i in [1, 2] workflow: subwf subwf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 1 """ wb_service.create_workbook_v2(workbook) self.engine.start_workflow('wb.wf') with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_exs = self._assert_multiple_items( wf_execs, 2, name='wb.subwf' ) self.engine.stop_workflow( subwf_exs[1].id, states.ERROR, "Failed by user." ) self.engine.stop_workflow( subwf_exs[0].id, states.CANCELLED, "Cancelled by user." ) self.await_workflow_cancelled(subwf_exs[0].id) self.await_workflow_error(subwf_exs[1].id) self.await_task_cancelled(task_ex.id) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_execs = db_api.get_workflow_executions() wf_ex = self._assert_single_item(wf_execs, name='wb.wf') task_ex = self._assert_single_item( wf_ex.task_executions, name='taskx' ) subwf_exs = self._assert_multiple_items( wf_execs, 2, name='wb.subwf' ) self.assertEqual(states.CANCELLED, subwf_exs[0].state) self.assertEqual("Cancelled by user.", subwf_exs[0].state_info) self.assertEqual(states.ERROR, subwf_exs[1].state) self.assertEqual("Failed by user.", subwf_exs[1].state_info) self.assertEqual(states.CANCELLED, task_ex.state) self.assertIn("cancelled", task_ex.state_info) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual("Cancelled tasks: taskx", wf_ex.state_info) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_workflow_resume.py0000644000175000017500000003033700000000000026025 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
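# For illustration only (not part of the original archive): the with-items
# cancel tests above end with the task and parent workflow CANCELLED even
# when one subworkflow finishes in ERROR. A minimal sketch of that state
# precedence, as implied by the assertions (hypothetical helper, not a
# Mistral API):
def _sketch_aggregate_subwf_state(child_states):
    if 'CANCELLED' in child_states:
        return 'CANCELLED'
    if 'ERROR' in child_states:
        return 'ERROR'
    return 'SUCCESS'


# Mirrors test_cancel_then_fail_with_items_child_workflow and
# test_cancel_with_items_child_workflow respectively.
assert _sketch_aggregate_subwf_state(['ERROR', 'CANCELLED']) == 'CANCELLED'
assert _sketch_aggregate_subwf_state(['SUCCESS', 'CANCELLED']) == 'CANCELLED'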
import mock from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.lang import parser as spec_parser from mistral.services import workbooks as wb_service from mistral.tests.unit.engine import base from mistral.workflow import data_flow from mistral.workflow import states from mistral_lib import actions as ml_actions # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') RESUME_WORKBOOK = """ --- version: '2.0' name: wb workflows: wf1: type: direct tasks: task1: action: std.echo output="Hi!" on-complete: - task2 - pause task2: action: std.echo output="Task 2" """ RESUME_WORKBOOK_DIFF_ENV_VAR = """ --- version: '2.0' name: wb workflows: wf1: type: direct tasks: task1: action: std.echo output="Hi!" on-complete: - task2 task2: action: std.echo output=<% env().var1 %> pause-before: true on-complete: - task3 task3: action: std.echo output=<% env().var2 %> """ RESUME_WORKBOOK_REVERSE = """ --- version: '2.0' name: resume_reverse workflows: wf: type: reverse tasks: task1: action: std.echo output="Hi!" wait-after: 1 task2: action: std.echo output="Task 2" requires: [task1] """ WORKBOOK_TWO_BRANCHES = """ --- version: '2.0' name: wb workflows: wf1: type: direct tasks: task1: action: std.echo output="Hi!" on-complete: - task2 - task3 - pause task2: action: std.echo output="Task 2" task3: action: std.echo output="Task 3" """ WORKBOOK_TWO_START_TASKS = """ --- version: '2.0' name: wb workflows: wf1: type: direct tasks: task1: action: std.echo output="Task 1" on-complete: - task3 - pause task2: action: std.echo output="Task 2" on-complete: - pause task3: action: std.echo output="Task 3" """ WORKBOOK_DIFFERENT_TASK_STATES = """ --- version: '2.0' name: wb workflows: wf1: type: direct tasks: task1: action: std.echo output="Hi!" on-complete: - task3 - pause task2: action: std.async_noop # This one won't be finished when execution is already PAUSED. on-complete: - task4 task3: action: std.echo output="Task 3" task4: action: std.echo output="Task 4" """ class WorkflowResumeTest(base.EngineTestCase): def setUp(self): super(WorkflowResumeTest, self).setUp() self.wb_spec = spec_parser.get_workbook_spec_from_yaml(RESUME_WORKBOOK) self.wf_spec = self.wb_spec.get_workflows()['wf1'] def test_resume_direct(self): wb_service.create_workbook_v2(RESUME_WORKBOOK) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(2, len(task_execs)) self.engine.resume_workflow(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(2, len(wf_ex.task_executions)) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(2, len(task_execs)) def test_resume_reverse(self): wb_service.create_workbook_v2(RESUME_WORKBOOK_REVERSE) # Start workflow. wf_ex = self.engine.start_workflow( 'resume_reverse.wf', task_name='task2' ) self.engine.pause_workflow(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. 
wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(1, len(task_execs)) self.engine.resume_workflow(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(2, len(task_execs)) def test_resume_two_branches(self): wb_service.create_workbook_v2(WORKBOOK_TWO_BRANCHES) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(3, len(task_execs)) wf_ex = self.engine.resume_workflow(wf_ex.id) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) # We can see 3 tasks in execution. self.assertEqual(3, len(task_execs)) def test_resume_two_start_tasks(self): wb_service.create_workbook_v2(WORKBOOK_TWO_START_TASKS) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.PAUSED, wf_ex.state) # The exact number of tasks depends on which of two tasks # 'task1' and 'task2' completed earlier. self.assertGreaterEqual(len(task_execs), 2) task1_ex = self._assert_single_item(task_execs, name='task1') task2_ex = self._assert_single_item(task_execs, name='task2') self.await_task_success(task1_ex.id) self.await_task_success(task2_ex.id) self.engine.resume_workflow(wf_ex.id) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(3, len(task_execs)) def test_resume_different_task_states(self): wb_service.create_workbook_v2(WORKBOOK_DIFFERENT_TASK_STATES) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(3, len(task_execs)) task2_ex = self._assert_single_item(task_execs, name='task2') # Task2 is not finished yet. self.assertFalse(states.is_completed(task2_ex.state)) wf_ex = self.engine.resume_workflow(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) # Wait for task3 to be processed. task3_ex = self._assert_single_item(task_execs, name='task3') self.await_task_success(task3_ex.id) self.await_task_processed(task3_ex.id) # Finish task2. task2_action_ex = db_api.get_action_executions( task_execution_id=task2_ex.id )[0] self.engine.on_action_complete(task2_action_ex.id, ml_actions.Result()) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state, wf_ex.state_info) self.assertEqual(4, len(task_execs)) def test_resume_fails(self): # Start and pause workflow. 
wb_service.create_workbook_v2(WORKBOOK_DIFFERENT_TASK_STATES) wf_ex = self.engine.start_workflow('wb.wf1') self.await_workflow_paused(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.PAUSED, wf_ex.state) # Simulate failure and check if it is handled. err = exc.MistralError('foo') with mock.patch.object( db_api, 'get_workflow_execution', side_effect=err): self.assertRaises( exc.MistralError, self.engine.resume_workflow, wf_ex.id ) def test_resume_diff_env_vars(self): wb_service.create_workbook_v2(RESUME_WORKBOOK_DIFF_ENV_VAR) # Initial environment variables for the workflow execution. env = { 'var1': 'fee fi fo fum', 'var2': 'foobar' } # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1', env=env) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_1_ex = self._assert_single_item(task_execs, name='task1') task_2_ex = self._assert_single_item(task_execs, name='task2') self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(2, len(task_execs)) self.assertDictEqual(env, wf_ex.params['env']) self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertEqual(states.IDLE, task_2_ex.state) # Update env in workflow execution with the following. updated_env = { 'var1': 'Task 2', 'var2': 'Task 3' } # Update the env variables and resume workflow. self.engine.resume_workflow(wf_ex.id, env=updated_env) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertDictEqual(updated_env, wf_ex.params['env']) self.assertEqual(3, len(task_execs)) # Check result of task2. task_2_ex = self._assert_single_item(task_execs, name='task2') self.assertEqual(states.SUCCESS, task_2_ex.state) # Re-read task execution, otherwise lazy loading of action executions # may not work. with db_api.transaction(): task_2_ex = db_api.get_task_execution(task_2_ex.id) task_2_result = data_flow.get_task_execution_result(task_2_ex) self.assertEqual(updated_env['var1'], task_2_result) # Check result of task3. task_3_ex = self._assert_single_item( task_execs, name='task3' ) self.assertEqual(states.SUCCESS, task_3_ex.state) # Re-read task execution, otherwise lazy loading of action executions # may not work. with db_api.transaction(): task_3_ex = db_api.get_task_execution(task_3_ex.id) task_3_result = data_flow.get_task_execution_result(task_3_ex) self.assertEqual(updated_env['var2'], task_3_result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_workflow_stop.py0000644000175000017500000000367400000000000025516 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
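# For illustration (not part of the original archive): stop_workflow() forces
# a terminal state onto a running execution and records state_info; the
# forced state, not the workflow's own outcome, decides the final result.
# The call pattern used by the tests below:
#
#     self.engine.stop_workflow(exec_id, states.SUCCESS, "Force stop")
#     self.engine.stop_workflow(exec_id, states.ERROR, "Failure")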
# ---- mistral-10.0.0.0b3/mistral/tests/unit/engine/test_workflow_stop.py ----

# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# (standard Apache 2.0 header, identical to the other test modules)

from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import states


class WorkflowStopTest(base.EngineTestCase):
    def setUp(self):
        super(WorkflowStopTest, self).setUp()

        WORKFLOW = """
        version: '2.0'

        wf:
          type: direct
          tasks:
            task1:
              action: std.echo output="Echo"
              on-complete:
                - task2

            task2:
              action: std.echo output="foo"
              wait-before: 3
        """
        wf_service.create_workflows(WORKFLOW)

        self.exec_id = self.engine.start_workflow('wf').id

    def test_stop_failed(self):
        self.engine.stop_workflow(self.exec_id, states.SUCCESS, "Force stop")

        self.await_workflow_success(self.exec_id)

        wf_ex = db_api.get_workflow_execution(self.exec_id)

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertEqual("Force stop", wf_ex.state_info)

    def test_stop_succeeded(self):
        self.engine.stop_workflow(self.exec_id, states.ERROR, "Failure")

        self.await_workflow_error(self.exec_id)

        wf_ex = db_api.get_workflow_execution(self.exec_id)

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertEqual("Failure", wf_ex.state_info)


# ---- mistral-10.0.0.0b3/mistral/tests/unit/engine/test_workflow_variables.py ----

# Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# (standard Apache 2.0 header, identical to the other test modules)

from oslo_config import cfg

from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import states

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')


class WorkflowVariablesTest(base.EngineTestCase):
    def test_workflow_variables(self):
        wf_text = """---
        version: '2.0'

        wf:
          input:
            - param1: "Hello"
            - param2

          vars:
            literal_var: "Literal value"
            yaql_var: "<% $.param1 %> <% $.param2 %>"

          output:
            literal_var: <% $.literal_var %>
            yaql_var: <% $.yaql_var %>

          tasks:
            task1:
              action: std.noop
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
        wf_ex = self.engine.start_workflow('wf', wf_input={'param2': 'Renat'})

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            wf_output = wf_ex.output
            tasks = wf_ex.task_executions

        task1 = self._assert_single_item(tasks, name='task1')

        self.assertEqual(states.SUCCESS, task1.state)
        self.assertDictEqual(
            {
                'literal_var': 'Literal value',
                'yaql_var': 'Hello Renat'
            },
            wf_output
        )

    def test_dynamic_action_names(self):
        wf_text = """---
        version: '2.0'

        wf2:
          input:
            - wf_action
            - param1
          tasks:
            task1:
              action: <% $.wf_action %> output=<% $.param1 %>
              publish:
                var1: <% task(task1).result %>
        """
        wf_service.create_workflows(wf_text)

        # Start workflow.
wf_ex = self.engine.start_workflow( 'wf2', wf_input={'wf_action': 'std.echo', 'param1': 'Hello'} ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(states.SUCCESS, task1.state) self.assertEqual("Hello", wf_output['var1']) def test_dynamic_action_names_and_input(self): wf_text = """--- version: '2.0' wf3: input: - wf_action - wf_input tasks: task1: action: <% $.wf_action %> input: <% $.wf_input %> publish: var1: <% task(task1).result %> """ wf_service.create_workflows(wf_text) # Start workflow. wf_ex = self.engine.start_workflow( 'wf3', wf_input={'wf_action': 'std.echo', 'wf_input': {'output': 'Hello'}} ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) wf_output = wf_ex.output tasks = wf_ex.task_executions task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(states.SUCCESS, task1.state) self.assertEqual("Hello", wf_output['var1']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/engine/test_yaql_functions.py0000644000175000017500000003635700000000000025641 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.expressions import std_functions from mistral.services import workflows as wf_service from mistral.tests.unit.engine import base from mistral.workflow import states # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') class YAQLFunctionsEngineTest(base.EngineTestCase): def test_task_function(self): wf_text = """--- version: '2.0' wf: tasks: task1: description: This is task 1 tags: ['t1'] action: std.echo output=1 publish: name: <% task(task1).name %> description: <% task(task1).spec.description %> tags: <% task(task1).spec.tags%> state: <% task(task1).state %> state_info: <% task(task1).state_info %> res: <% task(task1).result %> on-success: - task2 task2: action: std.echo output=<% task(task1).result + 1 %> publish: name: <% task(task1).name %> description: <% task(task1).spec.description %> tags: <% task(task1).spec.tags%> state: <% task(task1).state %> state_info: <% task(task1).state_info %> res: <% task(task1).result %> task2_res: <% task(task2).result %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Reread execution to access related tasks. 
wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) task1 = self._assert_single_item( tasks, name='task1', state=states.SUCCESS ) task2 = self._assert_single_item( tasks, name='task2', state=states.SUCCESS ) self.assertDictEqual( { 'name': 'task1', 'description': 'This is task 1', 'tags': ['t1'], 'state': states.SUCCESS, 'state_info': None, 'res': 1 }, task1.published ) self.assertDictEqual( { 'name': 'task1', 'description': 'This is task 1', 'tags': ['t1'], 'state': states.SUCCESS, 'state_info': None, 'res': 1, 'task2_res': 2 }, task2.published ) def test_task_function_caching(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output=1 publish: var1: <% task(task1) %> var2: <% task(task1) %> var3: <% task(task1) %> """ wf_service.create_workflows(wf_text) # The idea of what happens next is to make sure that the method # std_function._convert_to_user_model is called once. It will # prove that the function '_task' is also called once. We can't # check directly if '_task' is called because it's wrapped with # a decorator and it breaks all capabilities that 'mock' provides. # In fact, this is an example of 'white box' testing (when we check # what happens within a certain function/component) and we always # try to avoid it but we have to do it here because of technical # limitations. with mock.patch.object( std_functions, '_convert_to_user_model', wraps=std_functions._convert_to_user_model ) as mocked_mtd: wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions task1_ex = self._assert_single_item( tasks, name='task1', state=states.SUCCESS ) published = task1_ex.published self.assertIsNotNone(published) self.assertDictEqual(published['var1'], published['var2']) self.assertDictEqual(published['var1'], published['var3']) mocked_mtd.assert_called_once() def test_task_function_returns_null(self): wf_text = """--- version: '2.0' wf: output: task2: <% task(task2) %> task2bool: <% task(task2) = null %> tasks: task1: action: std.noop on-success: - task2: <% false %> task2: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertDictEqual( { 'task2': None, 'task2bool': True }, wf_ex.output ) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) def test_task_function_non_existing(self): wf_text = """--- version: '2.0' wf: type: direct output: task_name: <% task(non_existing_task).name %> tasks: task1: action: std.noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('non_existing_task', wf_ex.state_info) def test_task_function_no_arguments(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output=1 publish: task1_id: <% task().id %> task1_result: <% task().result %> task1_state: <% task().state %> on-success: task2 task2: action: std.echo output=2 publish: task2_id: <% task().id %> task2_result: <% task().result %> task2_state: <% task().state %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) 
with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) task2_ex = self._assert_single_item( wf_ex.task_executions, name='task2' ) self.assertDictEqual( { 'task1_id': task1_ex.id, 'task1_result': 1, 'task1_state': states.SUCCESS }, task1_ex.published ) self.assertDictEqual( { 'task2_id': task2_ex.id, 'task2_result': 2, 'task2_state': states.SUCCESS }, task2_ex.published ) # The internal data needed for evaluation of the task() function # should not be persisted to DB. self.assertNotIn('__task_execution', task1_ex.in_context) self.assertNotIn('__task_execution', task2_ex.in_context) def test_task_function_no_name_on_complete_case(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output=1 on-complete: - fail(msg=<% task() %>) """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertIn(wf_ex.id, wf_ex.state_info) def test_task_function_no_name_on_success_case(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output=1 on-success: - task2: <% task().result = 1 %> - task3: <% task().result = 100 %> task2: action: std.echo output=2 task3: action: std.echo output=3 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(2, len(wf_ex.task_executions)) self._assert_single_item(wf_ex.task_executions, name='task1') self._assert_single_item(wf_ex.task_executions, name='task2') def test_task_function_no_name_when_calculating_end_tasks(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.fail error_data="Some data" on-error: - task2: <% task().result != '' %> task2: action: std.echo output=2 """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(2, len(wf_ex.task_executions)) self._assert_single_item(wf_ex.task_executions, name='task1') self._assert_single_item(wf_ex.task_executions, name='task2') def test_uuid_function(self): wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output=<% uuid() %> publish: result: <% task(task1).result %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = task_execs[0] result = task_ex.published['result'] self.assertIsNotNone(result) self.assertEqual(36, len(result)) self.assertEqual(4, result.count('-')) def test_execution_function(self): wf_text = """--- version: '2.0' wf: input: - k1 - k2: v2_default tasks: task1: action: std.echo output=<% execution() %> publish: result: <% task(task1).result %> """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow( 'wf', wf_input={'k1': 'v1'}, param1='blablabla' ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = task_execs[0] execution = task_ex.published['result'] self.assertIsInstance(execution, dict) spec = 
execution['spec']

        self.assertEqual('2.0', spec['version'])
        self.assertEqual('wf', spec['name'])
        self.assertIn('tasks', spec)
        self.assertEqual(1, len(spec['tasks']))

        self.assertDictEqual(
            {
                'k1': 'v1',
                'k2': 'v2_default'
            },
            execution['input']
        )

        self.assertDictEqual(
            {
                'param1': 'blablabla',
                'namespace': '',
                'env': {}
            },
            execution['params']
        )

        self.assertEqual(
            wf_ex.created_at.isoformat(' '),
            execution['created_at']
        )

    def test_yaml_dump_function(self):
        wf_text = """---
        version: '2.0'

        wf:
          tasks:
            task1:
              publish:
                data: <% {key1 => foo, key2 => bar} %>
              on-success: task2

            task2:
              publish:
                yaml_str: <% yaml_dump($.data) %>
                json_str: <% json_dump($.data) %>
        """
        wf_service.create_workflows(wf_text)

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction(read_only=True):
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            task_ex = self._assert_single_item(
                wf_ex.task_executions,
                name='task2'
            )

            yaml_str = task_ex.published['yaml_str']
            json_str = task_ex.published['json_str']

        self.assertIsNotNone(yaml_str)
        self.assertIn('key1: foo', yaml_str)
        self.assertIn('key2: bar', yaml_str)

        self.assertIsNotNone(json_str)
        self.assertIn('"key1": "foo"', json_str)
        self.assertIn('"key2": "bar"', json_str)

    def test_yaml_dump(self):
        data = [
            {
                "this": "is valid",
            },
            {
                "so": "is this",
                "and": "this too",
                "might": "as well",
            },
            "validaswell"
        ]

        expected = (
            "- this: is valid\n"
            "- and: this too\n"
            "  might: as well\n"
            "  so: is this\n"
            "- validaswell\n"
        )

        yaml_str = std_functions.yaml_dump_(None, data)

        self.assertEqual(expected, yaml_str)


# ---- mistral-10.0.0.0b3/mistral/tests/unit/executors/__init__.py (empty) ----

# ---- mistral-10.0.0.0b3/mistral/tests/unit/executors/base.py ----

# Copyright 2017 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# (standard Apache 2.0 header, identical to the other test modules)

from oslo_log import log as logging

from mistral.tests.unit.engine import base as engine_test_base

LOG = logging.getLogger(__name__)


class ExecutorTestCase(engine_test_base.EngineTestCase):
    pass


# ---- mistral-10.0.0.0b3/mistral/tests/unit/executors/test_local_executor.py ----

# Copyright 2017 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_log import log as logging from mistral.actions import std_actions from mistral.db.v2 import api as db_api from mistral.executors import base as exe from mistral.executors import remote_executor as r_exe from mistral.services import workbooks as wb_svc from mistral.tests.unit.executors import base from mistral.workflow import states LOG = logging.getLogger(__name__) # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') @mock.patch.object( r_exe.RemoteExecutor, 'run_action', mock.MagicMock(return_value=None) ) class LocalExecutorTest(base.ExecutorTestCase): @classmethod def setUpClass(cls): super(LocalExecutorTest, cls).setUpClass() cfg.CONF.set_default('type', 'local', group='executor') @classmethod def tearDownClass(cls): exe.cleanup() cfg.CONF.set_default('type', 'remote', group='executor') super(LocalExecutorTest, cls).tearDownClass() @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1', # Mock task1 success. 'Task 2', # Mock task2 success. 'Task 3' # Mock task3 success. ] ) ) def test_run(self): wb_def = """ version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t1: action: std.echo output="Task 1" on-success: - t2 t2: action: std.echo output="Task 2" on-success: - t3 t3: action: std.echo output="Task 3" """ wb_svc.create_workbook_v2(wb_def) wf_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(3, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') task_3_ex = self._assert_single_item(task_execs, name='t3') self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertEqual(states.SUCCESS, task_2_ex.state) self.assertEqual(states.SUCCESS, task_3_ex.state) # Make sure the remote executor is not called. self.assertFalse(r_exe.RemoteExecutor.run_action.called) @mock.patch.object( std_actions.EchoAction, 'run', mock.MagicMock( side_effect=[ 'Task 1.0', # Mock task1 success. 'Task 1.1', # Mock task1 success. 'Task 1.2', # Mock task1 success. 'Task 2' # Mock task2 success. 
] ) ) def test_run_with_items(self): wb_def = """ version: '2.0' name: wb1 workflows: wf1: type: direct tasks: t1: with-items: i in <% list(range(0, 3)) %> action: std.echo output="Task 1.<% $.i %>" publish: v1: <% task(t1).result %> on-success: - t2 t2: action: std.echo output="Task 2" """ wb_svc.create_workbook_v2(wb_def) wf_ex = self.engine.start_workflow('wb1.wf1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(2, len(wf_ex.task_executions)) task_1_ex = self._assert_single_item(task_execs, name='t1') task_2_ex = self._assert_single_item(task_execs, name='t2') self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertEqual(states.SUCCESS, task_2_ex.state) with db_api.transaction(): task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(3, len(task_1_action_exs)) # Make sure the remote executor is not called. self.assertFalse(r_exe.RemoteExecutor.run_action.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/executors/test_server_plugins.py0000644000175000017500000000275000000000000026414 0ustar00coreycorey00000000000000# Copyright 2017 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_log import log as logging from stevedore import exception as sd_exc from mistral.executors import base as exe from mistral.executors import default_executor as d from mistral.executors import remote_executor as r from mistral.tests.unit.executors import base LOG = logging.getLogger(__name__) class PluginTest(base.ExecutorTestCase): def tearDown(self): exe.cleanup() super(PluginTest, self).tearDown() def test_get_local_executor(self): executor = exe.get_executor('local') self.assertIsInstance(executor, d.DefaultExecutor) def test_get_remote_executor(self): executor = exe.get_executor('remote') self.assertIsInstance(executor, r.RemoteExecutor) def test_get_bad_executor(self): self.assertRaises(sd_exc.NoMatches, exe.get_executor, 'foobar') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.149568 mistral-10.0.0.0b3/mistral/tests/unit/expressions/0000755000175000017500000000000000000000000022271 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/expressions/__init__.py0000644000175000017500000000000000000000000024370 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/expressions/test_jinja_expression.py0000644000175000017500000004731000000000000027261 0ustar00coreycorey00000000000000# Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime import mock from mistral.db.v2.sqlalchemy import api as db_api from mistral import exceptions as exc from mistral.expressions import jinja_expression as expr from mistral.tests.unit import base from mistral_lib import utils DATA = { "server": { "id": "03ea824a-aa24-4105-9131-66c48ae54acf", "name": "cloud-fedora", "status": "ACTIVE" }, "status": "OK" } SERVERS = { "servers": [ {'name': 'centos'}, {'name': 'ubuntu'}, {'name': 'fedora'} ] } WF_EXECS = [ { 'spec': {}, 'id': "one", 'start_params': {'task': 'my_task1'}, 'state': 'IDLE', 'state_info': "Running...", 'created_at': datetime.datetime(2016, 12, 1, 15, 0, 0), 'updated_at': None, 'context': None, 'task_id': None, 'trust_id': None, 'description': None, 'output': None }, { 'spec': {}, 'id': "two", 'root_execution_id': "one", 'start_params': {'task': 'my_task1'}, 'state': 'RUNNING', 'state_info': "Running...", 'created_at': datetime.datetime(2016, 12, 1, 15, 1, 0), 'updated_at': None, 'context': {'image_id': '123123'}, 'task_id': None, 'trust_id': None, 'description': None, 'output': None } ] class JinjaEvaluatorTest(base.DbTestCase): def setUp(self): super(JinjaEvaluatorTest, self).setUp() self._evaluator = expr.JinjaEvaluator() def test_expression_result(self): res = self._evaluator.evaluate('_.server', DATA) self.assertEqual({ 'id': '03ea824a-aa24-4105-9131-66c48ae54acf', 'name': 'cloud-fedora', 'status': 'ACTIVE' }, res) res = self._evaluator.evaluate('_.server.id', DATA) self.assertEqual('03ea824a-aa24-4105-9131-66c48ae54acf', res) res = self._evaluator.evaluate("_.server.status == 'ACTIVE'", DATA) self.assertTrue(res) def test_select_result(self): res = self._evaluator.evaluate( '_.servers|selectattr("name", "equalto", "ubuntu")', SERVERS ) item = list(res)[0] self.assertEqual({'name': 'ubuntu'}, item) def test_function_string(self): self.assertEqual('3', self._evaluator.evaluate('_|string', '3')) self.assertEqual('3', self._evaluator.evaluate('_|string', 3)) def test_function_len(self): self.assertEqual( 3, self._evaluator.evaluate('_|length', 'hey') ) data = [{'some': 'thing'}] self.assertEqual( 1, self._evaluator.evaluate( '_|selectattr("some", "equalto", "thing")|list|length', data ) ) def test_validate(self): self._evaluator.validate('abc') self._evaluator.validate('1') self._evaluator.validate('1 + 2') self._evaluator.validate('_.a1') self._evaluator.validate('_.a1 * _.a2') def test_validate_failed(self): self.assertRaises(exc.JinjaGrammarException, self._evaluator.validate, '*') self.assertRaises(exc.JinjaEvaluationException, self._evaluator.validate, [1, 2, 3]) self.assertRaises(exc.JinjaEvaluationException, self._evaluator.validate, {'a': 1}) def test_function_json_pp(self): self.assertEqual('"3"', self._evaluator.evaluate('json_pp(_)', '3')) self.assertEqual('3', self._evaluator.evaluate('json_pp(_)', 3)) self.assertEqual( '[\n 1,\n 2\n]', self._evaluator.evaluate('json_pp(_)', [1, 2]) ) self.assertEqual( '{\n "a": "b"\n}', self._evaluator.evaluate('json_pp(_)', {'a': 'b'}) ) self.assertEqual( '"Mistral\nis\nawesome"', self._evaluator.evaluate( 'json_pp(_)', '\n'.join(['Mistral', 'is', 'awesome']) ) ) def test_filter_json_pp(self): self.assertEqual('"3"', self._evaluator.evaluate('_|json_pp', '3')) self.assertEqual('3', self._evaluator.evaluate('_|json_pp', 3)) self.assertEqual( '[\n 1,\n 2\n]', self._evaluator.evaluate('_|json_pp', [1, 2]) ) self.assertEqual( '{\n "a": "b"\n}', self._evaluator.evaluate('_|json_pp', {'a': 'b'}) ) self.assertEqual( '"Mistral\nis\nawesome"', self._evaluator.evaluate( 
'_|json_pp', '\n'.join(['Mistral', 'is', 'awesome']) ) ) def test_function_uuid(self): uuid = self._evaluator.evaluate('uuid()', {}) self.assertTrue(utils.is_valid_uuid(uuid)) def test_filter_uuid(self): uuid = self._evaluator.evaluate('_|uuid', '3') self.assertTrue(utils.is_valid_uuid(uuid)) def test_function_env(self): ctx = {'__env': 'some'} self.assertEqual(ctx['__env'], self._evaluator.evaluate('env()', ctx)) def test_filter_env(self): ctx = {'__env': 'some'} self.assertEqual(ctx['__env'], self._evaluator.evaluate('_|env', ctx)) @mock.patch('mistral.db.v2.api.get_task_executions') @mock.patch('mistral.workflow.data_flow.get_task_execution_result') def test_filter_task_without_task_execution(self, task_execution_result, task_executions): task = mock.MagicMock(return_value={}) task_executions.return_value = [task] ctx = { '__task_execution': None, '__execution': { 'id': 'some' } } result = self._evaluator.evaluate('_|task("some")', ctx) self.assertEqual({ 'id': task.id, 'name': task.name, 'published': task.published, 'result': task_execution_result(), 'spec': task.spec, 'state': task.state, 'state_info': task.state_info, 'type': task.type, 'workflow_execution_id': task.workflow_execution_id, 'created_at': task.created_at.isoformat(' '), 'updated_at': task.updated_at.isoformat(' '), }, result) @mock.patch('mistral.db.v2.api.get_task_executions') @mock.patch('mistral.workflow.data_flow.get_task_execution_result') def test_filter_tasks_without_task_execution(self, task_execution_result, task_executions): task = mock.MagicMock(return_value={}) task_executions.return_value = [task] ctx = { '__task_execution': None, '__execution': { 'id': 'some' } } result = self._evaluator.evaluate('_|tasks()', ctx) self.assertEqual([{ 'id': task.id, 'name': task.name, 'published': task.published, 'result': task_execution_result(), 'spec': task.spec, 'state': task.state, 'state_info': task.state_info, 'type': task.type, 'workflow_execution_id': task.workflow_execution_id, 'created_at': task.created_at.isoformat(' '), 'updated_at': task.updated_at.isoformat(' ') }], result) @mock.patch('mistral.db.v2.api.get_task_execution') @mock.patch('mistral.workflow.data_flow.get_task_execution_result') def test_filter_task_with_taskexecution(self, task_execution_result, task_execution): ctx = { '__task_execution': { 'id': 'some', 'name': 'some' } } result = self._evaluator.evaluate('_|task("some")', ctx) self.assertEqual({ 'id': task_execution().id, 'name': task_execution().name, 'published': task_execution().published, 'result': task_execution_result(), 'spec': task_execution().spec, 'state': task_execution().state, 'state_info': task_execution().state_info, 'type': task_execution().type, 'workflow_execution_id': task_execution().workflow_execution_id, 'created_at': task_execution().created_at.isoformat(' '), 'updated_at': task_execution().updated_at.isoformat(' ') }, result) @mock.patch('mistral.db.v2.api.get_task_execution') @mock.patch('mistral.workflow.data_flow.get_task_execution_result') def test_function_task(self, task_execution_result, task_execution): ctx = { '__task_execution': { 'id': 'some', 'name': 'some' } } result = self._evaluator.evaluate('task("some")', ctx) self.assertEqual({ 'id': task_execution().id, 'name': task_execution().name, 'published': task_execution().published, 'result': task_execution_result(), 'spec': task_execution().spec, 'state': task_execution().state, 'state_info': task_execution().state_info, 'type': task_execution().type, 'workflow_execution_id': 
task_execution().workflow_execution_id, 'created_at': task_execution().created_at.isoformat(' '), 'updated_at': task_execution().updated_at.isoformat(' ') }, result) @mock.patch('mistral.db.v2.api.get_workflow_execution') def test_filter_execution(self, workflow_execution): wf_ex = mock.MagicMock(return_value={}) workflow_execution.return_value = wf_ex ctx = { '__execution': { 'id': 'some' } } result = self._evaluator.evaluate('_|execution', ctx) self.assertEqual({ 'id': wf_ex.id, 'name': wf_ex.name, 'spec': wf_ex.spec, 'input': wf_ex.input, 'params': wf_ex.params, 'created_at': wf_ex.created_at.isoformat(' '), 'updated_at': wf_ex.updated_at.isoformat(' '), 'root_execution_id': wf_ex.root_execution_id }, result) def test_executions(self): with db_api.transaction(read_only=True): created0 = db_api.create_workflow_execution(WF_EXECS[0]) created1 = db_api.create_workflow_execution(WF_EXECS[1]) ctx = { '__execution': { 'id': 'some' } } result = self._evaluator.evaluate('_|executions()', ctx) self.assertEqual([created0, created1], result) def test_executions_id_filter(self): with db_api.transaction(read_only=True): created0 = db_api.create_workflow_execution(WF_EXECS[0]) created1 = db_api.create_workflow_execution(WF_EXECS[1]) ctx = { '__execution': { 'id': 'some' } } result = self._evaluator.evaluate('_|executions("one")', ctx) self.assertEqual([created0], result) result = self._evaluator.evaluate( 'executions(root_execution_id="one") ', ctx ) self.assertEqual([created1], result) def test_executions_state_filter(self): with db_api.transaction(read_only=True): db_api.create_workflow_execution(WF_EXECS[0]) created1 = db_api.create_workflow_execution(WF_EXECS[1]) ctx = { '__execution': { 'id': 'some' } } result = self._evaluator.evaluate( '_|executions(state="RUNNING")', ctx ) self.assertEqual([created1], result) result = self._evaluator.evaluate( '_|executions(id="one", state="RUNNING")', ctx ) self.assertEqual([], result) def test_executions_from_time_filter(self): with db_api.transaction(read_only=True): created0 = db_api.create_workflow_execution(WF_EXECS[0]) created1 = db_api.create_workflow_execution(WF_EXECS[1]) ctx = { '__execution': { 'id': 'some' } } result = self._evaluator.evaluate( '_|executions(from_time="2000-01-01")', ctx ) self.assertEqual([created0, created1], result) result = self._evaluator.evaluate( '_|executions(from_time="2016-12-01 15:01:00")', ctx ) self.assertEqual([created1], result) result = self._evaluator.evaluate( '_|executions(id="one", from_time="2016-12-01 15:01:00")', ctx ) self.assertEqual([], result) def test_executions_to_time_filter(self): with db_api.transaction(read_only=True): created0 = db_api.create_workflow_execution(WF_EXECS[0]) created1 = db_api.create_workflow_execution(WF_EXECS[1]) ctx = { '__execution': { 'id': 'some' } } result = self._evaluator.evaluate( '_|executions(to_time="2020-01-01")', ctx ) self.assertEqual([created0, created1], result) result = self._evaluator.evaluate( '_|executions(to_time="2016-12-01 15:01:00")', ctx ) self.assertEqual([created0], result) result = self._evaluator.evaluate( '_|executions(id="two", to_time="2016-12-01 15:01:00")', ctx ) self.assertEqual([], result) @mock.patch('mistral.db.v2.api.get_workflow_execution') def test_function_execution(self, workflow_execution): wf_ex = mock.MagicMock(return_value={}) workflow_execution.return_value = wf_ex ctx = { '__execution': { 'id': 'some' } } result = self._evaluator.evaluate('execution()', ctx) self.assertEqual({ 'id': wf_ex.id, 'name': wf_ex.name, 'spec': wf_ex.spec, 
'input': wf_ex.input, 'params': wf_ex.params, 'created_at': wf_ex.created_at.isoformat(' '), 'updated_at': wf_ex.updated_at.isoformat(' '), 'root_execution_id': wf_ex.root_execution_id }, result) class InlineJinjaEvaluatorTest(base.BaseTest): def setUp(self): super(InlineJinjaEvaluatorTest, self).setUp() self._evaluator = expr.InlineJinjaEvaluator() def test_multiple_placeholders(self): expr_str = """ Statistics for tenant "{{ _.project_id }}" Number of virtual machines: {{ _.vm_count }} Number of active virtual machines: {{ _.active_vm_count }} Number of networks: {{ _.net_count }} -- Sincerely, Mistral Team. """ result = self._evaluator.evaluate( expr_str, { 'project_id': '1-2-3-4', 'vm_count': 28, 'active_vm_count': 0, 'net_count': 1 } ) expected_result = """ Statistics for tenant "1-2-3-4" Number of virtual machines: 28 Number of active virtual machines: 0 Number of networks: 1 -- Sincerely, Mistral Team. """ self.assertEqual(expected_result, result) def test_block_placeholders(self): expr_str = """ Statistics for tenant "{{ _.project_id }}" Number of virtual machines: {{ _.vm_count }} {% if _.active_vm_count %} Number of active virtual machines: {{ _.active_vm_count }} {% endif %} Number of networks: {{ _.net_count }} -- Sincerely, Mistral Team. """ result = self._evaluator.evaluate( expr_str, { 'project_id': '1-2-3-4', 'vm_count': 28, 'active_vm_count': 0, 'net_count': 1 } ) expected_result = """ Statistics for tenant "1-2-3-4" Number of virtual machines: 28 Number of networks: 1 -- Sincerely, Mistral Team. """ self.assertEqual(expected_result, result) def test_single_value_casting(self): self.assertEqual(3, self._evaluator.evaluate('{{ _ }}', 3)) self.assertEqual('33', self._evaluator.evaluate('{{ _ }}{{ _ }}', 3)) def test_multiple_expressions(self): context = {'dir': '/tmp', 'file': 'a.txt'} expected_result = '/tmp/a.txt' result = self._evaluator.evaluate('{{ _.dir }}/{{ _.file }}', context) self.assertEqual(expected_result, result) def test_function_string(self): self.assertEqual('3', self._evaluator.evaluate('{{ _|string }}', '3')) self.assertEqual('3', self._evaluator.evaluate('{{ _|string }}', 3)) def test_validate(self): self._evaluator.validate('There is no expression.') self._evaluator.validate('{{ abc }}') self._evaluator.validate('{{ 1 }}') self._evaluator.validate('{{ 1 + 2 }}') self._evaluator.validate('{{ _.a1 }}') self._evaluator.validate('{{ _.a1 * _.a2 }}') self._evaluator.validate('{{ _.a1 }} is {{ _.a2 }}') self._evaluator.validate('The value is {{ _.a1 }}.') def test_validate_failed(self): self.assertRaises(exc.JinjaGrammarException, self._evaluator.validate, 'The value is {{ * }}.') self.assertRaises(exc.JinjaEvaluationException, self._evaluator.validate, [1, 2, 3]) self.assertRaises(exc.JinjaEvaluationException, self._evaluator.validate, {'a': 1}) def test_wrong_expression(self): res = self._evaluator.evaluate("{{ _.status == 'Invalid value' }}", DATA) self.assertFalse(res) # One thing to note about Jinja is that by default it would not raise # an exception on KeyError inside the expression, it will consider # value to be None. Same with NameError, it won't return an original # expression (which by itself seems confusing). Jinja allows us to # change behavior in both cases by switching to StrictUndefined, but # either one or the other will surely suffer. 
        self.assertRaises(
            exc.JinjaEvaluationException,
            self._evaluator.evaluate,
            '{{ _.wrong_key }}',
            DATA
        )

        self.assertRaises(
            exc.JinjaEvaluationException,
            self._evaluator.evaluate,
            '{{ invalid_expression_string }}',
            DATA
        )

        self.assertRaises(
            exc.JinjaEvaluationException,
            self._evaluator.evaluate,
            '!! {{ _.nonexistent_variable }} !!',
            DATA
        )


# ---- mistral-10.0.0.0b3/mistral/tests/unit/expressions/test_yaql_expression.py ----

# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# (standard Apache 2.0 header, identical to the other test modules)

import datetime
import json
import sys
import warnings

import mock

from mistral.config import cfg
from mistral import exceptions as exc
from mistral.expressions import yaql_expression as expr
from mistral.tests.unit import base
from mistral_lib import utils

CONF = cfg.CONF

DATA = {
    "server": {
        "id": "03ea824a-aa24-4105-9131-66c48ae54acf",
        "name": "cloud-fedora",
        "status": "ACTIVE"
    },
    "status": "OK"
}

SERVERS = {
    "servers": [
        {'name': 'centos'},
        {'name': 'ubuntu'},
        {'name': 'fedora'}
    ]
}


class YaqlEvaluatorTest(base.BaseTest):
    def setUp(self):
        super(YaqlEvaluatorTest, self).setUp()

        self._evaluator = expr.YAQLEvaluator()

    def test_expression_result(self):
        self.assertEqual(
            {
                'id': "03ea824a-aa24-4105-9131-66c48ae54acf",
                'name': 'cloud-fedora',
                'status': 'ACTIVE'
            },
            self._evaluator.evaluate('$.server', DATA)
        )

        self.assertEqual(
            '03ea824a-aa24-4105-9131-66c48ae54acf',
            self._evaluator.evaluate('$.server.id', DATA)
        )

        self.assertTrue(
            self._evaluator.evaluate("$.server.status = 'ACTIVE'", DATA)
        )

    def test_wrong_expression(self):
        self.assertFalse(
            self._evaluator.evaluate("$.status = 'Invalid value'", DATA)
        )

        self.assertRaises(
            exc.YaqlEvaluationException,
            self._evaluator.evaluate,
            '$.wrong_key',
            DATA
        )

        expression_str = 'invalid_expression_string'

        self.assertEqual(
            expression_str,
            self._evaluator.evaluate(expression_str, DATA)
        )

    def test_select_result(self):
        res = self._evaluator.evaluate(
            '$.servers.where($.name = ubuntu)',
            SERVERS
        )

        item = list(res)[0]

        self.assertEqual({'name': 'ubuntu'}, item)

    def test_function_string(self):
        self.assertEqual('3', self._evaluator.evaluate('str($)', '3'))
        self.assertEqual('3', self._evaluator.evaluate('str($)', 3))

    def test_function_len(self):
        self.assertEqual(3, self._evaluator.evaluate('len($)', 'hey'))

        data = [{'some': 'thing'}]

        self.assertEqual(
            1,
            self._evaluator.evaluate('$.where($.some = thing).len()', data)
        )

    def test_validate(self):
        self._evaluator.validate('abc')
        self._evaluator.validate('1')
        self._evaluator.validate('1 + 2')
        self._evaluator.validate('$.a1')
        self._evaluator.validate('$.a1 * $.a2')

    def test_validate_failed(self):
        self.assertRaises(
            exc.YaqlGrammarException,
            self._evaluator.validate,
            '*'
        )

        self.assertRaises(
            exc.YaqlGrammarException,
            self._evaluator.validate,
            [1, 2, 3]
        )

        self.assertRaises(
exc.YaqlGrammarException, self._evaluator.validate, {'a': 1} ) def test_function_json_pp(self): self.assertEqual('"3"', self._evaluator.evaluate('json_pp($)', '3')) self.assertEqual('3', self._evaluator.evaluate('json_pp($)', 3)) self.assertEqual( '[\n 1,\n 2\n]', self._evaluator.evaluate('json_pp($)', [1, 2]) ) self.assertEqual( '{\n "a": "b"\n}', self._evaluator.evaluate('json_pp($)', {'a': 'b'}) ) self.assertEqual( '"Mistral\nis\nawesome"', self._evaluator.evaluate( 'json_pp($)', '\n'.join(['Mistral', 'is', 'awesome']) ) ) def test_function_json_pp_deprecation(self): with warnings.catch_warnings(record=True) as w: # Ensure warnings aren't suppressed from other tests. for name, mod in list(sys.modules.items()): getattr(mod, '__warningregistry__', dict()).clear() warnings.simplefilter('always') result = self._evaluator.evaluate('json_pp($)', '3') self.assertEqual('"3"', result) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, DeprecationWarning)) self.assertTrue(str(w[-1].message).startswith( "json_pp was deprecated in Queens and will be removed in the S " )) def test_function_json_dump(self): self.assertEqual('"3"', self._evaluator.evaluate('json_dump($)', '3')) self.assertEqual('3', self._evaluator.evaluate('json_dump($)', 3)) self.assertEqual( json.dumps([1, 2], indent=4), self._evaluator.evaluate('json_dump($)', [1, 2]) ) self.assertEqual( json.dumps({"a": "b"}, indent=4), self._evaluator.evaluate('json_dump($)', {'a': 'b'}) ) self.assertEqual( json.dumps('\n'.join(["Mistral", "is", "awesome"]), indent=4), self._evaluator.evaluate( 'json_dump($)', '\n'.join(['Mistral', 'is', 'awesome']) ) ) def test_function_uuid(self): uuid = self._evaluator.evaluate('uuid()', {}) self.assertTrue(utils.is_valid_uuid(uuid)) @mock.patch('mistral.db.v2.api.get_task_executions') @mock.patch('mistral.workflow.data_flow.get_task_execution_result') def test_filter_tasks_without_task_execution(self, task_execution_result, task_executions): task_execution_result.return_value = 'task_execution_result' time_now = utils.utc_now_sec() task = type("obj", (object,), { 'id': 'id', 'name': 'name', 'published': 'published', 'result': task_execution_result(), 'spec': 'spec', 'state': 'state', 'state_info': 'state_info', 'type': 'type', 'workflow_execution_id': 'workflow_execution_id', 'created_at': time_now, 'updated_at': time_now + datetime.timedelta(seconds=1), })() task_executions.return_value = [task] ctx = { '__task_execution': None, '__execution': { 'id': 'some' } } result = self._evaluator.evaluate('tasks(some)', ctx) self.assertEqual(1, len(result)) self.assertDictEqual( { 'id': task.id, 'name': task.name, 'published': task.published, 'result': task.result, 'spec': task.spec, 'state': task.state, 'state_info': task.state_info, 'type': task.type, 'workflow_execution_id': task.workflow_execution_id, 'created_at': task.created_at.isoformat(' '), 'updated_at': task.updated_at.isoformat(' ') }, result[0] ) def test_function_env(self): ctx = {'__env': 'some'} self.assertEqual(ctx['__env'], self._evaluator.evaluate('env()', ctx)) class InlineYAQLEvaluatorTest(base.BaseTest): def setUp(self): super(InlineYAQLEvaluatorTest, self).setUp() self._evaluator = expr.InlineYAQLEvaluator() def test_multiple_placeholders(self): expr_str = """ Statistics for tenant "<% $.project_id %>" Number of virtual machines: <% $.vm_count %> Number of active virtual machines: <% $.active_vm_count %> Number of networks: <% $.net_count %> -- Sincerely, Mistral Team. 
""" result = self._evaluator.evaluate( expr_str, { 'project_id': '1-2-3-4', 'vm_count': 28, 'active_vm_count': 0, 'net_count': 1 } ) expected_result = """ Statistics for tenant "1-2-3-4" Number of virtual machines: 28 Number of active virtual machines: 0 Number of networks: 1 -- Sincerely, Mistral Team. """ self.assertEqual(expected_result, result) def test_single_value_casting(self): self.assertEqual(3, self._evaluator.evaluate('<% $ %>', 3)) self.assertEqual('33', self._evaluator.evaluate('<% $ %><% $ %>', 3)) def test_function_string(self): self.assertEqual('3', self._evaluator.evaluate('<% str($) %>', '3')) self.assertEqual('3', self._evaluator.evaluate('<% str($) %>', 3)) def test_validate(self): self._evaluator.validate('There is no expression.') self._evaluator.validate('<% abc %>') self._evaluator.validate('<% 1 %>') self._evaluator.validate('<% 1 + 2 %>') self._evaluator.validate('<% $.a1 %>') self._evaluator.validate('<% $.a1 * $.a2 %>') self._evaluator.validate('<% $.a1 %> is <% $.a2 %>') self._evaluator.validate('The value is <% $.a1 %>.') def test_validate_failed(self): self.assertRaises(exc.YaqlGrammarException, self._evaluator.validate, 'The value is <% * %>.') self.assertRaises(exc.YaqlEvaluationException, self._evaluator.validate, [1, 2, 3]) self.assertRaises(exc.YaqlEvaluationException, self._evaluator.validate, {'a': 1}) def test_set_of_dicts(self): # This test makes sense only if YAQL expression output conversion # is enabled. self.override_config('convert_output_data', True, 'yaql') self.override_config('convert_sets_to_lists', True, 'yaql') def _restore_engine(old_engine): expr.YAQL_ENGINE = old_engine self.addCleanup(_restore_engine, expr.YAQL_ENGINE) expr.YAQL_ENGINE = expr.create_yaql_engine_class( CONF.yaql.keyword_operator, CONF.yaql.allow_delegates, expr.get_yaql_engine_options() ) my_list = [ { "k1": "v1", "k2": "v2" }, { "k11": "v11", "k12": "v12" } ] res = self._evaluator.evaluate( '<% $.my_list.toSet() %>', {"my_list": my_list} ) self.assertIsInstance(res, list) self.assertEqual(2, len(res)) # The order may be different so we can't use "assertListEqual". self.assertTrue(my_list[0] == res[0] or my_list[1] == res[0]) self.assertTrue(my_list[0] == res[1] or my_list[1] == res[1]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/expressions/test_yaql_json_serialization.py0000644000175000017500000000554100000000000030643 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from yaql.language import utils as yaql_utils

from mistral.tests.unit import base
from mistral import utils


class YaqlJsonSerializationTest(base.BaseTest):
    def test_serialize_frozen_dict(self):
        data = yaql_utils.FrozenDict(a=1, b=2, c=iter([1, 2, 3]))

        json_str = utils.to_json_str(data)

        self.assertIsNotNone(json_str)
        self.assertIn('"a": 1', json_str)
        self.assertIn('"b": 2', json_str)
        self.assertIn('"c": [1, 2, 3]', json_str)

    def test_serialize_generator(self):
        def _list_stream(_list):
            for i in _list:
                yield i

        gen = _list_stream(
            [1, yaql_utils.FrozenDict(a=1), _list_stream([12, 15])]
        )

        self.assertEqual('[1, {"a": 1}, [12, 15]]', utils.to_json_str(gen))

    def test_serialize_dict_of_generators(self):
        def _f(cnt):
            for i in range(1, cnt + 1):
                yield i

        data = {'numbers': _f(3)}

        self.assertEqual('{"numbers": [1, 2, 3]}', utils.to_json_str(data))

    def test_serialize_range(self):
        self.assertEqual("[1, 2, 3, 4]", utils.to_json_str(range(1, 5)))

    def test_serialize_iterator_of_frozen_dicts(self):
        data = iter(
            [
                yaql_utils.FrozenDict(a=1, b=2, c=iter([1, 2, 3])),
                yaql_utils.FrozenDict(
                    a=11,
                    b=yaql_utils.FrozenDict(b='222'),
                    c=iter(
                        [
                            1,
                            yaql_utils.FrozenDict(
                                a=iter([4, yaql_utils.FrozenDict(a=99)])
                            )
                        ]
                    )
                )
            ]
        )

        json_str = utils.to_json_str(data)

        self.assertIsNotNone(json_str)

        # Checking the first item.
        self.assertIn('"a": 1', json_str)
        self.assertIn('"b": 2', json_str)
        self.assertIn('"c": [1, 2, 3]', json_str)

        # Checking the second item.
        self.assertIn('"a": 11', json_str)
        self.assertIn('"b": {"b": "222"}', json_str)
        self.assertIn('"c": [1, {"a": [4, {"a": 99}]}]', json_str)


# ---- mistral-10.0.0.0b3/mistral/tests/unit/hacking/__init__.py (empty) ----

# ---- mistral-10.0.0.0b3/mistral/tests/unit/hacking/test_checks.py ----

# Licensed under the Apache License, Version 2.0 (the "License");
# (standard Apache 2.0 header, identical to the other test modules)

import textwrap

import mock
import pycodestyle

from mistral.hacking import checks
from mistral.tests.unit import base
from mistral.tests.unit.mstrlfixtures import hacking as hacking_fixtures


class BaseLoggingCheckTest(base.BaseTest):

    def setUp(self):
        super(BaseLoggingCheckTest, self).setUp()

        self.code_ex = self.useFixture(self.get_fixture())
        self.addCleanup(delattr, self, 'code_ex')

    def get_checker(self):
        return checks.CheckForLoggingIssues

    def get_fixture(self):
        return hacking_fixtures.HackingLogging()

    # We are patching pep8 so that only the check under test is actually
    # installed.
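    # For context, pycodestyle discovers checks through its internal
    # registry; a check registered with pycodestyle.register_check() is
    # keyed by the name of its first argument ('physical_line',
    # 'logical_line' or 'tree'). A commented, illustrative sketch (the
    # no_todo check is hypothetical, not part of this test suite):
    #
    #   def no_todo(logical_line):  # a minimal logical_line check
    #       if 'TODO' in logical_line:
    #           yield 0, 'X100 TODO found'
    #
    #   pycodestyle.register_check(no_todo)
    #
    # Patching pycodestyle._checks first (as below) empties that registry,
    # so only the check passed to run_check() ever runs.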
@mock.patch('pycodestyle._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def run_check(self, code, checker, filename=None): pycodestyle.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) checker = pycodestyle.Checker(filename=filename, lines=lines) with mock.patch('pycodestyle.StandardReport.get_file_results'): checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def _assert_has_errors(self, code, checker, expected_errors=None, filename=None): # Pull out the parts of the error that we'll match against. actual_errors = [e[:3] for e in self.run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) def _assert_has_no_errors(self, code, checker, filename=None): self._assert_has_errors(code, checker, filename=filename) def test_no_assert_equal_true_false(self): code = """ self.assertEqual(context_is_admin, True) self.assertEqual(context_is_admin, False) self.assertEqual(True, context_is_admin) self.assertEqual(False, context_is_admin) self.assertNotEqual(context_is_admin, True) self.assertNotEqual(context_is_admin, False) self.assertNotEqual(True, context_is_admin) self.assertNotEqual(False, context_is_admin) """ errors = [(1, 0, 'M319'), (2, 0, 'M319'), (3, 0, 'M319'), (4, 0, 'M319'), (5, 0, 'M319'), (6, 0, 'M319'), (7, 0, 'M319'), (8, 0, 'M319')] self._assert_has_errors(code, checks.no_assert_equal_true_false, expected_errors=errors) code = """ self.assertEqual(context_is_admin, stuff) self.assertNotEqual(context_is_admin, stuff) """ self._assert_has_no_errors(code, checks.no_assert_equal_true_false) def test_no_assert_true_false_is_not(self): code = """ self.assertTrue(test is None) self.assertTrue(False is my_variable) self.assertFalse(None is test) self.assertFalse(my_variable is False) """ errors = [(1, 0, 'M320'), (2, 0, 'M320'), (3, 0, 'M320'), (4, 0, 'M320')] self._assert_has_errors(code, checks.no_assert_true_false_is_not, expected_errors=errors) def test_check_python3_xrange(self): func = checks.check_python3_xrange self.assertEqual(1, len(list(func('for i in xrange(10)')))) self.assertEqual(1, len(list(func('for i in xrange (10)')))) self.assertEqual(0, len(list(func('for i in range(10)')))) self.assertEqual(0, len(list(func('for i in six.moves.range(10)')))) def test_dict_iteritems(self): self.assertEqual(1, len(list(checks.check_python3_no_iteritems( "obj.iteritems()")))) self.assertEqual(0, len(list(checks.check_python3_no_iteritems( "six.iteritems(ob))")))) def test_dict_iterkeys(self): self.assertEqual(1, len(list(checks.check_python3_no_iterkeys( "obj.iterkeys()")))) self.assertEqual(0, len(list(checks.check_python3_no_iterkeys( "six.iterkeys(ob))")))) def test_dict_itervalues(self): self.assertEqual(1, len(list(checks.check_python3_no_itervalues( "obj.itervalues()")))) self.assertEqual(0, len(list(checks.check_python3_no_itervalues( "six.itervalues(ob))")))) class TestLoggingWithWarn(BaseLoggingCheckTest): def test_using_deprecated_warn(self): data = self.code_ex.assert_not_using_deprecated_warn code = self.code_ex.shared_imports + data['code'] errors = data['expected_errors'] self._assert_has_errors(code, checks.CheckForLoggingIssues, expected_errors=errors) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.149568 mistral-10.0.0.0b3/mistral/tests/unit/lang/0000755000175000017500000000000000000000000020630 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/lang/__init__.py0000644000175000017500000000000000000000000022727 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/lang/test_spec_caching.py0000644000175000017500000002155400000000000024656 0ustar00coreycorey00000000000000# Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.db.v2 import api as db_api from mistral.lang import parser as spec_parser from mistral.services import workbooks as wb_service from mistral.services import workflows as wf_service from mistral.tests.unit import base from mistral.tests.unit.engine import base as engine_base from mistral.workflow import states class SpecificationCachingTest(base.DbTestCase): def test_workflow_spec_caching(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.echo output="Echo" """ wfs = wf_service.create_workflows(wf_text) self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) wf_spec = spec_parser.get_workflow_spec_by_definition_id( wfs[0].id, wfs[0].updated_at ) self.assertIsNotNone(wf_spec) self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) def test_workflow_spec_cache_update_via_workflow_service(self): wf_text = """ version: '2.0' wf: tasks: task1: action: std.echo output="Echo" """ wfs = wf_service.create_workflows(wf_text) self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size()) wf_spec = spec_parser.get_workflow_spec_by_definition_id( wfs[0].id, wfs[0].updated_at ) self.assertEqual(1, len(wf_spec.get_tasks())) self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size()) self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size()) # Now update workflow definition and check that cache is updated too. 
        wf_text = """
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.echo output="1"

            task2:
              action: std.echo output="2"
        """

        wfs = wf_service.update_workflows(wf_text)

        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wfs[0].id,
            wfs[0].updated_at
        )

        self.assertEqual(2, len(wf_spec.get_tasks()))
        self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size())
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())

    def test_workflow_spec_cache_update_via_workbook_service(self):
        wb_text = """
        version: '2.0'

        name: wb

        workflows:
          wf:
            tasks:
              task1:
                action: std.echo output="Echo"
        """

        wb_service.create_workbook_v2(wb_text)

        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size())

        wf = db_api.get_workflow_definition('wb.wf')

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf.id,
            wf.updated_at
        )

        self.assertEqual(1, len(wf_spec.get_tasks()))
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        # Now update workflow definition and check that cache is updated too.
        wb_text = """
        version: '2.0'

        name: wb

        workflows:
          wf:
            tasks:
              task1:
                action: std.echo output="1"

              task2:
                action: std.echo output="2"
        """

        wb_service.update_workbook_v2(wb_text)

        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        wf = db_api.get_workflow_definition(wf.id)

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf.id,
            wf.updated_at
        )

        self.assertEqual(2, len(wf_spec.get_tasks()))
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size())

    def test_cache_workflow_spec_by_execution_id(self):
        wf_text = """
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.echo output="Echo"
        """

        wfs = wf_service.create_workflows(wf_text)

        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size())

        wf_def = wfs[0]

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id,
            wf_def.updated_at
        )

        self.assertEqual(1, len(wf_spec.get_tasks()))
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        with db_api.transaction():
            wf_ex = db_api.create_workflow_execution({
                'id': '1-2-3-4',
                'name': 'wf',
                'workflow_id': wf_def.id,
                'spec': wf_spec.to_dict(),
                'state': states.RUNNING
            })

        # Check that we can get a valid spec by execution id.
        wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id(
            wf_ex.id
        )

        self.assertEqual(1, len(wf_spec_by_exec_id.get_tasks()))

        # Now update workflow definition and check that cache is updated too.
        wf_text = """
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.echo output="1"

            task2:
              action: std.echo output="2"
        """

        wfs = wf_service.update_workflows(wf_text)

        self.assertEqual(1, spec_parser.get_wf_definition_spec_cache_size())

        wf_spec = spec_parser.get_workflow_spec_by_definition_id(
            wfs[0].id,
            wfs[0].updated_at
        )

        self.assertEqual(2, len(wf_spec.get_tasks()))
        self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size())
        self.assertEqual(1, spec_parser.get_wf_execution_spec_cache_size())

        # Now finally update execution cache and check that we can
        # get a valid spec by execution id.
        spec_parser.cache_workflow_spec_by_execution_id(wf_ex.id, wf_spec)

        wf_spec_by_exec_id = spec_parser.get_workflow_spec_by_execution_id(
            wf_ex.id
        )

        self.assertEqual(2, len(wf_spec_by_exec_id.get_tasks()))


class SpecificationCachingEngineTest(engine_base.EngineTestCase):
    def test_cache_workflow_spec_no_duplicates(self):
        wfs_text = """
        version: '2.0'

        wf:
          tasks:
            task1:
              action: std.noop
              on-success:
                - task2
                - task3

            task2:
              workflow: sub_wf my_param="val1"

            task3:
              workflow: sub_wf my_param="val2"

        sub_wf:
          input:
            - my_param

          tasks:
            task1:
              action: std.echo output="Param value is <% $.my_param %>"
        """

        wfs = wf_service.create_workflows(wfs_text)

        self.assertEqual(2, len(wfs))
        self.assertEqual(0, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(0, spec_parser.get_wf_definition_spec_cache_size())

        wf_ex = self.engine.start_workflow('wf')

        self.await_workflow_success(wf_ex.id)

        # We expect to have a cache entry for every workflow execution
        # but two of them should refer to the same object.
        self.assertEqual(3, spec_parser.get_wf_execution_spec_cache_size())
        self.assertEqual(2, spec_parser.get_wf_definition_spec_cache_size())

        sub_wf_execs = db_api.get_workflow_executions(name='sub_wf')

        self.assertEqual(2, len(sub_wf_execs))

        spec1 = spec_parser.get_workflow_spec_by_execution_id(
            sub_wf_execs[0].id
        )
        spec2 = spec_parser.get_workflow_spec_by_execution_id(
            sub_wf_execs[1].id
        )

        self.assertIs(spec1, spec2)
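A usage sketch of the cache API exercised above, assuming it runs inside a DbTestCase-style test with empty parser caches: the first lookup by (definition id, updated_at) parses and caches the spec, and a repeated lookup returns the very same object.

    from mistral.lang import parser as spec_parser
    from mistral.services import workflows as wf_service

    WF_TEXT = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
    """


    def demo_definition_spec_cache():
        wf_def = wf_service.create_workflows(WF_TEXT)[0]

        # First lookup parses the workflow text and fills the cache.
        spec = spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id, wf_def.updated_at
        )

        # Same id + timestamp -> the cached spec object is returned.
        assert spec is spec_parser.get_workflow_spec_by_definition_id(
            wf_def.id, wf_def.updated_at
        )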
# ---- mistral-10.0.0.0b3/mistral/tests/unit/lang/v2/__init__.py (empty) ----

# ---- mistral-10.0.0.0b3/mistral/tests/unit/lang/v2/base.py ----

# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral.tests.unit import base
from mistral.utils import safe_yaml
from mistral_lib import utils


class WorkflowSpecValidationTestCase(base.BaseTest):

    def __init__(self, *args, **kwargs):
        super(WorkflowSpecValidationTestCase, self).__init__(*args, **kwargs)

        # The relative resource path is ./mistral/tests/resources/workbook/v2.
        self._resource_path = 'workbook/v2'

        self._spec_parser = spec_parser.get_workflow_list_spec_from_yaml

        self._dsl_blank = {
            'version': '2.0',
            'test': {
                'type': 'direct'
            }
        }

        self._dsl_tasks = {
            'get': {
                'action': 'std.http',
                'input': {
                    'url': 'https://www.openstack.org'
                }
            },
            'echo': {
                'action': 'std.echo',
                'input': {
                    'output': 'This is a test.'
                }
            },
            'email': {
                'action': 'std.email',
                'input': {
                    'from_addr': 'mistral@example.com',
                    'to_addrs': ['admin@example.com'],
                    'subject': 'Test',
                    'body': 'This is a test.',
                    'smtp_server': 'localhost',
                    'smtp_password': 'password'
                }
            }
        }

    def _parse_dsl_spec(self, dsl_file=None, add_tasks=False,
                        changes=None, expect_error=False):
        if dsl_file and add_tasks:
            raise Exception('The add_tasks option is not a valid '
                            'combination with the dsl_file option.')

        if dsl_file:
            dsl_yaml = base.get_resource(self._resource_path + '/' + dsl_file)

            if changes:
                dsl_dict = safe_yaml.safe_load(dsl_yaml)
                utils.merge_dicts(dsl_dict, changes)
                dsl_yaml = safe_yaml.safe_dump(dsl_dict,
                                               default_flow_style=False)
        else:
            dsl_dict = copy.deepcopy(self._dsl_blank)

            if add_tasks:
                dsl_dict['test']['tasks'] = copy.deepcopy(self._dsl_tasks)

            if changes:
                utils.merge_dicts(dsl_dict, changes)

            dsl_yaml = safe_yaml.safe_dump(dsl_dict, default_flow_style=False)

        if not expect_error:
            return self._spec_parser(dsl_yaml)
        else:
            return self.assertRaises(
                exc.DSLParsingException,
                self._spec_parser,
                dsl_yaml
            )


class WorkbookSpecValidationTestCase(WorkflowSpecValidationTestCase):

    def __init__(self, *args, **kwargs):
        super(WorkbookSpecValidationTestCase, self).__init__(*args, **kwargs)

        self._spec_parser = spec_parser.get_workbook_spec_from_yaml

        self._dsl_blank = {
            'version': '2.0',
            'name': 'test_wb'
        }

    def _parse_dsl_spec(self, dsl_file=None, add_tasks=False,
                        changes=None, expect_error=False):
        return super(WorkbookSpecValidationTestCase, self)._parse_dsl_spec(
            dsl_file=dsl_file, add_tasks=False, changes=changes,
            expect_error=expect_error)
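A hedged sketch of how a concrete test drives the helper above: merge an overlay into the blank DSL skeleton and set expect_error according to whether the fragment should validate. The overlay value is hypothetical but mirrors the invalid-input cases used elsewhere in this suite.

    from mistral.tests.unit.lang.v2 import base


    class ExampleOverlayTest(base.WorkflowSpecValidationTestCase):

        def test_task_input_must_be_mapping(self):
            # 'input': None is invalid for a task, so validation must fail.
            overlay = {'test': {'tasks': {'get': {'input': None}}}}

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=True
            )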
# ---- mistral-10.0.0.0b3/mistral/tests/unit/lang/v2/test_actions.py ----

# Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

from mistral.tests.unit.lang.v2 import base
from mistral_lib import utils


class ActionSpecValidationTest(base.WorkbookSpecValidationTestCase):

    def test_base_required(self):
        actions = {'actions': {'a1': {}}}

        exception = self._parse_dsl_spec(changes=actions, expect_error=True)

        self.assertIn("'base' is a required property", str(exception))

    def test_base(self):
        tests = [
            ({'actions': {'a1': {'base': ''}}}, True),
            ({'actions': {'a1': {'base': None}}}, True),
            ({'actions': {'a1': {'base': 12345}}}, True),
            ({'actions': {'a1': {'base': 'std.noop'}}}, False),
            ({'actions': {'a1': {'base': 'std.echo output="foo"'}}}, False),
            ({'actions': {'a1': {'base': 'std.echo output="<% $.x %>"'}}},
             False),
            ({'actions': {'a1': {'base': 'std.echo output="<% * %>"'}}},
             True),
            ({'actions': {'a1': {'base': 'std.echo output="{{ _.x }}"'}}},
             False),
            ({'actions': {'a1': {'base': 'std.echo output="{{ * }}"'}}},
             True)
        ]

        for actions, expect_error in tests:
            self._parse_dsl_spec(changes=actions, expect_error=expect_error)

    def test_base_input(self):
        tests = [
            ({'base-input': {}}, True),
            ({'base-input': None}, True),
            ({'base-input': {'k1': 'v1', 'k2': '<% $.v2 %>'}}, False),
            ({'base-input': {'k1': 'v1', 'k2': '<% * %>'}}, True),
            ({'base-input': {'k1': 'v1', 'k2': '{{ _.v2 }}'}}, False),
            ({'base-input': {'k1': 'v1', 'k2': '{{ * }}'}}, True)
        ]

        actions = {
            'a1': {
                'base': 'foobar'
            }
        }

        for base_inputs, expect_error in tests:
            overlay = {'actions': copy.deepcopy(actions)}

            utils.merge_dicts(overlay['actions']['a1'], base_inputs)

            self._parse_dsl_spec(changes=overlay, expect_error=expect_error)

    def test_input(self):
        tests = [
            ({'input': ''}, True),
            ({'input': []}, True),
            ({'input': ['']}, True),
            ({'input': None}, True),
            ({'input': ['k1', 'k2']}, False),
            ({'input': ['k1', 12345]}, True),
            ({'input': ['k1', {'k2': 2}]}, False),
            ({'input': [{'k1': 1}, {'k2': 2}]}, False),
            ({'input': [{'k1': None}]}, False),
            ({'input': [{'k1': 1}, {'k1': 1}]}, True),
            ({'input': [{'k1': 1, 'k2': 2}]}, True)
        ]

        actions = {
            'a1': {
                'base': 'foobar'
            }
        }

        for inputs, expect_error in tests:
            overlay = {'actions': copy.deepcopy(actions)}

            utils.merge_dicts(overlay['actions']['a1'], inputs)

            self._parse_dsl_spec(changes=overlay, expect_error=expect_error)

    def test_output(self):
        tests = [
            ({'output': None}, False),
            ({'output': False}, False),
            ({'output': 12345}, False),
            ({'output': 0.12345}, False),
            ({'output': 'foobar'}, False),
            ({'output': '<% $.x %>'}, False),
            ({'output': '<% * %>'}, True),
            ({'output': '{{ _.x }}'}, False),
            ({'output': '{{ * }}'}, True),
            ({'output': ['v1']}, False),
            ({'output': {'k1': 'v1'}}, False)
        ]

        actions = {
            'a1': {
                'base': 'foobar'
            }
        }

        for outputs, expect_error in tests:
            overlay = {'actions': copy.deepcopy(actions)}

            utils.merge_dicts(overlay['actions']['a1'], outputs)

            self._parse_dsl_spec(changes=overlay, expect_error=expect_error)
# ---- mistral-10.0.0.0b3/mistral/tests/unit/lang/v2/test_tasks.py ----

# Copyright 2015 - Huawei Technologies Co. Ltd
# Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mistral.lang.v2 import workflows
from mistral.tests.unit.lang.v2 import base as v2_base
from mistral_lib import utils


class TaskSpecValidationTest(v2_base.WorkflowSpecValidationTestCase):

    def test_type_injection(self):
        tests = [
            ({'type': 'direct'}, False),
            ({'type': 'reverse'}, False)
        ]

        for wf_type, expect_error in tests:
            overlay = {'test': wf_type}
            wfs_spec = self._parse_dsl_spec(add_tasks=True,
                                            changes=overlay,
                                            expect_error=expect_error)

            if not expect_error:
                self.assertIsInstance(wfs_spec, workflows.WorkflowListSpec)
                self.assertEqual(1, len(wfs_spec.get_workflows()))

                wf_spec = wfs_spec.get_workflows()[0]

                self.assertEqual(wf_type['type'], wf_spec.get_type())

                for task in wf_spec.get_tasks():
                    self.assertEqual(task._data['type'], wf_type['type'])

    def test_action_or_workflow(self):
        tests = [
            ({'action': 'std.noop'}, False),
            ({'action': 'std.http url="openstack.org"'}, False),
            ({'action': 'std.http url="openstack.org" timeout=10'}, False),
            ({'action': 'std.http url=<% $.url %>'}, False),
            ({'action': 'std.http url=<% $.url %> timeout=<% $.t %>'},
             False),
            ({'action': 'std.http url=<% * %>'}, True),
            ({'action': 'std.http url={{ _.url }}'}, False),
            ({'action': 'std.http url={{ _.url }} timeout={{ _.t }}'},
             False),
            ({'action': 'std.http url={{ $ }}'}, True),
            ({'workflow': 'test.wf'}, False),
            ({'workflow': 'test.wf k1="v1"'}, False),
            ({'workflow': 'test.wf k1="v1" k2="v2"'}, False),
            ({'workflow': 'test.wf k1=<% $.v1 %>'}, False),
            ({'workflow': 'test.wf k1=<% $.v1 %> k2=<% $.v2 %>'}, False),
            ({'workflow': 'test.wf k1=<% * %>'}, True),
            ({'workflow': 'test.wf k1={{ _.v1 }}'}, False),
            ({'workflow': 'test.wf k1={{ _.v1 }} k2={{ _.v2 }}'}, False),
            ({'workflow': 'test.wf k1={{ $ }}'}, True),
            ({'action': 'std.noop', 'workflow': 'test.wf'}, True),
            ({'action': 123}, True),
            ({'workflow': 123}, True),
            ({'action': ''}, True),
            ({'workflow': ''}, True),
            ({'action': None}, True),
            ({'workflow': None}, True)
        ]

        for task, expect_error in tests:
            overlay = {'test': {'tasks': {'task1': task}}}

            self._parse_dsl_spec(
                add_tasks=False,
                changes=overlay,
                expect_error=expect_error
            )

    def test_inputs(self):
        tests = [
            ({'input': ''}, True),
            ({'input': {}}, True),
            ({'input': None}, True),
            ({'input': {'k1': 'v1'}}, False),
            ({'input': {'k1': '<% $.v1 %>'}}, False),
            ({'input': {'k1': '<% 1 + 2 %>'}}, False),
            ({'input': {'k1': '<% * %>'}}, True),
            ({'input': {'k1': '{{ _.v1 }}'}}, False),
            ({'input': {'k1': '{{ 1 + 2 }}'}}, False),
            ({'input': {'k1': '{{ * }}'}}, True)
        ]

        for task_input, expect_error in tests:
            overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}}

            utils.merge_dicts(overlay['test']['tasks']['task1'], task_input)

            self._parse_dsl_spec(
                add_tasks=False,
                changes=overlay,
                expect_error=expect_error
            )
    def test_with_items(self):
        tests = [
            ({'with-items': ''}, True),
            ({'with-items': []}, True),
            ({'with-items': ['']}, True),
            ({'with-items': None}, True),
            ({'with-items': 12345}, True),
            ({'with-items': 'x in y'}, True),
            ({'with-items': '<% $.y %>'}, True),
            ({'with-items': 'x in <% $.y %>'}, False),
            ({'with-items': ['x in [1, 2, 3]']}, False),
            ({'with-items': ['x in <% $.y %>']}, False),
            ({'with-items': ['x in <% $.y %>', 'i in [1, 2, 3]']}, False),
            ({'with-items': ['x in <% $.y %>', 'i in <% $.j %>']}, False),
            ({'with-items': ['x in <% * %>']}, True),
            ({'with-items': ['x in <% $.y %>', 'i in <% * %>']}, True),
            ({'with-items': '{{ _.y }}'}, True),
            ({'with-items': 'x in {{ _.y }}'}, False),
            ({'with-items': ['x in [1, 2, 3]']}, False),
            ({'with-items': ['x in {{ _.y }}']}, False),
            ({'with-items': ['x in {{ _.y }}', 'i in [1, 2, 3]']}, False),
            ({'with-items': ['x in {{ _.y }}', 'i in {{ _.j }}']}, False),
            ({'with-items': ['x in {{ * }}']}, True),
            ({'with-items': ['x in {{ _.y }}', 'i in {{ * }}']}, True)
        ]

        for with_item, expect_error in tests:
            overlay = {'test': {'tasks': {'get': with_item}}}

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_publish(self):
        tests = [
            ({'publish': ''}, True),
            ({'publish': {}}, True),
            ({'publish': None}, True),
            ({'publish': {'k1': 'v1'}}, False),
            ({'publish': {'k1': '<% $.v1 %>'}}, False),
            ({'publish': {'k1': '<% 1 + 2 %>'}}, False),
            ({'publish': {'k1': '<% * %>'}}, True),
            ({'publish': {'k1': '{{ _.v1 }}'}}, False),
            ({'publish': {'k1': '{{ 1 + 2 }}'}}, False),
            ({'publish': {'k1': '{{ * }}'}}, True)
        ]

        for output, expect_error in tests:
            overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}}

            utils.merge_dicts(overlay['test']['tasks']['task1'], output)

            self._parse_dsl_spec(
                add_tasks=False,
                changes=overlay,
                expect_error=expect_error
            )

    def test_publish_on_error(self):
        tests = [
            ({'publish-on-error': ''}, True),
            ({'publish-on-error': {}}, True),
            ({'publish-on-error': None}, True),
            ({'publish-on-error': {'k1': 'v1'}}, False),
            ({'publish-on-error': {'k1': '<% $.v1 %>'}}, False),
            ({'publish-on-error': {'k1': '<% 1 + 2 %>'}}, False),
            ({'publish-on-error': {'k1': '<% * %>'}}, True),
            ({'publish-on-error': {'k1': '{{ _.v1 }}'}}, False),
            ({'publish-on-error': {'k1': '{{ 1 + 2 }}'}}, False),
            ({'publish-on-error': {'k1': '{{ * }}'}}, True)
        ]

        for output, expect_error in tests:
            overlay = {'test': {'tasks': {'task1': {'action': 'test.mock'}}}}

            utils.merge_dicts(overlay['test']['tasks']['task1'], output)

            self._parse_dsl_spec(
                add_tasks=False,
                changes=overlay,
                expect_error=expect_error
            )

    def test_policies(self):
        tests = [
            ({'retry': {'count': 3, 'delay': 1}}, False),
            ({'retry': {'continue-on': '<% 1 %>', 'delay': 2,
                        'break-on': '<% 1 %>', 'count': 2}}, False),
            ({'retry': {'count': 3, 'delay': 1,
                        'continue-on': '<% 1 %>'}}, False),
            ({'retry': {'count': '<% 3 %>', 'delay': 1}}, False),
            ({'retry': {'count': '<% * %>', 'delay': 1}}, True),
            ({'retry': {'count': 3, 'delay': '<% 1 %>'}}, False),
            ({'retry': {'count': 3, 'delay': '<% * %>'}}, True),
            ({'retry': {'continue-on': '{{ 1 }}', 'delay': 2,
                        'break-on': '{{ 1 }}', 'count': 2}}, False),
            ({'retry': {'count': 3, 'delay': 1,
                        'continue-on': '{{ 1 }}'}}, False),
            ({'retry': {'count': '{{ 3 }}', 'delay': 1}}, False),
            ({'retry': {'count': '{{ * }}', 'delay': 1}}, True),
            ({'retry': {'count': 3, 'delay': '{{ 1 }}'}}, False),
            ({'retry': {'count': 3, 'delay': '{{ * }}'}}, True),
            ({'retry': {'count': -3, 'delay': 1}}, True),
            ({'retry': {'count': 3, 'delay': -1}}, True),
            ({'retry': {'count': '3', 'delay': 1}}, True),
            ({'retry': {'count': 3, 'delay': '1'}}, True),
            ({'retry': 'count=3 delay=1 break-on=<% false %>'}, False),
            ({'retry': 'count=3 delay=1 break-on={{ false }}'}, False),
            ({'retry': 'count=3 delay=1'}, False),
            ({'retry': 'coun=3 delay=1'}, True),
            ({'retry': None}, True),
            ({'wait-before': 1}, False),
            ({'wait-before': '<% 1 %>'}, False),
            ({'wait-before': '<% * %>'}, True),
            ({'wait-before': '{{ 1 }}'}, False),
            ({'wait-before': '{{ * }}'}, True),
            ({'wait-before': -1}, True),
            ({'wait-before': 1.1}, True),
            ({'wait-before': '1'}, True),
            ({'wait-after': 1}, False),
            ({'wait-after': '<% 1 %>'}, False),
            ({'wait-after': '<% * %>'}, True),
            ({'wait-after': '{{ 1 }}'}, False),
            ({'wait-after': '{{ * }}'}, True),
            ({'wait-after': -1}, True),
            ({'wait-after': 1.1}, True),
            ({'wait-after': '1'}, True),
            ({'timeout': 300}, False),
            ({'timeout': '<% 300 %>'}, False),
            ({'timeout': '<% * %>'}, True),
            ({'timeout': '{{ 300 }}'}, False),
            ({'timeout': '{{ * }}'}, True),
            ({'timeout': -300}, True),
            ({'timeout': 300.1}, True),
            ({'timeout': '300'}, True),
            ({'pause-before': False}, False),
            ({'pause-before': '<% False %>'}, False),
            ({'pause-before': '<% * %>'}, True),
            ({'pause-before': '{{ False }}'}, False),
            ({'pause-before': '{{ * }}'}, True),
            ({'pause-before': 'False'}, True),
            ({'concurrency': 10}, False),
            ({'concurrency': '<% 10 %>'}, False),
            ({'concurrency': '<% * %>'}, True),
            ({'concurrency': '{{ 10 }}'}, False),
            ({'concurrency': '{{ * }}'}, True),
            ({'concurrency': -10}, True),
            ({'concurrency': 10.1}, True),
            ({'concurrency': '10'}, True)
        ]

        for policy, expect_error in tests:
            overlay = {'test': {'tasks': {'get': policy}}}

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_direct_transition(self):
        tests = [
            (['email'], False),
            (['email%'], True),
            ([{'email': '<% 1 %>'}], False),
            ([{'email': '<% 1 %>'}, {'email': '<% 1 %>'}], True),
            ([{'email': '<% 1 %>', 'more_email': '<% 2 %>'}], True),
            (['email'], False),
            ([{'email': '<% 1 %>'}, 'echo'], False),
            ([{'email': '<% $.v1 in $.v2 %>'}], False),
            ([{'email': '<% * %>'}], True),
            ([{'email': '{{ 1 }}'}], False),
            ([{'email': '{{ 1 }}'}, 'echo'], False),
            ([{'email': '{{ _.v1 in _.v2 }}'}], False),
            ([{'email': '{{ * }}'}], True),
            ('email', False),
            ('fail msg="<% task().result %>"', False),
            ('fail(msg=<% task() %>)', False),
            (None, True),
            ([''], True),
            ([], True),
            (['email', 'email'], True),
            (['email', 12345], True)
        ]

        for on_clause_key in ['on-error', 'on-success', 'on-complete']:
            for on_clause_value, expect_error in tests:
                overlay = {'test': {'tasks': {}}}

                utils.merge_dicts(overlay['test']['tasks'],
                                  {'get': {on_clause_key: on_clause_value}})

                self._parse_dsl_spec(
                    add_tasks=True,
                    changes=overlay,
                    expect_error=expect_error
                )

    def test_direct_transition_advanced_schema(self):
        tests = [
            ({'on-success': {'publish': {'var1': 1234}}}, True),
            ({'on-success': {'publish': {'branch': {'var1': 1234}}}},
             False),
            ({'on-success': {'publish': {
                'branch': {'var1': 1234},
                'global': {'global_var1': 'val'},
                'atomic': {'atomic_var1': '<% my_func() %>'}}}},
             False),
            ({'on-success': {'publish': {
                'branch': {'var1': 1234},
                'global': {'global_var1': '<% * %>'},
                'atomic': {'atomic_var1': '<% my_func() %>'}}}},
             True),
            ({'on-success': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': 'email'}},
             False),
            ({'on-success': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': ['email']}},
             False),
            ({'on-success': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': [{'email': '<% 1 %>'}]}},
             False),
            ({'on-success': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': [{'email': '<% $.v1 and $.v2 %>'}]}},
             False),
            ({'on-success': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': [{'email': '<% * %>'}]}},
             True),
            ({'on-success': {'next': [{'email': '<% $.v1 %>'}]}}, False),
            ({'on-success': {'next': 'email'}}, False),
            ({'on-success': {'next': ['email']}}, False),
            ({'on-success': {'next': [{'email': 'email'}]}}, True),
            ({'on-success': {'next': [{'email': 'email',
                                       'more_email': 'more_email'}]}}, True),
            ({'on-success': {'next': {'email': 'email'}}}, True),
            ({'on-error': {'publish': {'var1': 1234}}}, True),
            ({'on-error': {'publish': {'branch': {'var1': 1234}}}}, False),
            ({'on-error': {'publish': {
                'branch': {'var1': 1234},
                'global': {'global_var1': 'val'},
                'atomic': {'atomic_var1': '<% my_func() %>'}}}},
             False),
            ({'on-error': {'publish': {
                'branch': {'var1': 1234},
                'global': {'global_var1': '<% * %>'},
                'atomic': {'atomic_var1': '<% my_func() %>'}}}},
             True),
            ({'on-error': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': 'email'}},
             False),
            ({'on-error': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': ['email']}},
             False),
            ({'on-error': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': [{'email': '<% 1 %>'}]}},
             False),
            ({'on-error': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': [{'email': '<% $.v1 and $.v2 %>'}]}},
             False),
            ({'on-error': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': [{'email': '<% * %>'}]}},
             True),
            ({'on-error': {'next': [{'email': '<% $.v1 %>'}]}}, False),
            ({'on-error': {'next': 'email'}}, False),
            ({'on-error': {'next': ['email']}}, False),
            ({'on-error': {'next': [{'email': 'email'}]}}, True),
            ({'on-error': {'next': [{'email': 'email',
                                     'more_email': 'more_email'}]}}, True),
            ({'on-error': {'next': {'email': 'email'}}}, True),
            ({'on-complete': {'publish': {'var1': 1234}}}, True),
            ({'on-complete': {'publish': {'branch': {'var1': 1234}}}},
             False),
            ({'on-complete': {'publish': {
                'branch': {'var1': 1234},
                'global': {'global_var1': 'val'},
                'atomic': {'atomic_var1': '<% my_func() %>'}}}},
             False),
            ({'on-complete': {'publish': {
                'branch': {'var1': 1234},
                'global': {'global_var1': '<% * %>'},
                'atomic': {'atomic_var1': '<% my_func() %>'}}}},
             True),
            ({'on-complete': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': 'email'}},
             False),
            ({'on-complete': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': ['email']}},
             False),
            ({'on-complete': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': [{'email': '<% 1 %>'}]}},
             False),
            ({'on-complete': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': [{'email': '<% $.v1 and $.v2 %>'}]}},
             False),
            ({'on-complete': {
                'publish': {
                    'branch': {'var1': 1234},
                    'global': {'global_var1': 'val'},
                    'atomic': {'atomic_var1': '<% my_func() %>'}},
                'next': [{'email': '<% * %>'}]}},
             True),
            ({'on-complete': {'next': [{'email': '<% $.v1 %>'}]}}, False),
            ({'on-complete': {'next': 'email'}}, False),
            ({'on-complete': {'next': ['email']}}, False),
            ({'on-complete': {'next': [{'email': 'email'}]}}, True),
            ({'on-complete': {'next': [{'email': 'email',
                                        'more_email': 'more_email'}]}},
             True),
            ({'on-complete': {'next': {'email': 'email'}}}, True)
        ]

        for transition, expect_error in tests:
            overlay = {'test': {'tasks': {}}}

            utils.merge_dicts(overlay['test']['tasks'], {'get': transition})

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_join(self):
        tests = [
            ({'join': ''}, True),
            ({'join': None}, True),
            ({'join': 'all'}, False),
            ({'join': 'one'}, False),
            ({'join': 0}, False),
            ({'join': 2}, False),
            ({'join': 3}, True),
            ({'join': '3'}, True),
            ({'join': -3}, True)
        ]

        on_success = {'on-success': ['email']}

        for join, expect_error in tests:
            overlay = {'test': {'tasks': {}}}

            utils.merge_dicts(overlay['test']['tasks'], {'get': on_success})
            utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success})
            utils.merge_dicts(overlay['test']['tasks'], {'email': join})

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_requires(self):
        tests = [
            ({'requires': ''}, True),
            ({'requires': []}, True),
            ({'requires': ['']}, True),
            ({'requires': None}, True),
            ({'requires': 12345}, True),
            ({'requires': ['echo']}, False),
            ({'requires': ['echo', 'get']}, False),
            ({'requires': 'echo'}, False),
        ]

        for require, expect_error in tests:
            overlay = {'test': {'tasks': {}}}

            utils.merge_dicts(overlay['test'], {'type': 'reverse'})
            utils.merge_dicts(overlay['test']['tasks'], {'email': require})

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_keep_result(self):
        tests = [
            ({'keep-result': ''}, True),
            ({'keep-result': []}, True),
            ({'keep-result': 'asd'}, True),
            ({'keep-result': None}, True),
            ({'keep-result': 12345}, True),
            ({'keep-result': True}, False),
            ({'keep-result': False}, False),
            ({'keep-result': "<% 'a' in $.val %>"}, False),
            ({'keep-result': '<% 1 + 2 %>'}, False),
            ({'keep-result': '<% * %>'}, True),
            ({'keep-result': "{{ 'a' in _.val }}"}, False),
            ({'keep-result': '{{ 1 + 2 }}'}, False),
            ({'keep-result': '{{ * }}'}, True)
        ]

        for keep_result, expect_error in tests:
            overlay = {'test': {'tasks': {}}}

            utils.merge_dicts(overlay['test']['tasks'],
                              {'email': keep_result})

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_safe_rerun(self):
        tests = [
            ({'safe-rerun': True}, False),
            ({'safe-rerun': False}, False),
            ({'safe-rerun': '<% false %>'}, False),
            ({'safe-rerun': '<% true %>'}, False),
            ({'safe-rerun': '<% * %>'}, True),
            ({'safe-rerun': None}, True)
        ]

        for default, expect_error in tests:
            overlay = {'test': {'task-defaults': {}}}

            utils.merge_dicts(overlay['test']['task-defaults'], default)

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )
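For contrast with the failure tables above, a hedged example of a workflow fragment these validators accept (the task names and expressions are made up): YAQL `<% %>` and Jinja `{{ }}` expressions pass, while a bare `*` inside either is rejected.

    from mistral.lang import parser as spec_parser

    VALID_TASKS_YAML = """
    version: '2.0'

    test:
      tasks:
        task1:
          action: std.http url=<% $.url %>
          publish:
            result: <% task().result %>
          on-success:
            - task2

        task2:
          action: std.noop
    """


    def demo_valid_tasks():
        # Should parse without raising DSLParsingException.
        return spec_parser.get_workflow_list_spec_from_yaml(VALID_TASKS_YAML)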
# ---- mistral-10.0.0.0b3/mistral/tests/unit/lang/v2/test_workbook.py ----

# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import re
import yaml

from mistral import exceptions as exc
from mistral.lang.v2 import workbook
from mistral.tests.unit.lang.v2 import base


class WorkbookSpecValidationTest(base.WorkbookSpecValidationTestCase):

    def test_build_valid_workbook_spec(self):
        wb_spec = self._parse_dsl_spec(dsl_file='my_workbook.yaml')

        # Workbook.
        act_specs = wb_spec.get_actions()
        wf_specs = wb_spec.get_workflows()

        self.assertEqual('2.0', wb_spec.get_version())
        self.assertEqual('my_workbook', wb_spec.get_name())
        self.assertEqual('This is a test workbook', wb_spec.get_description())
        self.assertListEqual(['test', 'v2'], wb_spec.get_tags())
        self.assertIsNotNone(act_specs)
        self.assertIsNotNone(wf_specs)

        # Actions.
        action_spec = act_specs.get('action1')

        self.assertIsNotNone(action_spec)
        self.assertEqual('2.0', action_spec.get_version())
        self.assertEqual('action1', action_spec.get_name())
        self.assertEqual(
            'This is a test ad-hoc action',
            action_spec.get_description()
        )
        self.assertListEqual(['test', 'v2'], action_spec.get_tags())
        self.assertEqual('std.echo', action_spec.get_base())
        self.assertDictEqual(
            {'output': 'Hello <% $.name %>!'},
            action_spec.get_base_input()
        )
        self.assertDictEqual({}, action_spec.get_input())
        self.assertEqual('<% $ %>', action_spec.get_output())

        # Workflows.
        self.assertEqual(2, len(wf_specs))

        wf1_spec = wf_specs.get('wf1')

        self.assertEqual('2.0', wf1_spec.get_version())
        self.assertEqual('wf1', wf1_spec.get_name())
        self.assertEqual(
            'This is a test workflow',
            wf1_spec.get_description()
        )
        self.assertListEqual(['test', 'v2'], wf1_spec.get_tags())
        self.assertEqual('reverse', wf1_spec.get_type())
        self.assertEqual(2, len(wf1_spec.get_tasks()))

        # Tasks.
        task1_spec = wf1_spec.get_tasks().get('task1')

        self.assertIsNotNone(task1_spec)
        self.assertEqual('2.0', task1_spec.get_version())
        self.assertEqual('task1', task1_spec.get_name())
        self.assertEqual('This is a test task', task1_spec.get_description())
        self.assertEqual('action1', task1_spec.get_action_name())
        self.assertEqual({'name': '<% $.name %>'}, task1_spec.get_input())

        policies = task1_spec.get_policies()

        self.assertEqual(2, policies.get_wait_before())
        self.assertEqual(5, policies.get_wait_after())
        self.assertEqual(3, policies.get_concurrency())

        retry_spec = policies.get_retry()

        self.assertEqual(10, retry_spec.get_count())
        self.assertEqual(30, retry_spec.get_delay())
        self.assertEqual('<% $.my_val = 10 %>', retry_spec.get_break_on())

        task2_spec = wf1_spec.get_tasks().get('task2')

        self.assertIsNotNone(task2_spec)
        self.assertEqual('2.0', task2_spec.get_version())
        self.assertEqual('task2', task2_spec.get_name())
        self.assertEqual('std.echo', task2_spec.get_action_name())
        self.assertIsNone(task2_spec.get_workflow_name())
        self.assertEqual(
            {'output': 'Thanks <% $.name %>!'},
            task2_spec.get_input()
        )

        wf2_spec = wf_specs.get('wf2')

        self.assertEqual('2.0', wf2_spec.get_version())
        self.assertEqual('wf2', wf2_spec.get_name())
        self.assertListEqual(['test', 'v2'], wf2_spec.get_tags())
        self.assertEqual('direct', wf2_spec.get_type())
        self.assertEqual(11, len(wf2_spec.get_tasks()))

        task_defaults_spec = wf2_spec.get_task_defaults()

        self.assertListEqual(
            [('fail', '<% $.my_val = 0 %>', {})],
            task_defaults_spec.get_on_error().get_next()
        )
        self.assertListEqual(
            [('pause', '', {})],
            task_defaults_spec.get_on_success().get_next()
        )
        self.assertListEqual(
            [('succeed', '', {})],
            task_defaults_spec.get_on_complete().get_next()
        )

        task3_spec = wf2_spec.get_tasks().get('task3')

        self.assertIsNotNone(task3_spec)
        self.assertEqual('2.0', task3_spec.get_version())
        self.assertEqual('task3', task3_spec.get_name())
        self.assertIsNone(task3_spec.get_action_name())
        self.assertEqual('wf1', task3_spec.get_workflow_name())
        self.assertEqual(
            {
                'name': 'John Doe',
                'age': 32,
                'param1': None,
                'param2': False
            },
            task3_spec.get_input()
        )
        self.assertListEqual(
            [('task4', '<% $.my_val = 1 %>', {})],
            task3_spec.get_on_error().get_next()
        )
        self.assertListEqual(
            [('task5', '<% $.my_val = 2 %>', {})],
            task3_spec.get_on_success().get_next()
        )
        self.assertListEqual(
            [('task6', '<% $.my_val = 3 %>', {})],
            task3_spec.get_on_complete().get_next()
        )

        task7_spec = wf2_spec.get_tasks().get('task7')

        self.assertEqual(
            {
                'is_true': True,
                'object_list': [1, None, 'str'],
                'is_string': '50'
            },
            task7_spec.get_input()
        )
        self.assertEqual(
            {'vm_info': '<% $.vms %>'},
            task7_spec.get_with_items()
        )

        task8_spec = wf2_spec.get_tasks().get('task8')

        self.assertEqual(
            {
                'itemX': '<% $.arrayI %>',
                "itemY": '<% $.arrayJ %>'
            },
            task8_spec.get_with_items()
        )
        self.assertEqual(
            {
                'expr_list': ['<% $.v %>', '<% $.k %>'],
                'expr': '<% $.value %>',
            },
            task8_spec.get_input()
        )
        self.assertEqual('nova', task8_spec.get_target())

        task9_spec = wf2_spec.get_tasks().get('task9')

        self.assertEqual('all', task9_spec.get_join())

        task10_spec = wf2_spec.get_tasks().get('task10')

        self.assertEqual(2, task10_spec.get_join())

        task11_spec = wf2_spec.get_tasks().get('task11')

        self.assertEqual('one', task11_spec.get_join())

        task12_spec = wf2_spec.get_tasks().get('task12')

        self.assertDictEqual(
            {
                'url': 'http://site.com?q=<% $.query %>',
                'params': ''
            },
            task12_spec.get_input()
        )

        task13_spec = wf2_spec.get_tasks().get('task13')

        self.assertEqual('std.noop', task13_spec.get_action_name())
        self.assertEqual('No-op task', task13_spec.get_description())

    def test_adhoc_action_with_base_in_one_string(self):
        wb_spec = self._parse_dsl_spec(dsl_file='my_workbook.yaml')

        act_specs = wb_spec.get_actions()
        action_spec = act_specs.get("action2")

        self.assertEqual('std.echo', action_spec.get_base())
        self.assertEqual(
            {'output': 'Echo output'},
            action_spec.get_base_input()
        )

    def test_spec_to_dict(self):
        wb_spec = self._parse_dsl_spec(dsl_file='my_workbook.yaml')

        d = wb_spec.to_dict()

        self.assertEqual('2.0', d['version'])
        self.assertEqual('2.0', d['workflows']['version'])
        self.assertEqual('2.0', d['workflows']['wf1']['version'])

    def test_version_required(self):
        dsl_dict = copy.deepcopy(self._dsl_blank)
        dsl_dict.pop('version', None)

        # TODO(m4dcoder): Check required property error when v1 is deprecated.
        # The version property is not required for v1 workbook whereas it is
        # a required property in v2. For backward compatibility, if no version
        # is provided, the workbook spec parser defaults to v1 and the
        # required property exception is not triggered. However, a different
        # spec validation error returns due to drastically different schema
        # between workbook versions.
        self.assertRaises(
            exc.DSLParsingException,
            self._spec_parser,
            yaml.safe_dump(dsl_dict)
        )

    def test_version(self):
        tests = [
            ({'version': None}, True),
            ({'version': ''}, True),
            ({'version': '1.0'}, True),
            ({'version': '2.0'}, False),
            ({'version': 2.0}, False),
            ({'version': 2}, False)
        ]

        for version, expect_error in tests:
            self._parse_dsl_spec(changes=version, expect_error=expect_error)

    def test_name_required(self):
        dsl_dict = copy.deepcopy(self._dsl_blank)
        dsl_dict.pop('name', None)

        exception = self.assertRaises(
            exc.DSLParsingException,
            self._spec_parser,
            yaml.safe_dump(dsl_dict)
        )

        self.assertIn("'name' is a required property", str(exception))

    def test_name(self):
        tests = [
            ({'name': ''}, True),
            ({'name': None}, True),
            ({'name': 12345}, True),
            ({'name': 'foobar'}, False)
        ]

        for name, expect_error in tests:
            self._parse_dsl_spec(changes=name, expect_error=expect_error)

    def test_description(self):
        tests = [
            ({'description': ''}, True),
            ({'description': None}, True),
            ({'description': 12345}, True),
            ({'description': 'This is a test workflow.'}, False)
        ]

        for description, expect_error in tests:
            self._parse_dsl_spec(
                changes=description,
                expect_error=expect_error
            )

    def test_tags(self):
        tests = [
            ({'tags': ''}, True),
            ({'tags': ['']}, True),
            ({'tags': None}, True),
            ({'tags': 12345}, True),
            ({'tags': ['foo', 'bar']}, False),
            ({'tags': ['foobar', 'foobar']}, True)
        ]

        for tags, expect_error in tests:
            self._parse_dsl_spec(changes=tags, expect_error=expect_error)

    def test_actions(self):
        actions = {
            'version': '2.0',
            'noop': {
                'base': 'std.noop'
            },
            'echo': {
                'base': 'std.echo'
            }
        }

        tests = [
            ({'actions': []}, True),
            ({'actions': {}}, True),
            ({'actions': None}, True),
            ({'actions': {'version': None}}, True),
            ({'actions': {'version': ''}}, True),
            ({'actions': {'version': '1.0'}}, True),
            ({'actions': {'version': '2.0'}}, False),
            ({'actions': {'version': 2.0}}, False),
            ({'actions': {'version': 2}}, False),
            ({'actions': {'noop': actions['noop']}}, False),
            ({'actions': {'version': '2.0', 'noop': 'std.noop'}}, True),
            ({'actions': actions}, False)
        ]

        for adhoc_actions, expect_error in tests:
            self._parse_dsl_spec(
                changes=adhoc_actions,
                expect_error=expect_error
            )

    def test_workflows(self):
        workflows = {
            'version': '2.0',
            'wf1': {
                'tasks': {
                    'noop': {
                        'action': 'std.noop'
                    }
                }
            },
            'wf2': {
                'tasks': {
                    'echo': {
                        'action': 'std.echo output="This is a test."'
                    }
                }
            }
        }
        tests = [
            # ({'workflows': []}, True),
            # ({'workflows': {}}, True),
            # ({'workflows': None}, True),
            # ({'workflows': {'version': None}}, True),
            # ({'workflows': {'version': ''}}, True),
            # ({'workflows': {'version': '1.0'}}, True),
            # ({'workflows': {'version': '2.0'}}, False),
            # ({'workflows': {'version': 2.0}}, False),
            # ({'workflows': {'version': 2}}, False),
            # ({'workflows': {'wf1': workflows['wf1']}}, False),
            ({'workflows': {'version': '2.0', 'wf1': 'wf1'}}, True),
            ({'workflows': workflows}, False)
        ]

        for workflows, expect_error in tests:
            self._parse_dsl_spec(changes=workflows, expect_error=expect_error)

    def test_workflow_name_validation(self):
        wb_spec = self._parse_dsl_spec(dsl_file='workbook_schema_test.yaml')

        d = wb_spec.to_dict()

        self.assertEqual('2.0', d['version'])
        self.assertEqual('2.0', d['workflows']['version'])

        workflow_names = ['workflowversion', 'versionworkflow',
                          'workflowversionworkflow', 'version_workflow']

        action_names = ['actionversion', 'versionaction',
                        'actionversionaction']

        for name in workflow_names:
            self.assertEqual('2.0', d['workflows'][name]['version'])
            self.assertEqual(name, d['workflows'][name]['name'])

        for name in action_names:
            self.assertEqual('2.0', d['actions'][name]['version'])
            self.assertEqual(name, d['actions'][name]['name'])

    def test_name_regex(self):
        # We want to match a string containing version at any point.
        valid_names = (
            "workflowversion",
            "versionworkflow",
            "workflowversionworkflow",
            "version_workflow",
            "version-workflow",
        )

        for valid in valid_names:
            result = re.match(workbook.NON_VERSION_WORD_REGEX, valid)

            self.assertIsNotNone(
                result,
                "Expected match for: {}".format(valid)
            )

        # ... except, we don't want to match a string that isn't just one word
        # or is exactly "version"
        invalid_names = ("version", "my workflow")

        for invalid in invalid_names:
            result = re.match(workbook.NON_VERSION_WORD_REGEX, invalid)

            self.assertIsNone(
                result,
                "Didn't expect a match for: {}".format(invalid)
            )
# ---- mistral-10.0.0.0b3/mistral/tests/unit/lang/v2/test_workflows.py ----

# Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import yaml

from mistral import exceptions as exc
from mistral.tests.unit.lang.v2 import base
from mistral_lib import utils


class WorkflowSpecValidationTest(base.WorkflowSpecValidationTestCase):

    def test_workflow_types(self):
        tests = [
            ({'type': 'direct'}, False),
            ({'type': 'reverse'}, False),
            ({'type': 'circular'}, True),
            ({'type': None}, True)
        ]

        for wf_type, expect_error in tests:
            overlay = {'test': wf_type}

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_direct_workflow(self):
        overlay = {'test': {'type': 'direct', 'tasks': {}}}
        join = {'join': 'all'}
        on_success = {'on-success': ['email']}

        utils.merge_dicts(overlay['test']['tasks'], {'get': on_success})
        utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success})
        utils.merge_dicts(overlay['test']['tasks'], {'email': join})

        wfs_spec = self._parse_dsl_spec(
            add_tasks=True,
            changes=overlay,
            expect_error=False
        )

        self.assertEqual(1, len(wfs_spec.get_workflows()))
        self.assertEqual('test', wfs_spec.get_workflows()[0].get_name())
        self.assertEqual('direct', wfs_spec.get_workflows()[0].get_type())

    def test_direct_workflow_invalid_task(self):
        overlay = {
            'test': {
                'type': 'direct',
                'tasks': {}
            }
        }

        requires = {'requires': ['echo', 'get']}

        utils.merge_dicts(overlay['test']['tasks'], {'email': requires})

        self._parse_dsl_spec(
            add_tasks=True,
            changes=overlay,
            expect_error=True
        )

    def test_direct_workflow_no_start_tasks(self):
        overlay = {
            'test': {
                'type': 'direct',
                'tasks': {
                    'task1': {'on-complete': 'task2'},
                    'task2': {'on-complete': 'task1'}
                }
            }
        }

        self._parse_dsl_spec(
            add_tasks=False,
            changes=overlay,
            expect_error=True
        )

    def test_direct_workflow_invalid_join(self):
        tests = [
            ({'task3': {'join': 2}}, False),
            ({'task3': {'join': 5}}, True),
            ({'task3': {'join': 1}}, False),
            ({'task3': {'join': 'one'}}, False),
            ({'task3': {'join': 'all'}}, False),
            ({'task4': {'join': 'all'}}, True),
            ({'task4': {'join': 1}}, True),
            ({'task4': {'join': 'one'}}, True)
        ]

        for test in tests:
            overlay = {
                'test': {
                    'type': 'direct',
                    'tasks': {
                        'task1': {'on-complete': 'task3'},
                        'task2': {'on-complete': 'task3'}
                    }
                }
            }

            utils.merge_dicts(overlay['test']['tasks'], test[0])

            self._parse_dsl_spec(
                add_tasks=False,
                changes=overlay,
                expect_error=test[1]
            )

    def test_reverse_workflow(self):
        overlay = {'test': {'type': 'reverse', 'tasks': {}}}
        require = {'requires': ['echo', 'get']}

        utils.merge_dicts(overlay['test']['tasks'], {'email': require})

        wfs_spec = self._parse_dsl_spec(
            add_tasks=True,
            changes=overlay,
            expect_error=False
        )

        self.assertEqual(1, len(wfs_spec.get_workflows()))
        self.assertEqual('test', wfs_spec.get_workflows()[0].get_name())
        self.assertEqual('reverse', wfs_spec.get_workflows()[0].get_type())

    def test_reverse_workflow_invalid_task(self):
        overlay = {'test': {'type': 'reverse', 'tasks': {}}}
        join = {'join': 'all'}
        on_success = {'on-success': ['email']}

        utils.merge_dicts(overlay['test']['tasks'], {'get': on_success})
        utils.merge_dicts(overlay['test']['tasks'], {'echo': on_success})
        utils.merge_dicts(overlay['test']['tasks'], {'email': join})

        self._parse_dsl_spec(
            add_tasks=True,
            changes=overlay,
            expect_error=True
        )

    def test_version_required(self):
        dsl_dict = copy.deepcopy(self._dsl_blank)
        dsl_dict.pop('version', None)

        exception = self.assertRaises(
            exc.DSLParsingException,
            self._spec_parser,
            yaml.safe_dump(dsl_dict)
        )

        self.assertIn("'version' is a required property", str(exception))

    def test_version(self):
        tests = [
            ({'version': None}, True),
            ({'version': ''}, True),
            ({'version': '2.0'}, False),
            ({'version': 2.0}, False),
            ({'version': 2}, False)
        ]

        for version, expect_error in tests:
            self._parse_dsl_spec(
                add_tasks=True,
                changes=version,
                expect_error=expect_error
            )

    def test_inputs(self):
        tests = [
            ({'input': ['var1', 'var2']}, False),
            ({'input': ['var1', 'var1']}, True),
            ({'input': [12345]}, True),
            ({'input': [None]}, True),
            ({'input': ['']}, True),
            ({'input': None}, True),
            ({'input': []}, True),
            ({'input': ['var1', {'var2': 2}]}, False),
            ({'input': [{'var1': 1}, {'var2': 2}]}, False),
            ({'input': [{'var1': None}]}, False),
            ({'input': [{'var1': 1}, {'var1': 1}]}, True),
            ({'input': [{'var1': 1, 'var2': 2}]}, True)
        ]

        for wf_input, expect_error in tests:
            overlay = {'test': wf_input}

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_outputs(self):
        tests = [
            ({'output': {'k1': 'a', 'k2': 1, 'k3': True, 'k4': None}},
             False),
            ({'output': {'k1': '<% $.v1 %>'}}, False),
            ({'output': {'k1': '<% 1 + 2 %>'}}, False),
            ({'output': {'k1': '<% * %>'}}, True),
            ({'output': []}, True),
            ({'output': 'whatever'}, True),
            ({'output': None}, True),
            ({'output': {}}, True)
        ]

        for wf_output, expect_error in tests:
            overlay = {'test': wf_output}

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_vars(self):
        tests = [
            ({'vars': {'v1': 'a', 'v2': 1, 'v3': True, 'v4': None}}, False),
            ({'vars': {'v1': '<% $.input_var1 %>'}}, False),
            ({'vars': {'v1': '<% 1 + 2 %>'}}, False),
            ({'vars': {'v1': '<% * %>'}}, True),
            ({'vars': {'v1': '{{ _.input_var1 }}'}}, False),
            ({'vars': {'v1': '{{ 1 + 2 }}'}}, False),
            ({'vars': {'v1': '{{ * }}'}}, True),
            ({'vars': []}, True),
            ({'vars': 'whatever'}, True),
            ({'vars': None}, True),
            ({'vars': {}}, True)
        ]

        for wf_vars, expect_error in tests:
            overlay = {'test': wf_vars}

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_tasks_required(self):
        exception = self._parse_dsl_spec(
            add_tasks=False,
            expect_error=True
        )

        self.assertIn("'tasks' is a required property", str(exception))

    def test_tasks(self):
        tests = [
            ({'tasks': {}}, True),
            ({'tasks': None}, True),
            ({'tasks': self._dsl_tasks}, False)
        ]

        for wf_tasks, expect_error in tests:
            overlay = {'test': wf_tasks}

            self._parse_dsl_spec(
                add_tasks=False,
                changes=overlay,
                expect_error=expect_error
            )

    def test_task_defaults(self):
        tests = [
            ({'on-success': ['email']}, False),
            ({'on-success': [{'email': '<% 1 %>'}]}, False),
            ({'on-success': [{'email': '<% 1 %>'}, 'echo']}, False),
            ({'on-success': [{'email': '<% $.v1 in $.v2 %>'}]}, False),
            ({'on-success': [{'email': '<% * %>'}]}, True),
            ({'on-success': [{'email': '{{ 1 }}'}]}, False),
            ({'on-success': [{'email': '{{ 1 }}'}, 'echo']}, False),
            ({'on-success': [{'email': '{{ _.v1 in _.v2 }}'}]}, False),
            ({'on-success': [{'email': '{{ * }}'}]}, True),
            ({'on-success': 'email'}, False),
            ({'on-success': None}, True),
            ({'on-success': ['']}, True),
            ({'on-success': []}, True),
            ({'on-success': ['email', 'email']}, True),
            ({'on-success': ['email', 12345]}, True),
            ({'on-error': ['email']}, False),
            ({'on-error': [{'email': '<% 1 %>'}]}, False),
            ({'on-error': [{'email': '<% 1 %>'}, 'echo']}, False),
            ({'on-error': [{'email': '<% $.v1 in $.v2 %>'}]}, False),
            ({'on-error': [{'email': '<% * %>'}]}, True),
            ({'on-error': [{'email': '{{ 1 }}'}]}, False),
            ({'on-error': [{'email': '{{ 1 }}'}, 'echo']}, False),
            ({'on-error': [{'email': '{{ _.v1 in _.v2 }}'}]}, False),
            ({'on-error': [{'email': '{{ * }}'}]}, True),
            ({'on-error': 'email'}, False),
            ({'on-error': None}, True),
            ({'on-error': ['']}, True),
            ({'on-error': []}, True),
            ({'on-error': ['email', 'email']}, True),
            ({'on-error': ['email', 12345]}, True),
            ({'on-complete': ['email']}, False),
            ({'on-complete': [{'email': '<% 1 %>'}]}, False),
            ({'on-complete': [{'email': '<% 1 %>'}, 'echo']}, False),
            ({'on-complete': [{'email': '<% $.v1 in $.v2 %>'}]}, False),
            ({'on-complete': [{'email': '<% * %>'}]}, True),
            ({'on-complete': [{'email': '{{ 1 }}'}]}, False),
            ({'on-complete': [{'email': '{{ 1 }}'}, 'echo']}, False),
            ({'on-complete': [{'email': '{{ _.v1 in _.v2 }}'}]}, False),
            ({'on-complete': [{'email': '{{ * }}'}]}, True),
            ({'on-complete': 'email'}, False),
            ({'on-complete': None}, True),
            ({'on-complete': ['']}, True),
            ({'on-complete': []}, True),
            ({'on-complete': ['email', 'email']}, True),
            ({'on-complete': ['email', 12345]}, True),
            ({'requires': ''}, True),
            ({'requires': []}, True),
            ({'requires': ['']}, True),
            ({'requires': None}, True),
            ({'requires': 12345}, True),
            ({'requires': ['echo']}, False),
            ({'requires': ['echo', 'get']}, False),
            ({'requires': 'echo'}, False),
            ({'retry': {'count': 3, 'delay': 1}}, False),
            ({'retry': {'count': '<% 3 %>', 'delay': 1}}, False),
            ({'retry': {'count': '<% * %>', 'delay': 1}}, True),
            ({'retry': {'count': 3, 'delay': '<% 1 %>'}}, False),
            ({'retry': {'count': 3, 'delay': '<% * %>'}}, True),
            ({'retry': {'count': '{{ 3 }}', 'delay': 1}}, False),
            ({'retry': {'count': '{{ * }}', 'delay': 1}}, True),
            ({'retry': {'count': 3, 'delay': '{{ 1 }}'}}, False),
            ({'retry': {'count': 3, 'delay': '{{ * }}'}}, True),
            ({'retry': {'count': -3, 'delay': 1}}, True),
            ({'retry': {'count': 3, 'delay': -1}}, True),
            ({'retry': {'count': '3', 'delay': 1}}, True),
            ({'retry': {'count': 3, 'delay': '1'}}, True),
            ({'retry': None}, True),
            ({'wait-before': 1}, False),
            ({'wait-before': '<% 1 %>'}, False),
            ({'wait-before': '<% * %>'}, True),
            ({'wait-before': '{{ 1 }}'}, False),
            ({'wait-before': '{{ * }}'}, True),
            ({'wait-before': -1}, True),
            ({'wait-before': 1.1}, True),
            ({'wait-before': '1'}, True),
            ({'wait-after': 1}, False),
            ({'wait-after': '<% 1 %>'}, False),
            ({'wait-after': '<% * %>'}, True),
            ({'wait-after': '{{ 1 }}'}, False),
            ({'wait-after': '{{ * }}'}, True),
            ({'wait-after': -1}, True),
            ({'wait-after': 1.1}, True),
            ({'wait-after': '1'}, True),
            ({'timeout': 300}, False),
            ({'timeout': '<% 300 %>'}, False),
            ({'timeout': '<% * %>'}, True),
            ({'timeout': '{{ 300 }}'}, False),
            ({'timeout': '{{ * }}'}, True),
            ({'timeout': -300}, True),
            ({'timeout': 300.1}, True),
            ({'timeout': '300'}, True),
            ({'pause-before': False}, False),
            ({'pause-before': '<% False %>'}, False),
            ({'pause-before': '<% * %>'}, True),
            ({'pause-before': '{{ False }}'}, False),
            ({'pause-before': '{{ * }}'}, True),
            ({'pause-before': 'False'}, True),
            ({'concurrency': 10}, False),
            ({'concurrency': '<% 10 %>'}, False),
            ({'concurrency': '<% * %>'}, True),
            ({'concurrency': '{{ 10 }}'}, False),
            ({'concurrency': '{{ * }}'}, True),
            ({'concurrency': -10}, True),
            ({'concurrency': 10.1}, True),
            ({'concurrency': '10'}, True),
            ({'safe-rerun': True}, False),
            ({'safe-rerun': False}, False),
            ({'safe-rerun': '<% false %>'}, False),
            ({'safe-rerun': '<% true %>'}, False),
            ({'safe-rerun': '<% * %>'}, True),
            ({'safe-rerun': None}, True)
        ]

        for default, expect_error in tests:
            overlay = {'test': {'task-defaults': {}}}

            utils.merge_dicts(overlay['test']['task-defaults'], default)

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )

    def test_invalid_item(self):
        overlay = {'name': 'invalid'}

        exception = self._parse_dsl_spec(changes=overlay, expect_error=True)

        self.assertIn("Invalid DSL",
                      str(exception))

    def test_invalid_name(self):
        invalid_wf = {
            'version': '2.0',
            'b98180ba-48a0-4e26-ab2e-50dc224f6fd1': {
                'type': 'direct',
                'tasks': {'t1': {'action': 'std.noop'}}
            }
        }

        dsl_yaml = yaml.safe_dump(invalid_wf, default_flow_style=False)

        exception = self.assertRaises(
            exc.InvalidModelException,
            self._spec_parser,
            dsl_yaml
        )

        self.assertIn(
            "Workflow name cannot be in the format of UUID",
            str(exception)
        )

    def test_task_name(self):
        wf = {
            'version': '2.0',
            'workflowname': {
                'type': 'direct',
                'tasks': {
                    'task1': {'action': 'std.noop',
                              'on-success': ['t2-task']},
                    't2-task': {'action': 'std.noop',
                                'on-success': ['t3.task']},
                    't3.task': {'action': 'std.noop',
                                'on-success': ['t4,task']},
                    't4,task': {'action': 'std.noop',
                                'on-success': ['t5+task']},
                    't5+task': {'action': 'std.noop',
                                'on-success': ['t6$task']},
                    't6$task': {'action': 'std.noop',
                                'on-success': ['t7%task']},
                    't7%task': {'action': 'std.noop',
                                'on-success': ['t8^task']},
                    't8^task': {'action': 'std.noop'}
                }
            }
        }

        dsl_yaml = yaml.safe_dump(wf, default_flow_style=False)

        self._spec_parser(dsl_yaml)

    def test_tags(self):
        tests = [
            ({'tags': ''}, True),
            ({'tags': []}, True),
            ({'tags': ['']}, True),
            ({'tags': ['tag']}, False),
            ({'tags': ['tag', 'tag']}, True),
            ({'tags': None}, True)
        ]

        for wf_tags, expect_error in tests:
            overlay = {'test': wf_tags}

            self._parse_dsl_spec(
                add_tasks=True,
                changes=overlay,
                expect_error=expect_error
            )
# ---- mistral-10.0.0.0b3/mistral/tests/unit/mstrlfixtures/__init__.py (empty) ----

# ---- mistral-10.0.0.0b3/mistral/tests/unit/mstrlfixtures/hacking.py ----

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(morganfainberg) This file shouldn't have flake8 run on it as it has
# code examples that will fail normal CI pep8/flake8 tests. This is expected.
# The code has been moved here to ensure that proper tests occur on the
# hacking/test_checks test cases.
# flake8: noqa

import fixtures


class HackingLogging(fixtures.Fixture):

    shared_imports = """
    import logging
    from oslo_log import log
    from oslo_log import log as logging
    """

    assert_not_using_deprecated_warn = {
        'code': """
            # Logger.warn has been deprecated in Python3 in favor of
            # Logger.warning
            LOG = log.getLogger(__name__)
            LOG.warn('text')
        """,
        'expected_errors': [
            (8, 9, 'M001'),
        ],
    }

# ---- mistral-10.0.0.0b3/mistral/tests/unit/mstrlfixtures/policy_fixtures.py ----

# Copyright 2016 NEC Corporation.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import fixtures

from mistral.api import access_control as acl
from mistral import policies
from oslo_config import cfg
from oslo_policy import opts as policy_opts
from oslo_policy import policy as oslo_policy


class PolicyFixture(fixtures.Fixture):

    def setUp(self):
        super(PolicyFixture, self).setUp()

        policy_opts.set_defaults(cfg.CONF)

        acl._ENFORCER = oslo_policy.Enforcer(cfg.CONF)
        acl._ENFORCER.register_defaults(policies.list_rules())
        acl._ENFORCER.load_rules()

        self.addCleanup(acl._ENFORCER.clear)

    def register_rules(self, rules):
        enf = acl._ENFORCER

        for rule_name, rule_check_str in rules.items():
            enf.register_default(oslo_policy.RuleDefault(rule_name,
                                                         rule_check_str))

    def change_policy_definition(self, rules):
        enf = acl._ENFORCER

        for rule_name, rule_check_str in rules.items():
            enf.rules[rule_name] = oslo_policy.RuleDefault(
                rule_name, rule_check_str).check
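A hedged usage sketch for the fixture above: a test can install PolicyFixture in setUp() and then tighten a single rule to verify enforcement kicks in. The rule name below is hypothetical; real names come from mistral.policies.

    import testtools

    from mistral.tests.unit.mstrlfixtures import policy_fixtures


    class ExamplePolicyTest(testtools.TestCase):

        def setUp(self):
            super(ExamplePolicyTest, self).setUp()

            # Installs a fresh enforcer with Mistral's default rules and
            # cleans it up automatically when the test ends.
            self.policy = self.useFixture(policy_fixtures.PolicyFixture())

        def test_deny_rule(self):
            # '!' is the oslo.policy check string that always denies.
            self.policy.change_policy_definition(
                {'workflows:create': '!'}
            )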
from oslo_log import log as logging

from mistral.tests.unit.engine import base as engine_test_base

LOG = logging.getLogger(__name__)


class NotifierTestCase(engine_test_base.EngineTestCase):

    def await_workflow_success(self, wf_ex_id, post_delay=1):
        # Override the original wait method to add a delay to allow enough
        # time for the notification events to get processed.
        super(NotifierTestCase, self).await_workflow_success(wf_ex_id)
        self._sleep(post_delay)

    def await_workflow_error(self, wf_ex_id, post_delay=1):
        # Override the original wait method to add a delay to allow enough
        # time for the notification events to get processed.
        super(NotifierTestCase, self).await_workflow_error(wf_ex_id)
        self._sleep(post_delay)

    def await_workflow_paused(self, wf_ex_id, post_delay=1):
        # Override the original wait method to add a delay to allow enough
        # time for the notification events to get processed.
        super(NotifierTestCase, self).await_workflow_paused(wf_ex_id)
        self._sleep(post_delay)

    def await_workflow_cancelled(self, wf_ex_id, post_delay=1):
        # Override the original wait method to add a delay to allow enough
        # time for the notification events to get processed.
        super(NotifierTestCase, self).await_workflow_cancelled(wf_ex_id)
        self._sleep(post_delay)

mistral-10.0.0.0b3/mistral/tests/unit/notifiers/test_notifier_servers.py

# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from oslo_config import cfg
from stevedore import exception as sd_exc

from mistral import context
from mistral.db.v2 import api as db_api
from mistral.notifiers import base as notif
from mistral.notifiers import default_notifier as d_notif
from mistral.notifiers import notification_events as events
from mistral.notifiers import remote_notifier as r_notif
from mistral.services import workflows as wf_svc
from mistral.tests.unit.notifiers import base
from mistral.workflow import states

# Use the set_default method to set the value; otherwise, in certain test
# cases the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan') EVENT_LOGS = [] def publisher_process(ctx, ex_id, data, event, timestamp, **kwargs): if not isinstance(ctx, context.MistralContext): raise TypeError('ctx is not type of MistralContext.') EVENT_LOGS.append((ex_id, event)) def notifier_process(ex_id, data, event, timestamp, publishers): EVENT_LOGS.append((ex_id, event)) class ServerPluginTest(base.NotifierTestCase): def tearDown(self): notif.cleanup() super(ServerPluginTest, self).tearDown() def test_get_bad_notifier(self): self.assertRaises(sd_exc.NoMatches, notif.get_notifier, 'foobar') @mock.patch.object( r_notif.RemoteNotifier, 'notify', mock.MagicMock(return_value=None) ) class LocalNotifServerTest(base.NotifierTestCase): @classmethod def setUpClass(cls): super(LocalNotifServerTest, cls).setUpClass() cfg.CONF.set_default('type', 'local', group='notifier') @classmethod def tearDownClass(cls): cfg.CONF.set_default('type', 'remote', group='notifier') super(LocalNotifServerTest, cls).tearDownClass() def setUp(self): super(LocalNotifServerTest, self).setUp() self.publisher = notif.get_notification_publisher('webhook') self.publisher.publish = mock.MagicMock(side_effect=publisher_process) self.publisher.publish.reset_mock() del EVENT_LOGS[:] def tearDown(self): notif.cleanup() super(LocalNotifServerTest, self).tearDown() def test_get_notifier(self): notifier = notif.get_notifier(cfg.CONF.notifier.type) self.assertEqual('local', cfg.CONF.notifier.type) self.assertIsInstance(notifier, d_notif.DefaultNotifier) def test_notify(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notif_options = [{'type': 'webhook'}] wf_ex = self.engine.start_workflow( 'wf', '', wf_input={}, notify=notif_options ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED) ] self.assertFalse(r_notif.RemoteNotifier.notify.called) self.assertListEqual(expected_order, EVENT_LOGS) @mock.patch.object( r_notif.RemoteNotifier, 'notify', mock.MagicMock(side_effect=notifier_process) ) class RemoteNotifServerTest(base.NotifierTestCase): @classmethod def setUpClass(cls): super(RemoteNotifServerTest, cls).setUpClass() cfg.CONF.set_default('type', 'remote', group='notifier') def setUp(self): super(RemoteNotifServerTest, self).setUp() del EVENT_LOGS[:] def tearDown(self): notif.cleanup() super(RemoteNotifServerTest, self).tearDown() def test_get_notifier(self): notifier = notif.get_notifier(cfg.CONF.notifier.type) self.assertEqual('remote', cfg.CONF.notifier.type) self.assertIsInstance(notifier, r_notif.RemoteNotifier) def test_notify(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notif_options = [{'type': 'foobar'}] wf_ex = 
        self.engine.start_workflow(
            'wf',
            '',
            wf_input={},
            notify=notif_options
        )

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex.id)
            task_exs = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertIsNone(wf_ex.state_info)
        self.assertEqual(2, len(task_exs))

        t1_ex = self._assert_single_item(task_exs, name='t1')
        t2_ex = self._assert_single_item(task_exs, name='t2')

        self.assertEqual(states.SUCCESS, t1_ex.state)
        self.assertIsNone(t1_ex.state_info)
        self.assertEqual(states.SUCCESS, t2_ex.state)
        self.assertIsNone(t2_ex.state_info)

        expected_order = [
            (wf_ex.id, events.WORKFLOW_LAUNCHED),
            (t1_ex.id, events.TASK_LAUNCHED),
            (t1_ex.id, events.TASK_SUCCEEDED),
            (t2_ex.id, events.TASK_LAUNCHED),
            (t2_ex.id, events.TASK_SUCCEEDED),
            (wf_ex.id, events.WORKFLOW_SUCCEEDED)
        ]

        self.assertTrue(r_notif.RemoteNotifier.notify.called)
        self.assertListEqual(expected_order, EVENT_LOGS)

mistral-10.0.0.0b3/mistral/tests/unit/notifiers/test_notify.py

# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

import mock

from oslo_config import cfg

from mistral import context
from mistral.db.v2 import api as db_api
from mistral.notifiers import base as notif
from mistral.notifiers import notification_events as events
from mistral.services import workbooks as wb_svc
from mistral.services import workflows as wf_svc
from mistral.tests.unit.notifiers import base
from mistral.workflow import states
from mistral_lib import actions as ml_actions

# Use the set_default method to set the value; otherwise, in certain test
# cases the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan') EVENT_LOGS = [] def log_event(ctx, ex_id, data, event, timestamp, **kwargs): if not isinstance(ctx, context.MistralContext): raise TypeError('ctx is not type of MistralContext.') EVENT_LOGS.append((ex_id, event)) class NotifyEventsTest(base.NotifierTestCase): def setUp(self): super(NotifyEventsTest, self).setUp() self.publishers = { 'wbhk': notif.get_notification_publisher('webhook'), 'noop': notif.get_notification_publisher('noop') } self.publishers['wbhk'].publish = mock.MagicMock(side_effect=log_event) self.publishers['wbhk'].publish.reset_mock() self.publishers['noop'].publish = mock.MagicMock(side_effect=log_event) self.publishers['noop'].publish.reset_mock() del EVENT_LOGS[:] cfg.CONF.set_default('type', 'local', group='notifier') def tearDown(self): cfg.CONF.set_default('notify', None, group='notifier') super(NotifyEventsTest, self).tearDown() def test_notify_all_explicit(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [ { 'type': 'webhook', 'event_types': events.EVENTS } ] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) self.assertTrue(self.publishers['wbhk'].publish.called) self.assertEqual(6, len(EVENT_LOGS)) self.assertIn((wf_ex.id, events.WORKFLOW_LAUNCHED), EVENT_LOGS) self.assertIn((t1_ex.id, events.TASK_LAUNCHED), EVENT_LOGS) self.assertIn((t1_ex.id, events.TASK_SUCCEEDED), EVENT_LOGS) self.assertIn((t2_ex.id, events.TASK_LAUNCHED), EVENT_LOGS) self.assertIn((t2_ex.id, events.TASK_SUCCEEDED), EVENT_LOGS) self.assertIn((wf_ex.id, events.WORKFLOW_SUCCEEDED), EVENT_LOGS) def test_notify_all_implicit(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) self.assertTrue(self.publishers['wbhk'].publish.called) self.assertEqual(6, len(EVENT_LOGS)) self.assertIn((wf_ex.id, events.WORKFLOW_LAUNCHED), EVENT_LOGS) self.assertIn((t1_ex.id, events.TASK_LAUNCHED), EVENT_LOGS) self.assertIn((t1_ex.id, events.TASK_SUCCEEDED), EVENT_LOGS) self.assertIn((t2_ex.id, events.TASK_LAUNCHED), EVENT_LOGS) self.assertIn((t2_ex.id, events.TASK_SUCCEEDED), EVENT_LOGS) self.assertIn((wf_ex.id, events.WORKFLOW_SUCCEEDED), EVENT_LOGS) 
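    # Editor's sketch (illustrative only): the explicit and implicit cases
    # above take the same `notify` parameter shape; a single option may also
    # combine a publisher type with an event filter, e.g. (hypothetical
    # values):
    #
    #     notify_options = [{
    #         'type': 'webhook',
    #         'event_types': [events.TASK_FAILED, events.WORKFLOW_FAILED]
    #     }]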
def test_notify_order(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [ {'type': 'webhook'} ] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_with_event_filter(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [ { 'type': 'webhook', 'event_types': [ events.WORKFLOW_LAUNCHED, events.WORKFLOW_SUCCEEDED ] } ] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) self.assertTrue(self.publishers['wbhk'].publish.called) self.assertEqual(2, len(EVENT_LOGS)) self.assertIn((wf_ex.id, events.WORKFLOW_LAUNCHED), EVENT_LOGS) self.assertIn((wf_ex.id, events.WORKFLOW_SUCCEEDED), EVENT_LOGS) def test_notify_multiple(self): self.assertFalse(self.publishers['wbhk'].publish.called) self.assertFalse(self.publishers['noop'].publish.called) wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [ {'type': 'webhook'}, {'type': 'noop'} ] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_SUCCEEDED), (t1_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, 
events.TASK_LAUNCHED), (t2_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertTrue(self.publishers['noop'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_from_cfg(self): self.assertFalse(self.publishers['wbhk'].publish.called) self.assertFalse(self.publishers['noop'].publish.called) wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [ {'type': 'webhook'}, {'type': 'noop'} ] cfg.CONF.set_default( 'notify', json.dumps(notify_options), group='notifier' ) wf_ex = self.engine.start_workflow('wf', '') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_SUCCEEDED), (t1_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertTrue(self.publishers['noop'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_from_cfg_and_params(self): self.assertFalse(self.publishers['wbhk'].publish.called) self.assertFalse(self.publishers['noop'].publish.called) wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) cfg.CONF.set_default( 'notify', json.dumps([{'type': 'noop'}]), group='notifier' ) params = {'notify': [{'type': 'webhook'}]} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_SUCCEEDED), (t1_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertTrue(self.publishers['noop'].publish.called) self.assertListEqual(expected_order, 
EVENT_LOGS) def test_workbook_notify(self): wb_def = """ version: '2.0' name: wb workflows: wf1: tasks: t1: workflow: wf2 on-success: - t2 t2: action: std.noop wf2: tasks: t1: action: std.noop """ wb_svc.create_workbook_v2(wb_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf1_ex = self.engine.start_workflow('wb.wf1', '', **params) self.await_workflow_success(wf1_ex.id) with db_api.transaction(): wf1_ex = db_api.get_workflow_execution(wf1_ex.id) wf1_task_exs = wf1_ex.task_executions wf1_t1_ex = self._assert_single_item(wf1_task_exs, name='t1') wf1_t2_ex = self._assert_single_item(wf1_task_exs, name='t2') wf1_t1_act_exs = db_api.get_workflow_executions( task_execution_id=wf1_t1_ex.id ) wf2_ex = wf1_t1_act_exs[0] wf2_task_exs = wf2_ex.task_executions wf2_t1_ex = self._assert_single_item(wf2_task_exs, name='t1') self.assertEqual(states.SUCCESS, wf1_ex.state) self.assertIsNone(wf1_ex.state_info) self.assertEqual(2, len(wf1_task_exs)) self.assertEqual(states.SUCCESS, wf1_t1_ex.state) self.assertIsNone(wf1_t1_ex.state_info) self.assertEqual(states.SUCCESS, wf1_t2_ex.state) self.assertIsNone(wf1_t2_ex.state_info) self.assertEqual(1, len(wf1_t1_act_exs)) self.assertEqual(states.SUCCESS, wf2_ex.state) self.assertIsNone(wf2_ex.state_info) self.assertEqual(1, len(wf2_task_exs)) self.assertEqual(states.SUCCESS, wf2_t1_ex.state) self.assertIsNone(wf2_t1_ex.state_info) expected_order = [ (wf1_ex.id, events.WORKFLOW_LAUNCHED), (wf1_t1_ex.id, events.TASK_LAUNCHED), (wf2_ex.id, events.WORKFLOW_LAUNCHED), (wf2_t1_ex.id, events.TASK_LAUNCHED), (wf2_t1_ex.id, events.TASK_SUCCEEDED), (wf2_ex.id, events.WORKFLOW_SUCCEEDED), (wf1_t1_ex.id, events.TASK_SUCCEEDED), (wf1_t2_ex.id, events.TASK_LAUNCHED), (wf1_t2_ex.id, events.TASK_SUCCEEDED), (wf1_ex.id, events.WORKFLOW_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_task_error(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 t2: action: std.fail """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.ERROR, t2_ex.state) self.assertIsNotNone(t2_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_FAILED), (wf_ex.id, events.WORKFLOW_FAILED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_task_transition_fail(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-complete: - fail """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions 
self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(1, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_FAILED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_with_items_task(self): wf_def = """ version: '2.0' wf: tasks: t1: with-items: i in <% list(range(0, 3)) %> action: std.noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_success(wf_ex.id) self._sleep(1) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_pause_resume(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.async_noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_running(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t1_act_exs = db_api.get_action_executions(task_execution_id=t1_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_exs)) self.assertEqual(states.RUNNING, t1_ex.state) self.assertEqual(1, len(t1_act_exs)) self.assertEqual(states.RUNNING, t1_act_exs[0].state) # Pause the workflow. self.engine.pause_workflow(wf_ex.id) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t1_act_exs = db_api.get_action_executions(task_execution_id=t1_ex.id) # Workflow is paused but the task is still running as expected. self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(1, len(task_exs)) self.assertEqual(states.RUNNING, t1_ex.state) self.assertEqual(1, len(t1_act_exs)) self.assertEqual(states.RUNNING, t1_act_exs[0].state) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (wf_ex.id, events.WORKFLOW_PAUSED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) # Complete action execution of task 1. 
self.engine.on_action_complete( t1_act_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.PAUSED, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(1, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (wf_ex.id, events.WORKFLOW_PAUSED), (t1_ex.id, events.TASK_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) # Resume the workflow. self.engine.resume_workflow(wf_ex.id) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (wf_ex.id, events.WORKFLOW_PAUSED), (t1_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_RESUMED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_pause_resume_task(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.async_noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_running(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t1_act_exs = db_api.get_action_executions(task_execution_id=t1_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_exs)) self.assertEqual(states.RUNNING, t1_ex.state) self.assertEqual(1, len(t1_act_exs)) self.assertEqual(states.RUNNING, t1_act_exs[0].state) # Pause the action execution of task 1. self.engine.on_action_update(t1_act_exs[0].id, states.PAUSED) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t1_act_exs = db_api.get_action_executions(task_execution_id=t1_ex.id) self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(1, len(task_exs)) self.assertEqual(states.PAUSED, t1_ex.state) self.assertEqual(1, len(t1_act_exs)) self.assertEqual(states.PAUSED, t1_act_exs[0].state) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_PAUSED), (wf_ex.id, events.WORKFLOW_PAUSED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) # Resume the action execution of task 1. 
self.engine.on_action_update(t1_act_exs[0].id, states.RUNNING) self.await_task_running(t1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t1_act_exs = db_api.get_action_executions(task_execution_id=t1_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_exs)) self.assertEqual(states.RUNNING, t1_ex.state) self.assertEqual(1, len(t1_act_exs)) self.assertEqual(states.RUNNING, t1_act_exs[0].state) # Complete action execution of task 1. self.engine.on_action_complete( t1_act_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) # Wait for the workflow execution to complete. self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertIsNone(t2_ex.state_info) # TASK_RESUMED comes before WORKFLOW_RESUMED because # this test resumed the workflow with on_action_update. expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_PAUSED), (wf_ex.id, events.WORKFLOW_PAUSED), (t1_ex.id, events.TASK_RESUMED), (wf_ex.id, events.WORKFLOW_RESUMED), (t1_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_cancel(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.async_noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_running(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t1_act_exs = db_api.get_action_executions(task_execution_id=t1_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_exs)) self.assertEqual(states.RUNNING, t1_ex.state) self.assertEqual(1, len(t1_act_exs)) self.assertEqual(states.RUNNING, t1_act_exs[0].state) # Cancel the workflow. self.engine.stop_workflow(wf_ex.id, states.CANCELLED) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t1_act_exs = db_api.get_action_executions(task_execution_id=t1_ex.id) # Workflow is cancelled but the task is still running as expected. self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual(1, len(task_exs)) self.assertEqual(states.RUNNING, t1_ex.state) self.assertEqual(1, len(t1_act_exs)) self.assertEqual(states.RUNNING, t1_act_exs[0].state) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (wf_ex.id, events.WORKFLOW_CANCELLED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) # Complete action execution of task 1. 
self.engine.on_action_complete( t1_act_exs[0].id, ml_actions.Result(data={'result': 'foobar'}) ) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(states.CANCELLED, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(1, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='t1') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertIsNone(t1_ex.state_info) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (wf_ex.id, events.WORKFLOW_CANCELLED), (t1_ex.id, events.TASK_SUCCEEDED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_cancel_task(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.async_noop on-success: - t2 t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_running(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t1_act_exs = db_api.get_action_executions(task_execution_id=t1_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_exs)) self.assertEqual(states.RUNNING, t1_ex.state) self.assertEqual(1, len(t1_act_exs)) self.assertEqual(states.RUNNING, t1_act_exs[0].state) # Cancel the action execution of task 1. self.engine.on_action_update(t1_act_exs[0].id, states.CANCELLED) self.await_workflow_cancelled(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t1_act_exs = db_api.get_action_executions(task_execution_id=t1_ex.id) self.assertEqual(states.CANCELLED, wf_ex.state) self.assertEqual(1, len(task_exs)) self.assertEqual(states.CANCELLED, t1_ex.state) self.assertEqual(1, len(t1_act_exs)) self.assertEqual(states.CANCELLED, t1_act_exs[0].state) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_CANCELLED), (wf_ex.id, events.WORKFLOW_CANCELLED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) def test_notify_task_input_error(self): wf_def = """--- version: '2.0' wf: tasks: task1: input: url: <% $.ItWillBeError %> action: std.http on-error: task2 task2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions self.assertEqual(1, len(task_exs)) t1_ex = self._assert_single_item(task_exs, name='task1') self.assertEqual(states.ERROR, t1_ex.state) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_FAILED), (wf_ex.id, events.WORKFLOW_FAILED) ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) @mock.patch('mistral.actions.std_actions.NoOpAction.run', mock.MagicMock( side_effect=[Exception(), None, None])) def test_notify_rerun_task(self): wf_def = """ version: '2.0' wf: tasks: t1: action: std.noop on-success: - t2 
t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_ex = self.engine.start_workflow('wf', '', **params) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') self.assertEqual(states.ERROR, t1_ex.state) self.assertEqual(1, len(task_exs)) self.engine.rerun_workflow(t1_ex.id) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_exs = wf_ex.task_executions t1_ex = self._assert_single_item(task_exs, name='t1') t2_ex = self._assert_single_item(task_exs, name='t2') self.assertEqual(states.SUCCESS, t1_ex.state) self.assertEqual(states.SUCCESS, t2_ex.state) self.assertEqual(2, len(task_exs)) expected_order = [ (wf_ex.id, events.WORKFLOW_LAUNCHED), (t1_ex.id, events.TASK_LAUNCHED), (t1_ex.id, events.TASK_FAILED), (wf_ex.id, events.WORKFLOW_FAILED), # rerun (wf_ex.id, events.WORKFLOW_RERUN), (t1_ex.id, events.TASK_RERUN), (t1_ex.id, events.TASK_SUCCEEDED), (t2_ex.id, events.TASK_LAUNCHED), (t2_ex.id, events.TASK_SUCCEEDED), (wf_ex.id, events.WORKFLOW_SUCCEEDED), ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) @mock.patch('mistral.actions.std_actions.NoOpAction.run', mock.MagicMock( side_effect=[Exception(), None, None, None])) def test_notify_rerun_nested_workflow(self): wf_def = """ wf_1: tasks: wf_1_t1: workflow: wf_2 on-success: - wf_1_t2 wf_1_t2: action: std.noop version: '2.0' wf_2: tasks: wf_2_t1: action: std.noop on-success: - wf_2_t2 wf_2_t2: action: std.noop """ wf_svc.create_workflows(wf_def) notify_options = [{'type': 'webhook'}] params = {'notify': notify_options} wf_1_ex = self.engine.start_workflow('wf_1', '', **params) self.await_workflow_error(wf_1_ex.id) with db_api.transaction(): wf_exs = db_api.get_workflow_executions() self._assert_single_item(wf_exs, name='wf_1', state=states.ERROR) self._assert_single_item(wf_exs, name='wf_2', state=states.ERROR) task_exs = db_api.get_task_executions() self._assert_single_item(task_exs, name='wf_1_t1', state=states.ERROR) wf_2_t1 = self._assert_single_item(task_exs, name='wf_2_t1', state=states.ERROR) self.assertEqual(2, len(task_exs)) self.assertEqual(2, len(wf_exs)) self.engine.rerun_workflow(wf_2_t1.id) self.await_workflow_success(wf_1_ex.id) with db_api.transaction(): wf_exs = db_api.get_workflow_executions() wf_1_ex = self._assert_single_item(wf_exs, name='wf_1', state=states.SUCCESS) wf_2_ex = self._assert_single_item(wf_exs, name='wf_2', state=states.SUCCESS) task_wf_1_exs = wf_1_ex.task_executions wf_1_t1 = self._assert_single_item(task_wf_1_exs, name='wf_1_t1', state=states.SUCCESS) wf_1_t2 = self._assert_single_item(task_wf_1_exs, name='wf_1_t2', state=states.SUCCESS) task_wf_2_exs = wf_2_ex.task_executions wf_2_t1 = self._assert_single_item(task_wf_2_exs, name='wf_2_t1', state=states.SUCCESS) wf_2_t2 = self._assert_single_item(task_wf_2_exs, name='wf_2_t2', state=states.SUCCESS) self.assertEqual(2, len(task_wf_1_exs)) self.assertEqual(2, len(task_wf_2_exs)) self.assertEqual(2, len(wf_exs)) expected_order = [ (wf_1_ex.id, events.WORKFLOW_LAUNCHED), (wf_1_t1.id, events.TASK_LAUNCHED), (wf_2_ex.id, events.WORKFLOW_LAUNCHED), (wf_2_t1.id, events.TASK_LAUNCHED), (wf_2_t1.id, events.TASK_FAILED), (wf_2_ex.id, events.WORKFLOW_FAILED), (wf_1_t1.id, events.TASK_FAILED), (wf_1_ex.id, 
events.WORKFLOW_FAILED), # rerun (wf_2_ex.id, events.WORKFLOW_RERUN), (wf_1_ex.id, events.WORKFLOW_RERUN), (wf_1_t1.id, events.TASK_RERUN), (wf_2_t1.id, events.TASK_RERUN), (wf_2_t1.id, events.TASK_SUCCEEDED), (wf_2_t2.id, events.TASK_LAUNCHED), (wf_2_t2.id, events.TASK_SUCCEEDED), (wf_2_ex.id, events.WORKFLOW_SUCCEEDED), (wf_1_t1.id, events.TASK_SUCCEEDED), (wf_1_t2.id, events.TASK_LAUNCHED), (wf_1_t2.id, events.TASK_SUCCEEDED), (wf_1_ex.id, events.WORKFLOW_SUCCEEDED), ] self.assertTrue(self.publishers['wbhk'].publish.called) self.assertListEqual(expected_order, EVENT_LOGS) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.153568 mistral-10.0.0.0b3/mistral/tests/unit/policies/0000755000175000017500000000000000000000000021516 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/policies/__init__.py0000644000175000017500000000000000000000000023615 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/policies/test_actions.py0000644000175000017500000001734200000000000024576 0ustar00coreycorey00000000000000# Copyright 2018 OVH SAS. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral.tests.unit.api import base from mistral.tests.unit.mstrlfixtures import policy_fixtures MOCK_DELETE = mock.MagicMock(return_value=None) ACTION_DEFINITION = """ --- version: '2.0' my_action: description: My super cool action. 
tags: ['test', 'v2'] base: std.echo base-input: output: "{$.str1}{$.str2}" """ ACTION_DB = models.ActionDefinition( id='123e4567-e89b-12d3-a456-426655440000', name='my_action', is_system=False, description='My super cool action.', tags=['test', 'v2'], definition=ACTION_DEFINITION ) MOCK_ACTION = mock.MagicMock(return_value=ACTION_DB) class TestActionPolicy(base.APITest): """Test action related policies Policies to test: - actions:create - actions:delete - actions:get - actions:list - actions:publicize (on POST & PUT) - actions:update """ def setUp(self): self.policy = self.useFixture(policy_fixtures.PolicyFixture()) super(TestActionPolicy, self).setUp() @mock.patch.object(db_api, "create_action_definition") def test_action_create_not_allowed(self, mock_obj): self.policy.change_policy_definition( {"actions:create": "role:FAKE"} ) resp = self.app.post( '/v2/actions', ACTION_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "create_action_definition") def test_action_create_allowed(self, mock_obj): self.policy.change_policy_definition( {"actions:create": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.post( '/v2/actions', ACTION_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(201, resp.status_int) @mock.patch.object(db_api, "create_action_definition") def test_action_create_public_not_allowed(self, mock_obj): self.policy.change_policy_definition({ "actions:create": "role:FAKE or rule:admin_or_owner", "actions:publicize": "role:FAKE" }) resp = self.app.post( '/v2/actions?scope=public', ACTION_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "create_action_definition") def test_action_create_public_allowed(self, mock_obj): self.policy.change_policy_definition({ "actions:create": "role:FAKE or rule:admin_or_owner", "actions:publicize": "role:FAKE or rule:admin_or_owner" }) resp = self.app.post( '/v2/actions?scope=public', ACTION_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(201, resp.status_int) @mock.patch.object(db_api, "delete_action_definition", MOCK_DELETE) @mock.patch.object(db_api, "get_action_definition", MOCK_ACTION) def test_action_delete_not_allowed(self): self.policy.change_policy_definition( {"actions:delete": "role:FAKE"} ) resp = self.app.delete( '/v2/actions/123', expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "delete_action_definition", MOCK_DELETE) @mock.patch.object(db_api, "get_action_definition", MOCK_ACTION) def test_action_delete_allowed(self): self.policy.change_policy_definition( {"actions:delete": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.delete( '/v2/actions/123', expect_errors=True ) self.assertEqual(204, resp.status_int) @mock.patch.object(db_api, "get_action_definition", MOCK_ACTION) def test_action_get_not_allowed(self): self.policy.change_policy_definition( {"actions:get": "role:FAKE"} ) resp = self.app.get( '/v2/actions/123', expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "get_action_definition", MOCK_ACTION) def test_action_get_allowed(self): self.policy.change_policy_definition( {"actions:get": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.get( '/v2/actions/123', expect_errors=True ) self.assertEqual(200, resp.status_int) def test_action_list_not_allowed(self): 
self.policy.change_policy_definition( {"actions:list": "role:FAKE"} ) resp = self.app.get( '/v2/actions', expect_errors=True ) self.assertEqual(403, resp.status_int) def test_action_list_allowed(self): self.policy.change_policy_definition( {"actions:list": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.get( '/v2/actions', expect_errors=True ) self.assertEqual(200, resp.status_int) @mock.patch.object(db_api, "update_action_definition") def test_action_update_not_allowed(self, mock_obj): self.policy.change_policy_definition( {"actions:update": "role:FAKE"} ) resp = self.app.put( '/v2/actions', ACTION_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "update_action_definition") def test_action_update_allowed(self, mock_obj): self.policy.change_policy_definition( {"actions:update": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.put( '/v2/actions', ACTION_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) @mock.patch.object(db_api, "update_action_definition") def test_action_update_public_not_allowed(self, mock_obj): self.policy.change_policy_definition({ "actions:update": "role:FAKE or rule:admin_or_owner", "actions:publicize": "role:FAKE" }) resp = self.app.put( '/v2/actions?scope=public', ACTION_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "update_action_definition") def test_action_update_public_allowed(self, mock_obj): self.policy.change_policy_definition({ "actions:update": "role:FAKE or rule:admin_or_owner", "actions:publicize": "role:FAKE or rule:admin_or_owner" }) resp = self.app.put( '/v2/actions?scope=public', ACTION_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/policies/test_workflows.py0000644000175000017500000002210500000000000025164 0ustar00coreycorey00000000000000# Copyright 2016 NEC Corporation. All rights reserved. # Copyright 2018 OVH SAS. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
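# Editor's note (illustrative): like the action policy tests above, each
# case below swaps in a policy rule via the PolicyFixture and asserts the
# HTTP status returned under that rule, e.g.:
#
#     self.policy.change_policy_definition({"workflows:get": "role:FAKE"})
#     resp = self.app.get('/v2/workflows/123', expect_errors=True)
#     self.assertEqual(403, resp.status_int)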
import datetime import mock from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral.tests.unit.api import base from mistral.tests.unit.mstrlfixtures import policy_fixtures MOCK_DELETE = mock.MagicMock(return_value=None) WF_DEFINITION = """ --- version: '2.0' flow: type: direct input: - param1 tasks: task1: action: std.echo output="Hi" """ WF_DB = models.WorkflowDefinition( id='123e4567-e89b-12d3-a456-426655440000', name='flow', definition=WF_DEFINITION, created_at=datetime.datetime(1970, 1, 1), updated_at=datetime.datetime(1970, 1, 1), spec={'input': ['param1']} ) MOCK_WF = mock.MagicMock(return_value=WF_DB) class TestWorkflowPolicy(base.APITest): """Test workflow related policies Policies to test: - workflows:create - workflows:delete - workflows:get - workflows:list - workflows:list:all_projects - workflows:publicize (on POST & PUT) - workflows:update """ def setUp(self): self.policy = self.useFixture(policy_fixtures.PolicyFixture()) super(TestWorkflowPolicy, self).setUp() @mock.patch.object(db_api, "create_workflow_definition") def test_workflow_create_not_allowed(self, mock_obj): self.policy.change_policy_definition( {"workflows:create": "role:FAKE"} ) resp = self.app.post( '/v2/workflows', WF_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "create_workflow_definition") def test_workflow_create_allowed(self, mock_obj): spec_mock = mock_obj.return_value.get.return_value spec_mock.get.return_value = {} self.policy.change_policy_definition( {"workflows:create": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.post( '/v2/workflows', WF_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(201, resp.status_int) @mock.patch.object(db_api, "create_workflow_definition") def test_workflow_create_public_not_allowed(self, mock_obj): self.policy.change_policy_definition({ "workflows:create": "role:FAKE or rule:admin_or_owner", "workflows:publicize": "role:FAKE" }) resp = self.app.post( '/v2/workflows?scope=public', WF_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "create_workflow_definition") def test_workflow_create_public_allowed(self, mock_obj): spec_mock = mock_obj.return_value.get.return_value spec_mock.get.return_value = {} self.policy.change_policy_definition({ "workflows:create": "role:FAKE or rule:admin_or_owner", "workflows:publicize": "role:FAKE or rule:admin_or_owner" }) resp = self.app.post( '/v2/workflows?scope=public', WF_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(201, resp.status_int) @mock.patch.object(db_api, "delete_workflow_definition", MOCK_DELETE) @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF) def test_workflow_delete_not_allowed(self): self.policy.change_policy_definition( {"workflows:delete": "role:FAKE"} ) resp = self.app.delete( '/v2/workflows/123', expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "delete_workflow_definition", MOCK_DELETE) @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF) def test_workflow_delete_allowed(self): self.policy.change_policy_definition( {"workflows:delete": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.delete( '/v2/workflows/123', expect_errors=True ) self.assertEqual(204, resp.status_int) @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF) def 
test_workflow_get_not_allowed(self): self.policy.change_policy_definition( {"workflows:get": "role:FAKE"} ) resp = self.app.get( '/v2/workflows/123', expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "get_workflow_definition", MOCK_WF) def test_workflow_get_allowed(self): self.policy.change_policy_definition( {"workflows:get": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.get( '/v2/workflows/123', expect_errors=True ) self.assertEqual(200, resp.status_int) def test_workflow_list_not_allowed(self): self.policy.change_policy_definition( {"workflows:list": "role:FAKE"} ) resp = self.app.get( '/v2/workflows', expect_errors=True ) self.assertEqual(403, resp.status_int) def test_workflow_list_allowed(self): self.policy.change_policy_definition( {"workflows:list": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.get( '/v2/workflows', expect_errors=True ) self.assertEqual(200, resp.status_int) def test_workflow_list_all_not_allowed(self): self.policy.change_policy_definition({ "workflows:list": "role:FAKE or rule:admin_or_owner", "workflows:list:all_projects": "role:FAKE" }) resp = self.app.get( '/v2/workflows?all_projects=1', expect_errors=True ) self.assertEqual(403, resp.status_int) def test_workflow_list_all_allowed(self): self.policy.change_policy_definition({ "workflows:list": "role:FAKE or rule:admin_or_owner", "workflows:list:all_projects": "role:FAKE or rule:admin_or_owner" }) resp = self.app.get( '/v2/workflows?all_projects=1', expect_errors=True ) self.assertEqual(200, resp.status_int) @mock.patch.object(db_api, "update_workflow_definition") def test_workflow_update_not_allowed(self, mock_obj): self.policy.change_policy_definition( {"workflows:update": "role:FAKE"} ) resp = self.app.put( '/v2/workflows', WF_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "update_workflow_definition") def test_workflow_update_allowed(self, mock_obj): spec_mock = mock_obj.return_value.get.return_value spec_mock.get.return_value = {} self.policy.change_policy_definition( {"workflows:update": "role:FAKE or rule:admin_or_owner"} ) resp = self.app.put( '/v2/workflows', WF_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) @mock.patch.object(db_api, "update_workflow_definition") def test_workflow_update_public_not_allowed(self, mock_obj): self.policy.change_policy_definition({ "workflows:update": "role:FAKE or rule:admin_or_owner", "workflows:publicize": "role:FAKE" }) resp = self.app.put( '/v2/workflows?scope=public', WF_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(403, resp.status_int) @mock.patch.object(db_api, "update_workflow_definition") def test_workflow_update_public_allowed(self, mock_obj): spec_mock = mock_obj.return_value.get.return_value spec_mock.get.return_value = {} self.policy.change_policy_definition({ "workflows:update": "role:FAKE or rule:admin_or_owner", "workflows:publicize": "role:FAKE or rule:admin_or_owner" }) resp = self.app.put( '/v2/workflows?scope=public', WF_DEFINITION, headers={'Content-Type': 'text/plain'}, expect_errors=True ) self.assertEqual(200, resp.status_int) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.153568 mistral-10.0.0.0b3/mistral/tests/unit/rpc/0000755000175000017500000000000000000000000020473 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/rpc/__init__.py0000644000175000017500000000000000000000000022572 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.153568 mistral-10.0.0.0b3/mistral/tests/unit/rpc/kombu/0000755000175000017500000000000000000000000021610 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/rpc/kombu/__init__.py0000644000175000017500000000000000000000000023707 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/rpc/kombu/base.py0000644000175000017500000000174300000000000023101 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mistral import config as cfg from mistral.rpc.kombu import base as kombu_base from mistral.tests.unit import base class KombuTestCase(base.BaseTest): def setUp(self): super(KombuTestCase, self).setUp() kombu_base.set_transport_options(check_backend=False) cfg.CONF.set_default('transport_url', 'rabbit://localhost:567') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/rpc/kombu/fake_kombu.py0000644000175000017500000000236200000000000024270 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
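# Editor's note (grounded in the tests below): this module is substituted
# for the real kombu package via sys.modules patching, as done in
# test_kombu_client.py:
#
#     with mock.patch.dict('sys.modules', kombu=fake_kombu):
#         from mistral.rpc.kombu import kombu_client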
from kombu import mixins as mx import mock # Hack for making tests works with kombu listener mixins = mx producer = mock.MagicMock() producers = mock.MagicMock() producers.__getitem__ = lambda *args, **kwargs: producer connection = mock.MagicMock() connections = mock.MagicMock() connections.__getitem__ = lambda *args, **kwargs: connection serialization = mock.MagicMock() def BrokerConnection(*args, **kwargs): return mock.MagicMock() def Exchange(*args, **kwargs): return mock.MagicMock() def Queue(*args, **kwargs): return mock.MagicMock() def Consumer(*args, **kwargs): return mock.MagicMock() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/rpc/kombu/test_kombu_client.py0000644000175000017500000000562200000000000025701 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mistral import exceptions as exc from mistral.tests.unit.rpc.kombu import base from mistral.tests.unit.rpc.kombu import fake_kombu import mock from six import moves with mock.patch.dict('sys.modules', kombu=fake_kombu): from mistral.rpc.kombu import base as kombu_base from mistral.rpc.kombu import kombu_client class TestException(exc.MistralException): pass class KombuClientTest(base.KombuTestCase): _RESPONSE = "response" def setUp(self): super(KombuClientTest, self).setUp() conf = mock.MagicMock() listener_class = kombu_client.kombu_listener.KombuRPCListener kombu_client.kombu_listener.KombuRPCListener = mock.MagicMock() def restore_listener(): kombu_client.kombu_listener.KombuRPCListener = listener_class self.addCleanup(restore_listener) self.client = kombu_client.KombuRPCClient(conf) self.ctx = type( 'context', (object,), {'to_dict': lambda self: {}} )() def test_sync_call_result_get(self): self.client._listener.get_result = mock.MagicMock( return_value={ kombu_base.TYPE: None, kombu_base.RESULT: self.client._serialize_message({ 'body': self._RESPONSE }) } ) response = self.client.sync_call(self.ctx, 'method') self.assertEqual(response, self._RESPONSE) def test_sync_call_result_not_get(self): self.client._listener.get_result = mock.MagicMock( side_effect=moves.queue.Empty ) self.assertRaises( exc.MistralException, self.client.sync_call, self.ctx, 'method_not_found' ) def test_sync_call_result_type_error(self): def side_effect(*args, **kwargs): return { kombu_base.TYPE: 'error', kombu_base.RESULT: TestException() } self.client._wait_for_result = mock.MagicMock(side_effect=side_effect) self.assertRaises( TestException, self.client.sync_call, self.ctx, 'method' ) def test_async_call(self): self.assertIsNone(self.client.async_call(self.ctx, 'method')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/rpc/kombu/test_kombu_hosts.py0000644000175000017500000000616700000000000025570 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 
(the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.rpc.kombu import kombu_hosts from mistral.tests.unit import base from oslo_config import cfg import functools import oslo_messaging HOST_1 = 'rabbitmq_1' PORT_1 = 5671 HOST_2 = 'rabbitmq_2' PORT_2 = 5672 USER_1 = 'u_mistral_1' PASSWORD_1 = 'p_mistral_1' USER_2 = 'u_mistral_2' PASSWORD_2 = 'p_mistral_2' VIRTUAL_HOST_1 = 'vhost_1' VIRTUAL_HOST_2 = 'vhost_2' class KombuHostsTest(base.BaseTest): def setUp(self): super(KombuHostsTest, self).setUp() # Oslo messaging set a default config option oslo_messaging.get_transport(cfg.CONF) def assert_transports_host(self, expected, result): sorted_by_host = functools.partial(sorted, key=lambda x: x.hostname) self.assertListEqual(sorted_by_host(expected), sorted_by_host(result)) def test_transport_url(self): self.override_config( 'transport_url', 'rabbit://{user}:{password}@{host}:{port}/{virtual_host}'.format( user=USER_1, port=PORT_1, host=HOST_1, password=PASSWORD_1, virtual_host=VIRTUAL_HOST_1 )) hosts = kombu_hosts.KombuHosts(cfg.CONF) self.assertEqual(VIRTUAL_HOST_1, hosts.virtual_host) self.assert_transports_host([oslo_messaging.TransportHost( hostname=HOST_1, port=PORT_1, username=USER_1, password=PASSWORD_1, )], hosts.hosts) def test_transport_url_multiple_hosts(self): self.override_config( 'transport_url', 'rabbit://{user_1}:{password_1}@{host_1}:{port_1},' '{user_2}:{password_2}@{host_2}:{port_2}/{virtual_host}'.format( user_1=USER_1, password_1=PASSWORD_1, port_1=PORT_1, host_1=HOST_1, user_2=USER_2, password_2=PASSWORD_2, host_2=HOST_2, port_2=PORT_2, virtual_host=VIRTUAL_HOST_1 )) hosts = kombu_hosts.KombuHosts(cfg.CONF) self.assertEqual(VIRTUAL_HOST_1, hosts.virtual_host) self.assert_transports_host( [ oslo_messaging.TransportHost( hostname=HOST_1, port=PORT_1, username=USER_1, password=PASSWORD_1 ), oslo_messaging.TransportHost( hostname=HOST_2, port=PORT_2, username=USER_2, password=PASSWORD_2 ) ], hosts.hosts ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/rpc/kombu/test_kombu_listener.py0000644000175000017500000001530300000000000026245 0ustar00coreycorey00000000000000# Copyright (c) 2017 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
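# Like test_kombu_client.py above, the listener and server test modules
# below import the Mistral kombu code under
# mock.patch.dict('sys.modules', kombu=fake_kombu), so the fake module
# stands in for the real kombu library at import time. A minimal
# self-contained illustration of the trick (the module name heavy_dep
# is hypothetical):
#
# import types
#
# import mock
#
# # Build a stand-in module and register it under the dependency's name.
# fake_dep = types.ModuleType('heavy_dep')
# fake_dep.connect = mock.MagicMock(return_value='fake-connection')
#
# with mock.patch.dict('sys.modules', heavy_dep=fake_dep):
#     # Any `import heavy_dep` executed inside this block resolves to
#     # the fake; modules imported here keep that binding afterwards.
#     import heavy_dep
#
#     assert heavy_dep.connect() == 'fake-connection'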
from mistral import exceptions as exc from mistral.tests.unit.rpc.kombu import base from mistral.tests.unit.rpc.kombu import fake_kombu from mistral_lib import utils import mock from six import moves with mock.patch.dict('sys.modules', kombu=fake_kombu): from mistral.rpc.kombu import base as kombu_base from mistral.rpc.kombu import kombu_listener class TestException(exc.MistralException): pass class KombuListenerTest(base.KombuTestCase): def setUp(self): super(KombuListenerTest, self).setUp() self.listener = kombu_listener.KombuRPCListener( [mock.MagicMock()], mock.MagicMock() ) self.ctx = type('context', (object,), {'to_dict': lambda self: {}})() def test_add_listener(self): correlation_id = utils.generate_unicode_uuid() self.listener.add_listener(correlation_id) self.assertEqual( type(self.listener._results.get(correlation_id)), moves.queue.Queue ) self.assertEqual(0, self.listener._results[correlation_id].qsize()) def test_remove_listener_correlation_id_in_results(self): correlation_id = utils.generate_unicode_uuid() self.listener.add_listener(correlation_id) self.assertEqual( type(self.listener._results.get(correlation_id)), moves.queue.Queue ) self.listener.remove_listener(correlation_id) self.assertIsNone( self.listener._results.get(correlation_id) ) def test_remove_listener_correlation_id_not_in_results(self): correlation_id = utils.generate_unicode_uuid() self.listener.add_listener(correlation_id) self.assertEqual( type(self.listener._results.get(correlation_id)), moves.queue.Queue ) self.listener.remove_listener(utils.generate_unicode_uuid()) self.assertEqual( type(self.listener._results.get(correlation_id)), moves.queue.Queue ) @mock.patch('threading.Thread') def test_start_thread_not_set(self, thread_class_mock): thread_mock = mock.MagicMock() thread_class_mock.return_value = thread_mock self.listener.start() self.assertTrue(thread_mock.daemon) self.assertEqual(thread_mock.start.call_count, 1) @mock.patch('threading.Thread') def test_start_thread_set(self, thread_class_mock): thread_mock = mock.MagicMock() thread_class_mock.return_value = thread_mock self.listener._thread = mock.MagicMock() self.listener.start() self.assertEqual(thread_mock.start.call_count, 0) def test_get_result_results_in_queue(self): expected_result = 'abcd' correlation_id = utils.generate_unicode_uuid() self.listener.add_listener(correlation_id) self.listener._results.get(correlation_id).put(expected_result) result = self.listener.get_result(correlation_id, 5) self.assertEqual(result, expected_result) def test_get_result_not_in_queue(self): correlation_id = utils.generate_unicode_uuid() self.listener.add_listener(correlation_id) self.assertRaises( moves.queue.Empty, self.listener.get_result, correlation_id, 1 # timeout ) def test_get_result_lack_of_queue(self): correlation_id = utils.generate_unicode_uuid() self.assertRaises( KeyError, self.listener.get_result, correlation_id, 1 # timeout ) def test__on_response_message_ack_fail(self): message = mock.MagicMock() message.ack.side_effect = Exception('Test Exception') response = 'response' kombu_listener.LOG = mock.MagicMock() self.listener.on_message(response, message) self.assertEqual(kombu_listener.LOG.debug.call_count, 1) self.assertEqual(kombu_listener.LOG.exception.call_count, 1) def test__on_response_message_ack_ok_corr_id_not_match(self): message = mock.MagicMock() message.properties = mock.MagicMock() message.properties.__getitem__ = lambda *args, **kwargs: True response = 'response' kombu_listener.LOG = mock.MagicMock() 
self.listener.on_message(response, message) self.assertEqual(kombu_listener.LOG.debug.call_count, 3) self.assertEqual(kombu_listener.LOG.exception.call_count, 0) def test__on_response_message_ack_ok_messsage_type_error(self): correlation_id = utils.generate_unicode_uuid() message = mock.MagicMock() message.properties = dict() message.properties['type'] = 'error' message.properties['correlation_id'] = correlation_id response = TestException('response') kombu_listener.LOG = mock.MagicMock() self.listener.add_listener(correlation_id) self.listener.on_message(response, message) self.assertEqual(kombu_listener.LOG.debug.call_count, 2) self.assertEqual(kombu_listener.LOG.exception.call_count, 0) result = self.listener.get_result(correlation_id, 5) self.assertDictEqual( result, { kombu_base.TYPE: 'error', kombu_base.RESULT: response } ) def test__on_response_message_ack_ok(self): correlation_id = utils.generate_unicode_uuid() message = mock.MagicMock() message.properties = dict() message.properties['type'] = None message.properties['correlation_id'] = correlation_id response = 'response' kombu_listener.LOG = mock.MagicMock() self.listener.add_listener(correlation_id) self.listener.on_message(response, message) self.assertEqual(kombu_listener.LOG.debug.call_count, 2) self.assertEqual(kombu_listener.LOG.exception.call_count, 0) result = self.listener.get_result(correlation_id, 5) self.assertDictEqual( result, { kombu_base.TYPE: None, kombu_base.RESULT: response } ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/rpc/kombu/test_kombu_server.py0000644000175000017500000002313200000000000025725 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
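# The listener tests above all revolve around a single data structure:
# one reply queue per in-flight call, keyed by correlation id. Before the
# server-side tests below, here is a condensed sketch of that behaviour
# (a hypothetical stand-alone class, not the actual KombuRPCListener
# code):
#
# from six import moves
#
# class ReplyRegistry(object):
#     """One queue per in-flight RPC call, keyed by correlation id."""
#
#     def __init__(self):
#         self._results = {}
#
#     def add_listener(self, correlation_id):
#         self._results[correlation_id] = moves.queue.Queue()
#
#     def remove_listener(self, correlation_id):
#         self._results.pop(correlation_id, None)
#
#     def on_message(self, correlation_id, reply):
#         # Replies with an unknown correlation id are dropped, which is
#         # what the "corr_id_not_match" test above asserts indirectly
#         # through the LOG call counts.
#         if correlation_id in self._results:
#             self._results[correlation_id].put(reply)
#
#     def get_result(self, correlation_id, timeout):
#         # Raises KeyError for an unknown id ("lack of queue" test) and
#         # moves.queue.Empty on timeout ("not in queue" test).
#         return self._results[correlation_id].get(timeout=timeout)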
import futurist
from mistral import context
from mistral import exceptions as exc
from mistral.tests.unit.rpc.kombu import base
from mistral.tests.unit.rpc.kombu import fake_kombu
import mock
import socket
from stevedore import driver

with mock.patch.dict('sys.modules', kombu=fake_kombu):
    from mistral.rpc.kombu import kombu_server


class TestException(exc.MistralError):
    pass


class KombuServerTest(base.KombuTestCase):

    def setUp(self):
        super(KombuServerTest, self).setUp()

        self.conf = mock.MagicMock()
        self.server = kombu_server.KombuRPCServer(self.conf)
        self.ctx = type('context', (object,), {'to_dict': lambda self: {}})()

    def test_is_running_is_running(self):
        self.server._running.set()

        self.assertTrue(self.server.is_running)

    def test_is_running_is_not_running(self):
        self.server._running.clear()

        self.assertFalse(self.server.is_running)

    def test_stop(self):
        self.server.stop()

        self.assertFalse(self.server.is_running)

    def test_publish_message(self):
        body = 'body'
        reply_to = 'reply_to'
        corr_id = 'corr_id'
        msg_type = 'type'  # Renamed from `type` to avoid shadowing the builtin.

        acquire_mock = mock.MagicMock()
        fake_kombu.producer.acquire.return_value = acquire_mock

        enter_mock = mock.MagicMock()
        acquire_mock.__enter__.return_value = enter_mock

        self.server.publish_message(body, reply_to, corr_id, msg_type)

        enter_mock.publish.assert_called_once_with(
            body={'body': '"body"'},
            exchange='openstack',
            routing_key=reply_to,
            correlation_id=corr_id,
            type=msg_type,
            serializer='json'
        )

    def test_run_launch_successfully(self):
        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = TestException()
        fake_kombu.connection.acquire.return_value = acquire_mock

        self.assertRaises(TestException, self.server._run, 'blocking')
        self.assertTrue(self.server.is_running)

    def test_run_launch_successfully_then_stop(self):
        def side_effect(*args, **kwargs):
            self.assertTrue(self.server.is_running)

            raise KeyboardInterrupt

        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = side_effect
        fake_kombu.connection.acquire.return_value = acquire_mock

        self.server._run('blocking')

        self.assertFalse(self.server.is_running)
        self.assertEqual(self.server._sleep_time, 1)

    def test_run_socket_error_reconnect(self):
        def side_effect(*args, **kwargs):
            if acquire_mock.drain_events.call_count == 1:
                raise socket.error()

            raise TestException()

        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = side_effect
        fake_kombu.connection.acquire.return_value = acquire_mock

        self.assertRaises(TestException, self.server._run, 'blocking')
        self.assertEqual(self.server._sleep_time, 1)

    def test_run_socket_timeout_still_running(self):
        def side_effect(*args, **kwargs):
            # The mock increments call_count before invoking the side
            # effect, so the first call is observed as call_count == 1;
            # with the previous `== 0` check the timeout branch was
            # never exercised.
            if acquire_mock.drain_events.call_count == 1:
                raise socket.timeout()

            raise TestException()

        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = side_effect
        fake_kombu.connection.acquire.return_value = acquire_mock

        self.assertRaises(
            TestException,
            self.server._run,
            'blocking'
        )
        self.assertTrue(self.server.is_running)

    def test_run_keyboard_interrupt_not_running(self):
        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = KeyboardInterrupt()
        fake_kombu.connection.acquire.return_value = acquire_mock

        self.assertIsNone(self.server.run())
        self.assertFalse(self.server.is_running)

    @mock.patch.object(
        kombu_server.KombuRPCServer,
        '_on_message',
        mock.MagicMock()
    )
    @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message')
    def test__on_message_safe_message_processing_ok(self, publish_message):
        message = mock.MagicMock()

        self.server._on_message_safe(None, message)
self.assertEqual(message.ack.call_count, 1) self.assertEqual(publish_message.call_count, 0) @mock.patch.object(kombu_server.KombuRPCServer, '_on_message') @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') def test__on_message_safe_message_processing_raise( self, publish_message, _on_message ): reply_to = 'reply_to' correlation_id = 'corr_id' message = mock.MagicMock() message.properties = { 'reply_to': reply_to, 'correlation_id': correlation_id } test_exception = TestException() _on_message.side_effect = test_exception self.server._on_message_safe(None, message) self.assertEqual(message.ack.call_count, 1) self.assertEqual(publish_message.call_count, 1) @mock.patch.object( kombu_server.KombuRPCServer, '_get_rpc_method', mock.MagicMock(return_value=None) ) def test__on_message_rpc_method_not_found(self): request = { 'rpc_ctx': {}, 'rpc_method': 'not_found_method', 'arguments': {} } message = mock.MagicMock() message.properties = { 'reply_to': None, 'correlation_id': None } self.assertRaises( exc.MistralException, self.server._on_message, request, message ) @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') @mock.patch.object(kombu_server.KombuRPCServer, '_get_rpc_method') @mock.patch('mistral.context.MistralContext.from_dict') def test__on_message_is_async(self, mock_get_context, get_rpc_method, publish_message): result = 'result' request = { 'async': True, 'rpc_ctx': {}, 'rpc_method': 'found_method', 'arguments': self.server._serialize_message({ 'a': 1, 'b': 2 }) } message = mock.MagicMock() message.properties = { 'reply_to': None, 'correlation_id': None } message.delivery_info.get.return_value = False rpc_method = mock.MagicMock(return_value=result) get_rpc_method.return_value = rpc_method ctx = context.MistralContext() mock_get_context.return_value = ctx self.server._on_message(request, message) rpc_method.assert_called_once_with( rpc_ctx=ctx, a=1, b=2 ) self.assertEqual(publish_message.call_count, 0) @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message') @mock.patch.object(kombu_server.KombuRPCServer, '_get_rpc_method') @mock.patch('mistral.context.MistralContext.from_dict') def test__on_message_is_sync(self, mock_get_context, get_rpc_method, publish_message): result = 'result' request = { 'async': False, 'rpc_ctx': {}, 'rpc_method': 'found_method', 'arguments': self.server._serialize_message({ 'a': 1, 'b': 2 }) } reply_to = 'reply_to' correlation_id = 'corr_id' message = mock.MagicMock() message.properties = { 'reply_to': reply_to, 'correlation_id': correlation_id } message.delivery_info.get.return_value = False rpc_method = mock.MagicMock(return_value=result) get_rpc_method.return_value = rpc_method ctx = context.MistralContext() mock_get_context.return_value = ctx self.server._on_message(request, message) rpc_method.assert_called_once_with( rpc_ctx=ctx, a=1, b=2 ) publish_message.assert_called_once_with( result, reply_to, correlation_id ) def test__prepare_worker(self): self.server._prepare_worker('blocking') self.assertEqual( futurist.SynchronousExecutor, type(self.server._worker) ) self.server._prepare_worker('threading') self.assertEqual( futurist.ThreadPoolExecutor, type(self.server._worker) ) self.server._prepare_worker('eventlet') self.assertEqual( futurist.GreenThreadPoolExecutor, type(self.server._worker) ) @mock.patch('stevedore.driver.DriverManager') def test__prepare_worker_no_valid_executor(self, driver_manager_mock): driver_manager_mock.side_effect = driver.NoMatches() self.assertRaises( driver.NoMatches, 
self.server._prepare_worker, 'non_valid_executor' ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.153568 mistral-10.0.0.0b3/mistral/tests/unit/scheduler/0000755000175000017500000000000000000000000021665 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/scheduler/__init__.py0000644000175000017500000000000000000000000023764 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/scheduler/test_default_scheduler.py0000644000175000017500000001552700000000000026772 0ustar00coreycorey00000000000000# Copyright 2018 - Nokia Networks. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import event from eventlet import semaphore from eventlet import timeout import datetime import mock from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.scheduler import base as scheduler_base from mistral.scheduler import default_scheduler from mistral.tests.unit import base CONF = cfg.CONF TARGET_METHOD_PATH = ( 'mistral.tests.unit.scheduler.test_default_scheduler.target_method' ) def target_method(): pass class DefaultSchedulerTest(base.DbTestCase): def setUp(self): super(DefaultSchedulerTest, self).setUp() # This Timeout object is needed to raise an exception if the test took # longer than a configured number of seconds. self.timeout = timeout.Timeout(seconds=15) # Synchronization primitives to control when a scheduled invoked # method is allowed to enter the method and exit from it to perform # all needed checks. self.target_mtd_started = event.Event() self.target_mtd_finished = event.Event() self.target_mtd_lock = semaphore.Semaphore(0) self.override_config('fixed_delay', 1, 'scheduler') self.override_config('random_delay', 1, 'scheduler') self.override_config('batch_size', 100, 'scheduler') self.scheduler = default_scheduler.DefaultScheduler(CONF.scheduler) self.scheduler.start() self.addCleanup(self.scheduler.stop, True) self.addCleanup(self.timeout.cancel) def target_method(self, *args, **kwargs): self.target_mtd_started.send() self.target_mtd_lock.acquire() # Note: Potentially we can do something else here. No-op for now. self.target_mtd_finished.send() def _wait_target_method_start(self): self.target_mtd_started.wait() def _unlock_target_method(self): self.target_mtd_lock.release() def _wait_target_method_end(self): self.target_mtd_finished.wait() @mock.patch(TARGET_METHOD_PATH) def test_schedule_called_once(self, method): # Delegate from the module function to the method of the test class. method.side_effect = self.target_method job = scheduler_base.SchedulerJob( run_after=1, func_name=TARGET_METHOD_PATH, func_args={'name': 'task', 'id': '321'} ) self.scheduler.schedule(job) self._wait_target_method_start() # Check that the persistent job has been created and captured. 
scheduled_jobs = db_api.get_scheduled_jobs() self.assertEqual(1, len(scheduled_jobs)) self.assertTrue(self.scheduler.has_scheduled_jobs()) self.assertTrue(self.scheduler.has_scheduled_jobs(processing=True)) self.assertFalse(self.scheduler.has_scheduled_jobs(processing=False)) self.assertTrue( self.scheduler.has_scheduled_jobs(key=None, processing=True) ) self.assertFalse( self.scheduler.has_scheduled_jobs(key=None, processing=False) ) self.assertFalse(self.scheduler.has_scheduled_jobs(key='foobar')) self.assertFalse( self.scheduler.has_scheduled_jobs(key='foobar', processing=True) ) self.assertFalse( self.scheduler.has_scheduled_jobs(key='foobar', processing=False) ) captured_at = scheduled_jobs[0].captured_at self.assertIsNotNone(captured_at) self.assertTrue( datetime.datetime.utcnow() - captured_at < datetime.timedelta(seconds=3) ) self._unlock_target_method() self._wait_target_method_end() method.assert_called_once_with(name='task', id='321') # After the job is processed the persistent object must be deleted. self._await(lambda: not db_api.get_scheduled_jobs()) @mock.patch(TARGET_METHOD_PATH) def test_pickup_from_job_store(self, method): # Delegate from the module function to the method of the test class. method.side_effect = self.target_method self.override_config('pickup_job_after', 1, 'scheduler') # 1. Create a scheduled job in Job Store. execute_at = datetime.datetime.utcnow() + datetime.timedelta(seconds=1) db_api.create_scheduled_job({ 'run_after': 1, 'func_name': TARGET_METHOD_PATH, 'func_args': {'name': 'task', 'id': '321'}, 'execute_at': execute_at, 'captured_at': None, 'auth_ctx': {} }) self.assertEqual(1, len(db_api.get_scheduled_jobs())) self._unlock_target_method() self._wait_target_method_end() # 2. Wait till Scheduler picks up the job and processes it. self._await(lambda: not db_api.get_scheduled_jobs()) method.assert_called_once_with(name='task', id='321') @mock.patch(TARGET_METHOD_PATH) def test_recapture_job(self, method): # Delegate from the module function to the method of the test class. method.side_effect = self.target_method self.override_config('pickup_job_after', 1, 'scheduler') self.override_config('captured_job_timeout', 3, 'scheduler') # 1. Create a scheduled job in Job Store marked as captured in one # second in the future. It can be captured again only after 3 # seconds after that according to the config option. captured_at = datetime.datetime.utcnow() + datetime.timedelta( seconds=1 ) before_ts = datetime.datetime.utcnow() db_api.create_scheduled_job({ 'run_after': 1, 'func_name': TARGET_METHOD_PATH, 'func_args': {'name': 'task', 'id': '321'}, 'execute_at': datetime.datetime.utcnow(), 'captured_at': captured_at, 'auth_ctx': {} }) self.assertEqual(1, len(db_api.get_scheduled_jobs())) self._unlock_target_method() self._wait_target_method_end() # 2. Wait till Scheduler picks up the job and processes it. self._await(lambda: not db_api.get_scheduled_jobs()) method.assert_called_once_with(name='task', id='321') # At least 3 seconds should have passed. 
self.assertTrue( datetime.datetime.utcnow() - before_ts >= datetime.timedelta(seconds=3) ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.153568 mistral-10.0.0.0b3/mistral/tests/unit/services/0000755000175000017500000000000000000000000021532 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/services/__init__.py0000644000175000017500000000000000000000000023631 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/services/test_action_manager.py0000644000175000017500000000360600000000000026117 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.db.v2 import api as db_api from mistral.tests.unit import base class ActionManagerTest(base.DbTestCase): def test_action_input(self): std_http = db_api.get_action_definition("std.http") std_email = db_api.get_action_definition("std.email") http_action_input = ( 'url, method="GET", params=null, body=null, ' 'json=null, headers=null, cookies=null, auth=null, ' 'timeout=null, allow_redirects=null, ' 'proxies=null, verify=null' ) self.assertEqual(http_action_input, std_http.input) std_email_input = ( "from_addr, to_addrs, smtp_server, reply_to=null, cc_addrs=null, " "bcc_addrs=null, smtp_password=null, subject=null, body=null, " "html_body=null" ) self.assertEqual(std_email_input, std_email.input) def test_action_description(self): std_http = db_api.get_action_definition("std.http") std_echo = db_api.get_action_definition("std.echo") self.assertIn("HTTP action", std_http.description) self.assertIn("param body: (optional) Dictionary, bytes", std_http.description) self.assertIn("This action just returns a configured value", std_echo.description) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/services/test_action_service.py0000644000175000017500000001261700000000000026147 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
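# The module below, like several others in this tree, pins pecan auth at
# import time via cfg.CONF.set_default (see the comment that accompanies
# it). A short sketch of why set_default differs from set_override,
# assuming only oslo.config:
#
# from oslo_config import cfg
#
# conf = cfg.ConfigOpts()
# conf.register_opt(cfg.BoolOpt('auth_enable', default=True), group='pecan')
#
# # set_default swaps the fallback value itself; nothing needs to be
# # "un-overridden" later, which is why the tests prefer it.
# conf.set_default('auth_enable', False, group='pecan')
# assert conf.pecan.auth_enable is False
#
# # set_override wins over defaults and config files and is normally
# # paired with clear_override in a test cleanup.
# conf.set_override('auth_enable', True, group='pecan')
# assert conf.pecan.auth_enable is True
#
# conf.clear_override('auth_enable', group='pecan')
# assert conf.pecan.auth_enable is False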
from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.exceptions import DBEntityNotFoundError from mistral.lang import parser as spec_parser from mistral.services import actions as action_service from mistral.tests.unit import base from mistral_lib import utils # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') ACTION_LIST = """ --- version: '2.0' action1: tags: [test, v2] base: std.echo output='Hi' output: result: $ action2: base: std.echo output='Hey' output: result: $ """ UPDATED_ACTION_LIST = """ --- version: '2.0' action1: base: std.echo output='Hi' input: - param1 output: result: $ """ NAMESPACE = 'test_namespace' class ActionServiceTest(base.DbTestCase): def setUp(self): super(ActionServiceTest, self).setUp() self.addCleanup(db_api.delete_action_definitions, name='action1') self.addCleanup(db_api.delete_action_definitions, name='action2') def test_create_actions(self): db_actions = action_service.create_actions(ACTION_LIST) self.assertEqual(2, len(db_actions)) # Action 1. action1_db = self._assert_single_item(db_actions, name='action1') action1_spec = spec_parser.get_action_spec(action1_db.spec) self.assertEqual('action1', action1_spec.get_name()) self.assertListEqual(['test', 'v2'], action1_spec.get_tags()) self.assertEqual('std.echo', action1_spec.get_base()) self.assertDictEqual({'output': 'Hi'}, action1_spec.get_base_input()) # Action 2. action2_db = self._assert_single_item(db_actions, name='action2') self.assertEqual('', action2_db.namespace) action2_spec = spec_parser.get_action_spec(action2_db.spec) self.assertEqual('action2', action2_spec.get_name()) self.assertEqual('std.echo', action1_spec.get_base()) self.assertDictEqual({'output': 'Hey'}, action2_spec.get_base_input()) def test_create_actions_in_namespace(self): db_actions = action_service.create_actions(ACTION_LIST, namespace=NAMESPACE) self.assertEqual(2, len(db_actions)) action1_db = self._assert_single_item(db_actions, name='action1') self.assertEqual(NAMESPACE, action1_db.namespace) action2_db = self._assert_single_item(db_actions, name='action2') self.assertEqual(NAMESPACE, action2_db.namespace) self.assertRaises( DBEntityNotFoundError, db_api.get_action_definition, name='action1', namespace='' ) def test_update_actions(self): db_actions = action_service.create_actions(ACTION_LIST, namespace=NAMESPACE) self.assertEqual(2, len(db_actions)) action1_db = self._assert_single_item(db_actions, name='action1') action1_spec = spec_parser.get_action_spec(action1_db.spec) self.assertEqual('action1', action1_spec.get_name()) self.assertEqual('std.echo', action1_spec.get_base()) self.assertDictEqual({'output': 'Hi'}, action1_spec.get_base_input()) self.assertDictEqual({}, action1_spec.get_input()) db_actions = action_service.update_actions(UPDATED_ACTION_LIST, namespace=NAMESPACE) # Action 1. 
action1_db = self._assert_single_item(db_actions, name='action1') action1_spec = spec_parser.get_action_spec(action1_db.spec) self.assertEqual('action1', action1_spec.get_name()) self.assertListEqual([], action1_spec.get_tags()) self.assertEqual('std.echo', action1_spec.get_base()) self.assertDictEqual({'output': 'Hi'}, action1_spec.get_base_input()) self.assertIn('param1', action1_spec.get_input()) self.assertIs( action1_spec.get_input().get('param1'), utils.NotDefined ) self.assertRaises( DBEntityNotFoundError, action_service.update_actions, UPDATED_ACTION_LIST, namespace='' ) def test_delete_action(self): # Create action. action_service.create_actions(ACTION_LIST, namespace=NAMESPACE) action = db_api.get_action_definition('action1', namespace=NAMESPACE) self.assertEqual(NAMESPACE, action.get('namespace')) self.assertEqual('action1', action.get('name')) # Delete action. db_api.delete_action_definition('action1', namespace=NAMESPACE) self.assertRaises( DBEntityNotFoundError, db_api.get_action_definition, name='action1', namespace=NAMESPACE ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/services/test_event_engine.py0000644000175000017500000002006400000000000025613 0ustar00coreycorey00000000000000# Copyright 2016 Catalyst IT Ltd # Copyright 2017 Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import time import mock from oslo_config import cfg from mistral import context as auth_context from mistral.db.v2.sqlalchemy import api as db_api from mistral.event_engine import default_event_engine as evt_eng from mistral.rpc import clients as rpc from mistral.services import workflows from mistral.tests.unit import base WORKFLOW_LIST = """ --- version: '2.0' my_wf: type: direct tasks: task1: action: std.echo output='Hi!' 
""" EXCHANGE_TOPIC = ('openstack', 'notification') EVENT_TYPE = 'compute.instance.create.start' EVENT_TRIGGER = { 'name': 'trigger1', 'workflow_id': '', 'workflow_input': {}, 'workflow_params': {}, 'exchange': 'openstack', 'topic': 'notification', 'event': EVENT_TYPE, } cfg.CONF.set_default('auth_enable', False, group='pecan') class EventEngineTest(base.DbTestCase): def setUp(self): super(EventEngineTest, self).setUp() self.wf = workflows.create_workflows(WORKFLOW_LIST)[0] EVENT_TRIGGER['workflow_id'] = self.wf.id @mock.patch.object(rpc, 'get_engine_client', mock.Mock()) def test_event_engine_start_with_no_triggers(self): e_engine = evt_eng.DefaultEventEngine() self.addCleanup(e_engine.handler_tg.stop) self.assertEqual(0, len(e_engine.event_triggers_map)) self.assertEqual(0, len(e_engine.exchange_topic_events_map)) self.assertEqual(0, len(e_engine.exchange_topic_listener_map)) @mock.patch('mistral.messaging.start_listener') @mock.patch.object(rpc, 'get_engine_client', mock.Mock()) def test_event_engine_start_with_triggers(self, mock_start): trigger = db_api.create_event_trigger(EVENT_TRIGGER) e_engine = evt_eng.DefaultEventEngine() self.addCleanup(e_engine.handler_tg.stop) self.assertEqual(1, len(e_engine.exchange_topic_events_map)) self.assertEqual( EVENT_TYPE, list(e_engine.exchange_topic_events_map[EXCHANGE_TOPIC])[0] ) self.assertEqual(1, len(e_engine.event_triggers_map)) self.assertEqual(1, len(e_engine.event_triggers_map[EVENT_TYPE])) self._assert_dict_contains_subset( trigger.to_dict(), e_engine.event_triggers_map[EVENT_TYPE][0] ) self.assertEqual(1, len(e_engine.exchange_topic_listener_map)) @mock.patch('mistral.messaging.start_listener') @mock.patch.object(rpc, 'get_engine_client', mock.Mock()) def test_event_engine_public_trigger(self, mock_start): t = copy.deepcopy(EVENT_TRIGGER) # Create public trigger as an admin self.ctx = base.get_context(default=False, admin=True) auth_context.set_ctx(self.ctx) t['scope'] = 'public' t['project_id'] = self.ctx.tenant trigger = db_api.create_event_trigger(t) # Switch to the user. 
self.ctx = base.get_context(default=True) auth_context.set_ctx(self.ctx) e_engine = evt_eng.DefaultEventEngine() self.addCleanup(e_engine.handler_tg.stop) event = { 'event_type': EVENT_TYPE, 'payload': {}, 'publisher': 'fake_publisher', 'timestamp': '', 'context': { 'project_id': '%s' % self.ctx.project_id, 'user_id': 'fake_user' }, } # Moreover, assert that trigger.project_id != event.project_id self.assertNotEqual( trigger.project_id, event['context']['project_id'] ) with mock.patch.object(e_engine, 'engine_client') as client_mock: e_engine.event_queue.put(event) time.sleep(1) self.assertEqual(1, client_mock.start_workflow.call_count) args, kwargs = client_mock.start_workflow.call_args self.assertEqual( (EVENT_TRIGGER['workflow_id'], '', None, {}), args ) self.assertDictEqual( { 'service': 'fake_publisher', 'project_id': '%s' % self.ctx.project_id, 'user_id': 'fake_user', 'timestamp': '' }, kwargs['event_params'] ) @mock.patch('mistral.messaging.start_listener') @mock.patch.object(rpc, 'get_engine_client', mock.Mock()) def test_process_event_queue(self, mock_start): EVENT_TRIGGER['project_id'] = self.ctx.project_id db_api.create_event_trigger(EVENT_TRIGGER) e_engine = evt_eng.DefaultEventEngine() self.addCleanup(e_engine.handler_tg.stop) event = { 'event_type': EVENT_TYPE, 'payload': {}, 'publisher': 'fake_publisher', 'timestamp': '', 'context': { 'project_id': '%s' % self.ctx.project_id, 'user_id': 'fake_user' }, } with mock.patch.object(e_engine, 'engine_client') as client_mock: e_engine.event_queue.put(event) time.sleep(1) self.assertEqual(1, client_mock.start_workflow.call_count) args, kwargs = client_mock.start_workflow.call_args self.assertEqual( (EVENT_TRIGGER['workflow_id'], '', None, {}), args ) self.assertDictEqual( { 'service': 'fake_publisher', 'project_id': '%s' % self.ctx.project_id, 'user_id': 'fake_user', 'timestamp': '' }, kwargs['event_params'] ) class NotificationsConverterTest(base.BaseTest): def test_convert(self): definition_cfg = [ { 'event_types': EVENT_TYPE, 'properties': {'resource_id': '<% $.payload.instance_id %>'} } ] converter = evt_eng.NotificationsConverter() converter.definitions = [evt_eng.EventDefinition(event_def) for event_def in reversed(definition_cfg)] notification = { 'event_type': EVENT_TYPE, 'payload': {'instance_id': '12345'}, 'publisher': 'fake_publisher', 'timestamp': '', 'context': {'project_id': 'fake_project', 'user_id': 'fake_user'} } event = converter.convert(EVENT_TYPE, notification) self.assertDictEqual( {'resource_id': '12345'}, event ) def test_convert_event_type_not_defined(self): definition_cfg = [ { 'event_types': EVENT_TYPE, 'properties': {'resource_id': '<% $.payload.instance_id %>'} } ] converter = evt_eng.NotificationsConverter() converter.definitions = [evt_eng.EventDefinition(event_def) for event_def in reversed(definition_cfg)] notification = { 'event_type': 'fake_event', 'payload': {'instance_id': '12345'}, 'publisher': 'fake_publisher', 'timestamp': '', 'context': {'project_id': 'fake_project', 'user_id': 'fake_user'} } event = converter.convert('fake_event', notification) self.assertDictEqual( { 'service': 'fake_publisher', 'project_id': 'fake_project', 'user_id': 'fake_user', 'timestamp': '' }, event ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/services/test_expiration_policy.py0000644000175000017500000003255600000000000026717 0ustar00coreycorey00000000000000# Copyright 2015 - Alcatel-lucent, Inc. 
# Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from mistral import context as ctx from mistral.db.v2 import api as db_api from mistral.services import expiration_policy from mistral.services.expiration_policy import ExecutionExpirationPolicy from mistral.tests.unit import base from mistral.tests.unit.base import get_context from mistral_lib import utils from oslo_config import cfg def _create_workflow_executions(): time_now = utils.utc_now_sec() wf_execs = [ { 'id': 'success_expired', 'name': 'success_expired', 'created_at': time_now - datetime.timedelta(minutes=60), 'updated_at': time_now - datetime.timedelta(minutes=59), 'workflow_name': 'test_exec', 'state': "SUCCESS", }, { 'id': 'error_expired', 'name': 'error_expired', 'created_at': time_now - datetime.timedelta(days=3, minutes=10), 'updated_at': time_now - datetime.timedelta(days=3), 'workflow_name': 'test_exec', 'state': "ERROR", }, { 'id': 'running_not_expired', 'name': 'running_not_expired', 'created_at': time_now - datetime.timedelta(days=3, minutes=10), 'updated_at': time_now - datetime.timedelta(days=3), 'workflow_name': 'test_exec', 'state': "RUNNING", }, { 'id': 'running_not_expired2', 'name': 'running_not_expired2', 'created_at': time_now - datetime.timedelta(days=3, minutes=10), 'updated_at': time_now - datetime.timedelta(days=4), 'workflow_name': 'test_exec', 'state': "RUNNING", }, { 'id': 'success_not_expired', 'name': 'success_not_expired', 'created_at': time_now - datetime.timedelta(minutes=15), 'updated_at': time_now - datetime.timedelta(minutes=5), 'workflow_name': 'test_exec', 'state': "SUCCESS", }, { 'id': 'abc', 'name': 'cancelled_expired', 'created_at': time_now - datetime.timedelta(minutes=60), 'updated_at': time_now - datetime.timedelta(minutes=59), 'workflow_name': 'test_exec', 'state': "CANCELLED", }, { 'id': 'cancelled_not_expired', 'name': 'cancelled_not_expired', 'created_at': time_now - datetime.timedelta(minutes=15), 'updated_at': time_now - datetime.timedelta(minutes=6), 'workflow_name': 'test_exec', 'state': "CANCELLED", } ] for wf_exec in wf_execs: db_api.create_workflow_execution(wf_exec) # Create a nested workflow execution. db_api.create_task_execution( { 'id': 'running_not_expired', 'workflow_execution_id': 'success_not_expired', 'name': 'my_task' } ) db_api.create_workflow_execution( { 'id': 'expired_but_not_a_parent', 'name': 'expired_but_not_a_parent', 'created_at': time_now - datetime.timedelta(days=15), 'updated_at': time_now - datetime.timedelta(days=10), 'workflow_name': 'test_exec', 'state': "SUCCESS", 'task_execution_id': 'running_not_expired' } ) def _switch_context(is_default, is_admin): ctx.set_ctx(get_context(is_default, is_admin)) class ExpirationPolicyTest(base.DbTestCase): def test_expiration_policy_for_executions_with_different_project_id(self): # Delete execution uses a secured filtering and we need # to verify that admin able to do that for other projects. 
cfg.CONF.set_default('auth_enable', True, group='pecan') # Since we are removing other projects execution, # we want to load the executions with other project_id. _switch_context(False, False) _create_workflow_executions() now = datetime.datetime.utcnow() # This execution has a parent wf and testing that we are # querying only for parent wfs. exec_child = db_api.get_workflow_execution('expired_but_not_a_parent') self.assertEqual('running_not_expired', exec_child.task_execution_id) # Call for all expired wfs execs. execs = db_api.get_expired_executions(now) # Should be only 5, the RUNNING execution shouldn't return, # so the child wf (that has parent task id). self.assertEqual(5, len(execs)) # Switch context to Admin since expiration policy running as Admin. _switch_context(True, True) _set_expiration_policy_config(evaluation_interval=1, older_than=30) expiration_policy.run_execution_expiration_policy(self, ctx) # Only non_expired available (update_at < older_than). execs = db_api.get_expired_executions(now) self.assertEqual(2, len(execs)) self.assertListEqual( [ 'cancelled_not_expired', 'success_not_expired' ], sorted([ex.id for ex in execs]) ) _set_expiration_policy_config(evaluation_interval=1, older_than=5) expiration_policy.run_execution_expiration_policy(self, ctx) execs = db_api.get_expired_executions(now) self.assertEqual(0, len(execs)) def test_expiration_policy_for_executions_with_ignored_states(self): _create_workflow_executions() now = datetime.datetime.utcnow() _set_expiration_policy_config( evaluation_interval=1, older_than=30, ignored_states=['SUCCESS'] ) expiration_policy.run_execution_expiration_policy(self, ctx) execs = db_api.get_expired_executions(now) self.assertEqual(1, len(execs)) self.assertEqual('cancelled_not_expired', execs[0].get('id')) _set_expiration_policy_config( evaluation_interval=1, older_than=30, ignored_states=['SUCCESS', 'CANCELLED'] ) expiration_policy.run_execution_expiration_policy(self, ctx) execs = db_api.get_expired_executions(now) self.assertEqual(0, len(execs)) def test_expiration_policy_invalid_ignored_states(self): _set_expiration_policy_config( evaluation_interval=1, older_than=30, ignored_states=['RUNNING'] ) self.assertRaises(ValueError, expiration_policy.setup) def test_deletion_of_expired_executions_with_batch_size_scenario1(self): """scenario1 This test will use batch_size of 3, 5 expired executions and different values of "older_than" which is 30 and 5 minutes respectively. Expected_result: All expired executions are successfully deleted. """ _create_workflow_executions() now = datetime.datetime.utcnow() _set_expiration_policy_config( evaluation_interval=1, older_than=30, batch_size=3 ) expiration_policy.run_execution_expiration_policy(self, ctx) execs = db_api.get_expired_executions(now) self.assertEqual(2, len(execs)) _set_expiration_policy_config(evaluation_interval=1, older_than=5) expiration_policy.run_execution_expiration_policy(self, ctx) execs = db_api.get_expired_executions(now) self.assertEqual(0, len(execs)) def test_deletion_of_expired_executions_with_batch_size_scenario2(self): """scenario2 This test will use batch_size of 2, 5 expired executions with value of "older_than" that is 5 minutes. Expected_result: All expired executions are successfully deleted. 
""" _create_workflow_executions() now = datetime.datetime.utcnow() _set_expiration_policy_config( evaluation_interval=1, older_than=5, batch_size=2 ) expiration_policy.run_execution_expiration_policy(self, ctx) execs = db_api.get_expired_executions(now) self.assertEqual(0, len(execs)) def test_expiration_policy_for_executions_with_max_executions_scen1(self): """scenario1 Tests the max_executions logic with max_finished_executions = 'total not expired and completed executions' - 1 """ _create_workflow_executions() _set_expiration_policy_config( evaluation_interval=1, older_than=30, mfe=1 ) expiration_policy.run_execution_expiration_policy(self, ctx) # Assert the two running executions # (running_not_expired, running_not_expired2), # the sub execution (expired_but_not_a_parent) and the one allowed # finished execution (success_not_expired) are there. execs = db_api.get_workflow_executions() self.assertEqual(4, len(execs)) self.assertListEqual( [ 'expired_but_not_a_parent', 'running_not_expired', 'running_not_expired2', 'success_not_expired' ], sorted([ex.id for ex in execs]) ) def test_expiration_policy_for_executions_with_max_executions_scen2(self): """scenario2 Tests the max_executions logic with: max_finished_executions > total completed executions """ _create_workflow_executions() _set_expiration_policy_config( evaluation_interval=1, older_than=30, mfe=100 ) expiration_policy.run_execution_expiration_policy(self, ctx) # Assert the two running executions # (running_not_expired, running_not_expired2), the sub execution # (expired_but_not_a_parent) and the all finished execution # (success_not_expired, 'cancelled_not_expired') are there. execs = db_api.get_workflow_executions() self.assertEqual(5, len(execs)) self.assertListEqual( [ 'cancelled_not_expired', 'expired_but_not_a_parent', 'running_not_expired', 'running_not_expired2', 'success_not_expired' ], sorted([ex.id for ex in execs]) ) def test_periodic_task_parameters(self): _set_expiration_policy_config( evaluation_interval=17, older_than=13 ) e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) self.assertEqual( 17 * 60, e_policy._periodic_spacing['run_execution_expiration_policy'] ) def test_periodic_task_scheduling(self): def _assert_scheduling(expiration_policy_config, should_schedule): ExecutionExpirationPolicy._periodic_tasks = [] _set_expiration_policy_config(*expiration_policy_config) e_policy = expiration_policy.ExecutionExpirationPolicy(cfg.CONF) if should_schedule: self.assertTrue( e_policy._periodic_tasks, "Periodic task should have been created." ) else: self.assertFalse( e_policy._periodic_tasks, "Periodic task shouldn't have been created." 
                )

        _assert_scheduling([1, 1, None, None], True)
        _assert_scheduling([1, None, 1, None], True)
        _assert_scheduling([1, 1, 1, None], True)
        _assert_scheduling([1, None, None, None], False)
        _assert_scheduling([None, 1, 1, None], False)
        _assert_scheduling([None, 1, 1, None], False)
        _assert_scheduling([1, 0, 0, 0], False)
        _assert_scheduling([0, 1, 1, 0], False)
        _assert_scheduling([0, 1, 1, 0], False)

    def tearDown(self):
        """Restores the modified config options to their defaults."""
        super(ExpirationPolicyTest, self).tearDown()

        cfg.CONF.set_default('auth_enable', False, group='pecan')

        ctx.set_ctx(None)

        _set_expiration_policy_config(None, None, None, None)


def _set_expiration_policy_config(evaluation_interval, older_than, mfe=0,
                                  batch_size=0, ignored_states=None):
    # A mutable default ([]) would be created once and shared across
    # calls, so take None and substitute a fresh list here instead.
    if ignored_states is None:
        ignored_states = []

    cfg.CONF.set_default(
        'evaluation_interval',
        evaluation_interval,
        group='execution_expiration_policy'
    )
    cfg.CONF.set_default(
        'older_than',
        older_than,
        group='execution_expiration_policy'
    )
    cfg.CONF.set_default(
        'max_finished_executions',
        mfe,
        group='execution_expiration_policy'
    )
    cfg.CONF.set_default(
        'batch_size',
        batch_size,
        group='execution_expiration_policy'
    )
    cfg.CONF.set_default(
        'ignored_states',
        ignored_states,
        group='execution_expiration_policy'
    )


# --- File: mistral/tests/unit/services/test_legacy_scheduler.py ---
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Licensed under the Apache License, Version 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
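# A classic Python pitfall worth noting next to the list-valued
# ignored_states parameter of _set_expiration_policy_config above: a
# mutable default argument is created once, at function definition time,
# and shared across calls. A minimal illustration (hypothetical
# functions):
#
# def buggy(item, acc=[]):      # one shared list, created at def time
#     acc.append(item)
#     return acc
#
# buggy('a')                    # ['a']
# buggy('b')                    # ['a', 'b'] -- state leaks across calls
#
# def fixed(item, acc=None):    # fresh list per call
#     acc = [] if acc is None else acc
#     acc.append(item)
#     return acc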
import datetime import eventlet import mock from eventlet import queue from eventlet import timeout from oslo_config import cfg from mistral import context as auth_context from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral.scheduler import base as sched_base from mistral.services import legacy_scheduler from mistral.tests.unit import base from mistral_lib import actions as ml_actions CONF = cfg.CONF TARGET_METHOD_PATH = ( 'mistral.tests.unit.services.test_legacy_scheduler.target_method' ) DELAY = 1.5 def get_time_delay(delay=DELAY * 2): return datetime.datetime.utcnow() + datetime.timedelta(seconds=delay) def target_method(): pass class LegacySchedulerTest(base.DbTestCase): def setUp(self): super(LegacySchedulerTest, self).setUp() self.timeout = timeout.Timeout(seconds=10) self.queue = queue.Queue() self.override_config('fixed_delay', 1, 'scheduler') self.override_config('random_delay', 0, 'scheduler') self.override_config('batch_size', 100, 'scheduler') self.scheduler = legacy_scheduler.LegacyScheduler(CONF.scheduler) self.scheduler.start() self.addCleanup(self.scheduler.stop, True) self.addCleanup(self.timeout.cancel) def target_method(self, *args, **kwargs): self.queue.put(item="item") def target_check_context_method(self, expected_project_id): actual_project_id = auth_context.ctx().project_id self.queue.put(item=(expected_project_id == actual_project_id)) @mock.patch(TARGET_METHOD_PATH) def test_scheduler_with_factory(self, factory): target_method_name = 'run_something' factory.return_value = type( 'something', (object,), { target_method_name: mock.MagicMock(side_effect=self.target_method) } ) job = sched_base.SchedulerJob( run_after=DELAY, target_factory_func_name=TARGET_METHOD_PATH, func_name=target_method_name, func_args={'name': 'task', 'id': '123'}, key='my_job_key' ) self.scheduler.schedule(job) calls = db_api.get_delayed_calls_to_start(get_time_delay()) call = self._assert_single_item( calls, target_method_name=target_method_name, key='my_job_key' ) self.assertIn('name', call['method_arguments']) self.queue.get() factory().run_something.assert_called_once_with(name='task', id='123') calls = db_api.get_delayed_calls_to_start(get_time_delay()) self.assertEqual(0, len(calls)) @mock.patch(TARGET_METHOD_PATH) def test_scheduler_without_factory(self, method): method.side_effect = self.target_method job = sched_base.SchedulerJob( run_after=DELAY, func_name=TARGET_METHOD_PATH, func_args={'name': 'task', 'id': '321'}, key='my_job_key' ) self.scheduler.schedule(job) calls = db_api.get_delayed_calls_to_start(get_time_delay()) call = self._assert_single_item( calls, target_method_name=TARGET_METHOD_PATH, key='my_job_key' ) self.assertIn('name', call['method_arguments']) self.queue.get() method.assert_called_once_with(name='task', id='321') calls = db_api.get_delayed_calls_to_start(get_time_delay()) self.assertEqual(0, len(calls)) @mock.patch(TARGET_METHOD_PATH) def test_scheduler_call_target_method_with_correct_auth(self, method): method.side_effect = self.target_check_context_method default_context = base.get_context(default=True) auth_context.set_ctx(default_context) default_project_id = default_context.project_id job = sched_base.SchedulerJob( run_after=DELAY, func_name=TARGET_METHOD_PATH, func_args={'expected_project_id': default_project_id} ) self.scheduler.schedule(job) second_context = base.get_context(default=False) auth_context.set_ctx(second_context) second_project_id = second_context.project_id job = sched_base.SchedulerJob( 
run_after=DELAY, func_name=TARGET_METHOD_PATH, func_args={'expected_project_id': second_project_id} ) self.scheduler.schedule(job) self.assertNotEqual(default_project_id, second_project_id) for _ in range(2): self.assertTrue(self.queue.get()) @mock.patch(TARGET_METHOD_PATH) def test_scheduler_with_serializer(self, factory): target_method_name = 'run_something' factory.return_value = type( 'something', (object,), { target_method_name: mock.MagicMock(side_effect=self.target_method) } ) task_result = ml_actions.Result('data', 'error') method_args = { 'name': 'task', 'id': '123', 'result': task_result } serializers = { 'result': 'mistral.workflow.utils.ResultSerializer' } job = sched_base.SchedulerJob( run_after=DELAY, target_factory_func_name=TARGET_METHOD_PATH, func_name=target_method_name, func_args=method_args, func_arg_serializers=serializers ) self.scheduler.schedule(job) calls = db_api.get_delayed_calls_to_start(get_time_delay()) call = self._assert_single_item( calls, target_method_name=target_method_name ) self.assertIn('name', call['method_arguments']) self.queue.get() result = factory().run_something.call_args[1].get('result') self.assertIsInstance(result, ml_actions.Result) self.assertEqual('data', result.data) self.assertEqual('error', result.error) calls = db_api.get_delayed_calls_to_start(get_time_delay()) self.assertEqual(0, len(calls)) @mock.patch(TARGET_METHOD_PATH) def test_scheduler_multi_instance(self, method): method.side_effect = self.target_method second_scheduler = legacy_scheduler.LegacyScheduler(CONF.scheduler) second_scheduler.start() self.addCleanup(second_scheduler.stop, True) job = sched_base.SchedulerJob( run_after=DELAY, func_name=TARGET_METHOD_PATH, func_args={'name': 'task', 'id': '321'}, ) second_scheduler.schedule(job) calls = db_api.get_delayed_calls_to_start(get_time_delay()) self._assert_single_item(calls, target_method_name=TARGET_METHOD_PATH) self.queue.get() method.assert_called_once_with(name='task', id='321') calls = db_api.get_delayed_calls_to_start(get_time_delay()) self.assertEqual(0, len(calls)) @mock.patch(TARGET_METHOD_PATH) def test_scheduler_delete_calls(self, method): method.side_effect = self.target_method job = sched_base.SchedulerJob( run_after=DELAY, func_name=TARGET_METHOD_PATH, func_args={'name': 'task', 'id': '321'}, ) self.scheduler.schedule(job) calls = db_api.get_delayed_calls_to_start(get_time_delay()) self._assert_single_item(calls, target_method_name=TARGET_METHOD_PATH) self.queue.get() eventlet.sleep(0.1) self.assertRaises( exc.DBEntityNotFoundError, db_api.get_delayed_call, calls[0].id ) @mock.patch(TARGET_METHOD_PATH) def test_processing_true_does_not_return_in_get_delayed_calls_to_start( self, method): method.side_effect = self.target_method values = { 'factory_method_path': None, 'target_method_name': TARGET_METHOD_PATH, 'execution_time': get_time_delay(), 'auth_context': None, 'serializers': None, 'method_arguments': None, 'processing': True } call = db_api.create_delayed_call(values) calls = db_api.get_delayed_calls_to_start(get_time_delay(10)) self.assertEqual(0, len(calls)) db_api.delete_delayed_call(call.id) @mock.patch.object(db_api, 'update_delayed_call') def test_scheduler_doesnt_handle_calls_the_failed_on_update( self, update_delayed_call): def update_call_failed(id, values, query_filter): self.queue.put("item") return None, 0 update_delayed_call.side_effect = update_call_failed job = sched_base.SchedulerJob( run_after=DELAY, func_name=TARGET_METHOD_PATH, func_args={'name': 'task', 'id': '321'}, ) 
self.scheduler.schedule(job) calls = db_api.get_delayed_calls_to_start(get_time_delay()) self.queue.get() eventlet.sleep(1) update_delayed_call.assert_called_with( id=calls[0].id, values=mock.ANY, query_filter=mock.ANY ) # If the scheduler had handled (and deleted) the call that failed on # update, get_delayed_call() would raise DBEntityNotFoundError here. db_api.get_delayed_call(calls[0].id) db_api.delete_delayed_call(calls[0].id) def test_scheduler_with_custom_batch_size(self): self.scheduler.stop() number_delayed_calls = 5 processed_calls_at_time = [] real_delete_calls_method = legacy_scheduler.LegacyScheduler.delete_calls @staticmethod def delete_calls_counter(delayed_calls): real_delete_calls_method(delayed_calls) for _ in range(len(delayed_calls)): self.queue.put("item") processed_calls_at_time.append(len(delayed_calls)) legacy_scheduler.LegacyScheduler.delete_calls = delete_calls_counter # Restore the original class method after the test, otherwise the # patch above would leak into other tests. self.addCleanup( setattr, legacy_scheduler.LegacyScheduler, 'delete_calls', real_delete_calls_method ) # Create 5 delayed calls. for i in range(number_delayed_calls): job = sched_base.SchedulerJob( run_after=DELAY, func_name=TARGET_METHOD_PATH, func_args={'name': 'task', 'id': i}, ) self.scheduler.schedule(job) # Start a scheduler that processes 2 calls at a time. self.override_config('batch_size', 2, 'scheduler') self.scheduler = legacy_scheduler.LegacyScheduler(CONF.scheduler) self.scheduler.start() # Wait until all the calls have been processed. for _ in range(number_delayed_calls): self.queue.get() self.assertListEqual([1, 2, 2], sorted(processed_calls_at_time)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/services/test_trigger_service.py0000644000175000017500000002453500000000000026337 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import eventlet import mock from oslo_config import cfg from mistral import exceptions as exc from mistral.rpc import clients as rpc from mistral.services import periodic from mistral.services import security from mistral.services import triggers as t_s from mistral.services import workflows from mistral.tests.unit import base from mistral_lib import utils # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WORKFLOW_LIST = """ --- version: '2.0' my_wf: type: direct tasks: task1: action: std.echo output='Hi!' """ advance_cron_trigger_orig = periodic.advance_cron_trigger def new_advance_cron_trigger(ct): """Wrap the original advance_cron_trigger method. This method makes sure that the other coroutines will also run while this thread is executing. Without explicitly passing control to another coroutine, process_cron_triggers_v2 would finish looping over all the cron triggers in a single coroutine without any sharing at all.
""" eventlet.sleep() modified = advance_cron_trigger_orig(ct) eventlet.sleep() return modified class TriggerServiceV2Test(base.DbTestCase): def setUp(self): super(TriggerServiceV2Test, self).setUp() self.wf = workflows.create_workflows(WORKFLOW_LIST)[0] def test_trigger_create(self): trigger = t_s.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), self.wf.name, {}, {}, '*/5 * * * *', None, None, datetime.datetime(2010, 8, 25) ) self.assertEqual( datetime.datetime(2010, 8, 25, 0, 5), trigger.next_execution_time ) next_time = t_s.get_next_execution_time( trigger['pattern'], trigger.next_execution_time ) self.assertEqual(datetime.datetime(2010, 8, 25, 0, 10), next_time) def test_trigger_create_with_wf_id(self): trigger = t_s.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), None, {}, {}, '*/5 * * * *', None, None, datetime.datetime(2010, 8, 25), workflow_id=self.wf.id ) self.assertEqual(self.wf.name, trigger.workflow_name) def test_trigger_create_the_same_first_time_or_count(self): t_s.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), self.wf.name, {}, {}, '*/5 * * * *', "4242-12-25 13:37", 2, datetime.datetime(2010, 8, 25) ) t_s.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), self.wf.name, {}, {}, '*/5 * * * *', "4242-12-25 13:37", 4, datetime.datetime(2010, 8, 25) ) t_s.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), self.wf.name, {}, {}, '*/5 * * * *', "5353-12-25 13:37", 2, datetime.datetime(2010, 8, 25) ) # Creations above should be ok. # But creation with the same count and first time # simultaneously leads to error. self.assertRaises( exc.DBDuplicateEntryError, t_s.create_cron_trigger, 'trigger-%s' % utils.generate_unicode_uuid(), self.wf.name, {}, {}, '*/5 * * * *', "4242-12-25 13:37", 2, None ) def test_trigger_create_wrong_workflow_input(self): wf_with_input = """--- version: '2.0' some_wf: input: - some_var tasks: some_task: action: std.echo output=<% $.some_var %> """ workflows.create_workflows(wf_with_input) exception = self.assertRaises( exc.InputException, t_s.create_cron_trigger, 'trigger-%s' % utils.generate_unicode_uuid(), 'some_wf', {}, {}, '*/5 * * * *', None, None, datetime.datetime(2010, 8, 25) ) self.assertIn('Invalid input', str(exception)) self.assertIn('some_wf', str(exception)) def test_oneshot_trigger_create(self): trigger = t_s.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), self.wf.name, {}, {}, None, "4242-12-25 13:37", None, datetime.datetime(2010, 8, 25) ) self.assertEqual( datetime.datetime(4242, 12, 25, 13, 37), trigger.next_execution_time ) @mock.patch.object(security, 'create_trust', type('trust', (object,), {'id': 'my_trust_id'})) def test_create_trust_in_trigger(self): cfg.CONF.set_default('auth_enable', True, group='pecan') self.addCleanup( cfg.CONF.set_default, 'auth_enable', False, group='pecan' ) trigger = t_s.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), self.wf.name, {}, {}, '*/2 * * * *', None, None, datetime.datetime(2010, 8, 25) ) self.assertEqual('my_trust_id', trigger.trust_id) @mock.patch.object(security, 'create_trust', type('trust', (object,), {'id': 'my_trust_id'})) @mock.patch.object(security, 'create_context') @mock.patch.object(rpc.EngineClient, 'start_workflow', mock.Mock()) @mock.patch( 'mistral.services.periodic.advance_cron_trigger', mock.MagicMock(side_effect=new_advance_cron_trigger) ) @mock.patch.object(security, 'delete_trust') def test_create_delete_trust_in_trigger(self, delete_trust, create_ctx): 
create_ctx.return_value = self.ctx cfg.CONF.set_default('auth_enable', True, group='pecan') trigger_thread = periodic.setup() self.addCleanup(trigger_thread.stop) self.addCleanup( cfg.CONF.set_default, 'auth_enable', False, group='pecan' ) t_s.create_cron_trigger( 'trigger-%s' % utils.generate_unicode_uuid(), self.wf.name, {}, {}, '* * * * * *', None, 1, datetime.datetime(2010, 8, 25) ) eventlet.sleep(1) self.assertEqual(0, delete_trust.call_count) def test_get_trigger_in_correct_orders(self): t1_name = 'trigger-%s' % utils.generate_unicode_uuid() t_s.create_cron_trigger( t1_name, self.wf.name, {}, pattern='*/5 * * * *', start_time=datetime.datetime(2010, 8, 25) ) t2_name = 'trigger-%s' % utils.generate_unicode_uuid() t_s.create_cron_trigger( t2_name, self.wf.name, {}, pattern='*/1 * * * *', start_time=datetime.datetime(2010, 8, 22) ) t3_name = 'trigger-%s' % utils.generate_unicode_uuid() t_s.create_cron_trigger( t3_name, self.wf.name, {}, pattern='*/2 * * * *', start_time=datetime.datetime(2010, 9, 21) ) t4_name = 'trigger-%s' % utils.generate_unicode_uuid() t_s.create_cron_trigger( t4_name, self.wf.name, {}, pattern='*/3 * * * *', start_time=datetime.datetime.utcnow() + datetime.timedelta(0, 50) ) trigger_names = [t.name for t in t_s.get_next_cron_triggers()] self.assertEqual([t2_name, t1_name, t3_name], trigger_names) @mock.patch( 'mistral.services.periodic.advance_cron_trigger', mock.MagicMock(side_effect=new_advance_cron_trigger) ) @mock.patch.object(rpc.EngineClient, 'start_workflow') def test_single_execution_with_multiple_processes(self, start_wf_mock): def stop_thread_groups(): print('Killing cron trigger threads...') [tg.stop() for tg in self.trigger_threads] self.trigger_threads = [ periodic.setup(), periodic.setup(), periodic.setup() ] self.addCleanup(stop_thread_groups) trigger_count = 5 t_s.create_cron_trigger( 'ct1', self.wf.name, {}, {}, '* * * * * */1', # Every second None, trigger_count, datetime.datetime(2010, 8, 25) ) # Wait until there are 'trigger_count' executions. self._await( lambda: self._wait_for_single_execution_with_multiple_processes( trigger_count, start_wf_mock ) ) # Wait some more and make sure there are no more than 'trigger_count' # executions. eventlet.sleep(5) self.assertEqual(trigger_count, start_wf_mock.call_count) def _wait_for_single_execution_with_multiple_processes(self, trigger_count, start_wf_mock): eventlet.sleep(1) return trigger_count == start_wf_mock.call_count def test_get_next_execution_time(self): pattern = '*/20 * * * *' start_time = datetime.datetime(2016, 3, 22, 23, 40) result = t_s.get_next_execution_time(pattern, start_time) self.assertEqual(result, datetime.datetime(2016, 3, 23, 0, 0)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/services/test_workbook_service.py0000644000175000017500000002121700000000000026523 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.lang import parser as spec_parser from mistral.services import workbooks as wb_service from mistral.tests.unit import base # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WORKBOOK = """ --- version: '2.0' name: my_wb tags: [test] actions: concat: base: std.echo base-input: output: "{$.str1}{$.str2}" workflows: wf1: #Sample Comment 1 type: reverse tags: [wf_test] input: - param1 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}" publish: result: "{$}" wf2: type: direct output: result: "{$.result}" tasks: task1: workflow: my_wb.wf1 param1='Hi' task_name='task1' publish: result: "The result of subworkflow is '{$.final_result}'" """ WORKBOOK_WF1_DEFINITION = """wf1: #Sample Comment 1 type: reverse tags: [wf_test] input: - param1 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}" publish: result: "{$}" """ WORKBOOK_WF2_DEFINITION = """wf2: type: direct output: result: "{$.result}" tasks: task1: workflow: my_wb.wf1 param1='Hi' task_name='task1' publish: result: "The result of subworkflow is '{$.final_result}'" """ UPDATED_WORKBOOK = """ --- version: '2.0' name: my_wb tags: [test] actions: concat: base: std.echo base-input: output: "{$.str1}{$.str2}" workflows: wf1: type: direct output: result: "{$.result}" tasks: task1: workflow: my_wb.wf2 param1='Hi' task_name='task1' publish: result: "The result of subworkflow is '{$.final_result}'" wf2: type: reverse input: - param1 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}" publish: result: "{$}" """ UPDATED_WORKBOOK_WF1_DEFINITION = """wf1: type: direct output: result: "{$.result}" tasks: task1: workflow: my_wb.wf2 param1='Hi' task_name='task1' publish: result: "The result of subworkflow is '{$.final_result}'" """ UPDATED_WORKBOOK_WF2_DEFINITION = """wf2: type: reverse input: - param1 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}" publish: result: "{$}" """ ACTION_DEFINITION = """concat: base: std.echo base-input: output: "{$.str1}{$.str2}" """ class WorkbookServiceTest(base.DbTestCase): def test_create_workbook(self): namespace = 'test_workbook_service_0123_namespace' wb_db = wb_service.create_workbook_v2(WORKBOOK, namespace=namespace) self.assertIsNotNone(wb_db) self.assertEqual('my_wb', wb_db.name) self.assertEqual(namespace, wb_db.namespace) self.assertEqual(WORKBOOK, wb_db.definition) self.assertIsNotNone(wb_db.spec) self.assertListEqual(['test'], wb_db.tags) db_actions = db_api.get_action_definitions( name='my_wb.concat', namespace=namespace ) self.assertEqual(1, len(db_actions)) # Action. action_db = self._assert_single_item(db_actions, name='my_wb.concat') self.assertFalse(action_db.is_system) action_spec = spec_parser.get_action_spec(action_db.spec) self.assertEqual('concat', action_spec.get_name()) self.assertEqual('std.echo', action_spec.get_base()) self.assertEqual(ACTION_DEFINITION, action_db.definition) db_wfs = db_api.get_workflow_definitions() self.assertEqual(2, len(db_wfs)) # Workflow 1. 
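# Both workflow definitions are expected to keep the exact text they
# had inside the workbook (WORKBOOK_WF1_DEFINITION even includes the
# inline "#Sample Comment 1"), which the definition comparisons below
# rely on.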
wf1_db = self._assert_single_item(db_wfs, name='my_wb.wf1') wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec) self.assertEqual('wf1', wf1_spec.get_name()) self.assertEqual('reverse', wf1_spec.get_type()) self.assertListEqual(['wf_test'], wf1_spec.get_tags()) self.assertListEqual(['wf_test'], wf1_db.tags) self.assertEqual(namespace, wf1_db.namespace) self.assertEqual(WORKBOOK_WF1_DEFINITION, wf1_db.definition) # Workflow 2. wf2_db = self._assert_single_item(db_wfs, name='my_wb.wf2') wf2_spec = spec_parser.get_workflow_spec(wf2_db.spec) self.assertEqual('wf2', wf2_spec.get_name()) self.assertEqual('direct', wf2_spec.get_type()) self.assertEqual(namespace, wf2_db.namespace) self.assertEqual(WORKBOOK_WF2_DEFINITION, wf2_db.definition) def test_create_same_workbook_in_different_namespaces(self): first_namespace = 'first_namespace' second_namespace = 'second_namespace' first_wb = wb_service.create_workbook_v2(WORKBOOK, namespace=first_namespace) self.assertIsNotNone(first_wb) self.assertEqual('my_wb', first_wb.name) self.assertEqual(first_namespace, first_wb.namespace) second_wb = wb_service.create_workbook_v2(WORKBOOK, namespace=second_namespace) self.assertIsNotNone(second_wb) self.assertEqual('my_wb', second_wb.name) self.assertEqual(second_namespace, second_wb.namespace) def test_create_workbook_with_default_namespace(self): wb_db = wb_service.create_workbook_v2(WORKBOOK) self.assertIsNotNone(wb_db) self.assertEqual('my_wb', wb_db.name) self.assertEqual('', wb_db.namespace) db_api.delete_workbook('my_wb') def test_update_workbook(self): namespace = 'test_workbook_service_0123_namespace' # Create workbook. wb_db = wb_service.create_workbook_v2(WORKBOOK, namespace=namespace) self.assertIsNotNone(wb_db) self.assertEqual(2, len(db_api.get_workflow_definitions())) # Update workbook. wb_db = wb_service.update_workbook_v2( UPDATED_WORKBOOK, namespace=namespace ) self.assertIsNotNone(wb_db) self.assertEqual('my_wb', wb_db.name) self.assertEqual(namespace, wb_db.namespace) self.assertEqual(UPDATED_WORKBOOK, wb_db.definition) self.assertListEqual(['test'], wb_db.tags) db_wfs = db_api.get_workflow_definitions() self.assertEqual(2, len(db_wfs)) # Workflow 1. wf1_db = self._assert_single_item(db_wfs, name='my_wb.wf1') wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec) self.assertEqual('wf1', wf1_spec.get_name()) self.assertEqual('direct', wf1_spec.get_type()) self.assertEqual(namespace, wf1_db.namespace) self.assertEqual(UPDATED_WORKBOOK_WF1_DEFINITION, wf1_db.definition) # Workflow 2. wf2_db = self._assert_single_item(db_wfs, name='my_wb.wf2') wf2_spec = spec_parser.get_workflow_spec(wf2_db.spec) self.assertEqual('wf2', wf2_spec.get_name()) self.assertEqual('reverse', wf2_spec.get_type()) self.assertEqual(namespace, wf2_db.namespace) self.assertEqual(UPDATED_WORKBOOK_WF2_DEFINITION, wf2_db.definition) def test_delete_workbook(self): namespace = 'pqr' # Create workbook. 
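# Creating the workbook implicitly creates its two workflows and one
# action; both counts are re-checked after the deletion below.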
wb_service.create_workbook_v2(WORKBOOK, namespace=namespace) db_wfs = db_api.get_workflow_definitions() db_actions = db_api.get_action_definitions(name='my_wb.concat', namespace=namespace) self.assertEqual(2, len(db_wfs)) self.assertEqual(1, len(db_actions)) db_api.delete_workbook('my_wb', namespace=namespace) db_wfs = db_api.get_workflow_definitions() db_actions = db_api.get_action_definitions(name='my_wb.concat', namespace=namespace) # Deleting workbook shouldn't delete workflows and actions self.assertEqual(2, len(db_wfs)) self.assertEqual(1, len(db_actions)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/services/test_workflow_service.py0000644000175000017500000003003500000000000026536 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from oslo_config import cfg from mistral.db.v2.sqlalchemy import api as db_api from mistral import exceptions as exc from mistral.lang import parser as spec_parser from mistral.lang.v2 import tasks from mistral.lang.v2 import workflows from mistral.services import workflows as wf_service from mistral.tests.unit import base from mistral.workflow import states from mistral_lib import utils # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WORKFLOW_LIST = """ --- version: '2.0' wf1: tags: [test, v2] type: reverse input: - param1 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}" publish: result: "{$}" wf2: type: direct output: result: "{$.result}" tasks: task1: workflow: my_wb.wf1 param1='Hi' task_name='task1' publish: result: "The result of subworkflow is '{$.final_result}'" """ UPDATED_WORKFLOW_LIST = """ --- version: '2.0' wf1: type: reverse input: - param1 - param2 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}{$.param2}" publish: result: "{$}" """ WORKFLOW_WITH_VAR_TASK_NAME = """ --- version: '2.0' engine_command_{task_name}: tasks: {task_name}: action: nova.servers_list """ WORKFLOW = WORKFLOW_WITH_VAR_TASK_NAME.format(task_name='task1') INVALID_WORKFLOW = """ --- verstion: '2.0' wf: tasks: task1: action: std.echo output="Task 1" """ INVALID_WORKFLOW_1 = """ --- version: '2.0' wf: tasks: task1: action: std.noop on-success: task2 # The task "task2" doesn't exist. 
task3: action: std.noop """ WORKFLOW_WITH_LONG_TASK_NAME = """ --- version: '2.0' test_workflow: tasks: {long_task_name}: action: std.noop """ WORKFLOW_WITH_LONG_JOIN_TASK_NAME = """ --- version: '2.0' test_workflow: tasks: task1: on-success: - {long_task_name} {long_task_name}: join: all """ WORKFLOWS_WITH_KEY_ORDER = """ --- version: '2.0' wf1: tasks: task1: publish: we: 1 dont_want: 2 to_be_sorted: 3 wf2: tasks: task1: action: std.noop """ class WorkflowServiceTest(base.DbTestCase): def test_create_workflows(self): db_wfs = wf_service.create_workflows(WORKFLOW_LIST) self.assertEqual(2, len(db_wfs)) # Workflow 1. wf1_db = self._assert_single_item(db_wfs, name='wf1') wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec) self.assertEqual('wf1', wf1_spec.get_name()) self.assertListEqual(['test', 'v2'], wf1_spec.get_tags()) self.assertEqual('reverse', wf1_spec.get_type()) # Workflow 2. wf2_db = self._assert_single_item(db_wfs, name='wf2') wf2_spec = spec_parser.get_workflow_spec(wf2_db.spec) self.assertEqual('wf2', wf2_spec.get_name()) self.assertEqual('direct', wf2_spec.get_type()) def test_preserve_key_ordering_in_workflow_definition(self): db_wfs = wf_service.create_workflows(WORKFLOWS_WITH_KEY_ORDER) self.assertEqual(2, len(db_wfs)) wf1_db = self._assert_single_item(db_wfs, name='wf1') wf1_def = wf1_db.definition published_values = wf1_def.splitlines()[-3:] wf1_publish = [ item.strip() for item in published_values ] self.assertEqual( ['we: 1', 'dont_want: 2', 'to_be_sorted: 3'], wf1_publish ) def test_engine_commands_are_valid_task_names(self): for name in workflows.ENGINE_COMMANDS: wf_text = WORKFLOW_WITH_VAR_TASK_NAME.format(task_name=name) wf_defs = wf_service.create_workflows(wf_text) self.assertIsNotNone(wf_defs) self.assertEqual(1, len(wf_defs)) def test_update_workflows(self): db_wfs = wf_service.create_workflows(WORKFLOW_LIST) self.assertEqual(2, len(db_wfs)) # Workflow 1. 
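# The original wf1 comes from WORKFLOW_LIST; UPDATED_WORKFLOW_LIST
# below redefines only wf1, so update_workflows() is expected to
# return a single definition.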
wf1_db = self._assert_single_item(db_wfs, name='wf1') wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec) self.assertEqual('wf1', wf1_spec.get_name()) self.assertEqual('reverse', wf1_spec.get_type()) self.assertIn('param1', wf1_spec.get_input()) self.assertIs( wf1_spec.get_input().get('param1'), utils.NotDefined ) db_wfs = wf_service.update_workflows(UPDATED_WORKFLOW_LIST) self.assertEqual(1, len(db_wfs)) wf1_db = self._assert_single_item(db_wfs, name='wf1') wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec) self.assertEqual('wf1', wf1_spec.get_name()) self.assertListEqual([], wf1_spec.get_tags()) self.assertEqual('reverse', wf1_spec.get_type()) self.assertIn('param1', wf1_spec.get_input()) self.assertIn('param2', wf1_spec.get_input()) self.assertIs( wf1_spec.get_input().get('param1'), utils.NotDefined ) self.assertIs( wf1_spec.get_input().get('param2'), utils.NotDefined ) def test_update_non_existing_workflow_failed(self): exception = self.assertRaises( exc.DBEntityNotFoundError, wf_service.update_workflows, WORKFLOW ) self.assertIn("Workflow not found", str(exception)) def test_invalid_workflow_list(self): exception = self.assertRaises( exc.InvalidModelException, wf_service.create_workflows, INVALID_WORKFLOW ) self.assertIn("Invalid DSL", str(exception)) def test_update_workflow_execution_env(self): wf_exec_template = { 'spec': {}, 'start_params': {'task': 'my_task1'}, 'state': 'PAUSED', 'state_info': None, 'params': {'env': {'k1': 'abc'}}, 'created_at': None, 'updated_at': None, 'context': {'__env': {'k1': 'fee fi fo fum'}}, 'task_id': None, 'trust_id': None, 'description': None, 'output': None } states_permitted = [ states.IDLE, states.PAUSED, states.ERROR ] update_env = {'k1': 'foobar'} for state in states_permitted: wf_exec = copy.deepcopy(wf_exec_template) wf_exec['state'] = state with db_api.transaction(): created = db_api.create_workflow_execution(wf_exec) self.assertIsNone(created.updated_at) updated = wf_service.update_workflow_execution_env( created, update_env ) self.assertDictEqual(update_env, updated.params['env']) fetched = db_api.get_workflow_execution(created.id) self.assertEqual(updated, fetched) self.assertIsNotNone(fetched.updated_at) def test_update_workflow_execution_env_wrong_state(self): wf_exec_template = { 'spec': {}, 'start_params': {'task': 'my_task1'}, 'state': 'PAUSED', 'state_info': None, 'params': {'env': {'k1': 'abc'}}, 'created_at': None, 'updated_at': None, 'context': {'__env': {'k1': 'fee fi fo fum'}}, 'task_id': None, 'trust_id': None, 'description': None, 'output': None } states_not_permitted = [ states.RUNNING, states.RUNNING_DELAYED, states.SUCCESS, states.WAITING ] update_env = {'k1': 'foobar'} for state in states_not_permitted: wf_exec = copy.deepcopy(wf_exec_template) wf_exec['state'] = state with db_api.transaction(): created = db_api.create_workflow_execution(wf_exec) self.assertIsNone(created.updated_at) self.assertRaises( exc.NotAllowedException, wf_service.update_workflow_execution_env, created, update_env ) fetched = db_api.get_workflow_execution(created.id) self.assertDictEqual( wf_exec['params']['env'], fetched.params['env'] ) self.assertDictEqual( wf_exec['context']['__env'], fetched.context['__env'] ) def test_with_long_task_name(self): long_task_name = utils.generate_string(tasks.MAX_LENGTH_TASK_NAME + 1) workflow = WORKFLOW_WITH_LONG_TASK_NAME.format( long_task_name=long_task_name ) self.assertRaises( exc.InvalidModelException, wf_service.create_workflows, workflow ) def test_upper_bound_length_task_name(self): long_task_name = 
utils.generate_string(tasks.MAX_LENGTH_TASK_NAME) wf_text = WORKFLOW_WITH_LONG_TASK_NAME.format( long_task_name=long_task_name ) wf_defs = wf_service.create_workflows(wf_text) self.assertIsNotNone(wf_defs) self.assertEqual(1, len(wf_defs)) def test_with_long_join_task_name(self): long_task_name = utils.generate_string( tasks.MAX_LENGTH_JOIN_TASK_NAME + 1 ) wf_text = WORKFLOW_WITH_LONG_JOIN_TASK_NAME.format( long_task_name=long_task_name ) self.assertRaises( exc.InvalidModelException, wf_service.create_workflows, wf_text ) def test_upper_bound_length_join_task_name(self): long_task_name = utils.generate_string(tasks.MAX_LENGTH_JOIN_TASK_NAME) wf_text = WORKFLOW_WITH_LONG_JOIN_TASK_NAME.format( long_task_name=long_task_name ) wf_defs = wf_service.create_workflows(wf_text) self.assertIsNotNone(wf_defs) self.assertEqual(1, len(wf_defs)) def test_validation_mode_enabled_by_default(self): self.override_config('validation_mode', 'enabled', 'api') self.assertRaises( exc.InvalidModelException, wf_service.create_workflows, INVALID_WORKFLOW_1 ) wf_defs = wf_service.create_workflows( INVALID_WORKFLOW_1, validate=False ) # The workflow is created but it will never succeed since it's broken. self.assertIsNotNone(wf_defs) self.assertEqual(1, len(wf_defs)) def test_validation_mode_always_enabled(self): self.override_config('validation_mode', 'mandatory', 'api') self.assertRaises( exc.InvalidModelException, wf_service.create_workflows, INVALID_WORKFLOW_1 ) self.assertRaises( exc.InvalidModelException, wf_service.create_workflows, INVALID_WORKFLOW_1, validate=False ) def test_validation_mode_always_disabled(self): self.override_config('validation_mode', 'disabled', 'api') wf_defs = wf_service.create_workflows(INVALID_WORKFLOW_1) self.assertIsNotNone(wf_defs) self.assertEqual(1, len(wf_defs)) db_api.delete_workflow_definition(wf_defs[0].id) wf_service.create_workflows(INVALID_WORKFLOW_1, validate=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/test_command_dispatcher.py0000644000175000017500000000471700000000000025155 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
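# The tests below pin down the behaviour of the internal
# dispatcher._rearrange_commands() helper, as far as it can be
# inferred from the expected outputs:
#   * a workflow "set state" command (e.g. FailWorkflow or
#     SucceedWorkflow) cuts the command list short: commands that
#     follow it are dropped;
#   * RunTask commands in the "waiting" state are moved after the
#     non-waiting ones and ordered by their unique_key;
#   * everything else before the first "set state" command is
#     preserved.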
from mistral.engine import dispatcher from mistral.tests.unit import base from mistral.workflow import commands def _print_commands(cmds): print("commands:") for cmd in cmds: if isinstance(cmd, commands.RunTask): print("%s, %s, %s" % (type(cmd), cmd.is_waiting(), cmd.unique_key)) else: print("%s" % type(cmd)) class CommandDispatcherTest(base.BaseTest): def test_rearrange_commands(self): no_wait = commands.RunTask(None, None, None, None) fail = commands.FailWorkflow(None, None, None, None) succeed = commands.SucceedWorkflow(None, None, None, None) wait1 = commands.RunTask(None, None, None, None) wait1.wait = True wait1.unique_key = 'wait1' wait2 = commands.RunTask(None, None, None, None) wait2.wait = True wait2.unique_key = 'wait2' wait3 = commands.RunTask(None, None, None, None) wait3.wait = True wait3.unique_key = 'wait3' # 'set state' command is the first, others must be ignored. initial = [fail, no_wait, wait1, wait3, wait2] expected = [fail] cmds = dispatcher._rearrange_commands(initial) self.assertEqual(expected, cmds) # 'set state' command is the last, tasks before it must be sorted. initial = [no_wait, wait2, wait1, wait3, succeed] expected = [no_wait, wait1, wait2, wait3, succeed] cmds = dispatcher._rearrange_commands(initial) self.assertEqual(expected, cmds) # 'set state' command is in the middle, tasks before it must be sorted # and the task after it must be ignored. initial = [wait3, wait2, no_wait, succeed, wait1] expected = [no_wait, wait2, wait3, succeed] cmds = dispatcher._rearrange_commands(initial) self.assertEqual(expected, cmds) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/test_context.py0000644000175000017500000000322100000000000023002 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral import context from mistral import exceptions from mistral.tests.unit.engine import base class ContextTest(base.EngineTestCase): def test_target_insecure(self): # Defaults to False if X-Target-Auth-Uri isn't passed. 
headers = context._extract_mistral_auth_params({ 'X-Target-Insecure': 'True', }) self.assertFalse(headers['insecure']) headers = { "X-Target-Auth-Uri": "uri", 'X-Target-Auth-Token': 'Token', } params = context._extract_mistral_auth_params(headers) self.assertFalse(params['insecure']) headers['X-Target-Insecure'] = 'True' params = context._extract_mistral_auth_params(headers) self.assertTrue(params['insecure']) headers['X-Target-Insecure'] = 'False' params = context._extract_mistral_auth_params(headers) self.assertFalse(params['insecure']) headers['X-Target-Insecure'] = 'S3cure' self.assertRaises( exceptions.MistralException, context._extract_mistral_auth_params, headers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/test_coordination.py0000644000175000017500000001051000000000000024005 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg import six from mistral.service import coordination from mistral.tests.unit import base class ServiceCoordinatorTest(base.BaseTest): def test_start(self): cfg.CONF.set_default( 'backend_url', 'zake://', 'coordination' ) coordinator = coordination.ServiceCoordinator('fake_id') coordinator.start() self.assertTrue(coordinator.is_active()) def test_start_without_backend(self): cfg.CONF.set_default('backend_url', None, 'coordination') coordinator = coordination.ServiceCoordinator() coordinator.start() self.assertFalse(coordinator.is_active()) def test_stop_not_active(self): cfg.CONF.set_default('backend_url', None, 'coordination') coordinator = coordination.ServiceCoordinator() coordinator.start() coordinator.stop() self.assertFalse(coordinator.is_active()) def test_stop(self): cfg.CONF.set_default( 'backend_url', 'zake://', 'coordination' ) coordinator = coordination.ServiceCoordinator() coordinator.start() coordinator.stop() self.assertFalse(coordinator.is_active()) def test_join_group_not_active(self): cfg.CONF.set_default('backend_url', None, 'coordination') coordinator = coordination.ServiceCoordinator() coordinator.start() coordinator.join_group('fake_group') members = coordinator.get_members('fake_group') self.assertFalse(coordinator.is_active()) self.assertEqual(0, len(members)) def test_join_group_and_get_members(self): cfg.CONF.set_default( 'backend_url', 'zake://', 'coordination' ) coordinator = coordination.ServiceCoordinator(my_id='fake_id') coordinator.start() coordinator.join_group('fake_group') members = coordinator.get_members('fake_group') self.assertEqual(1, len(members)) self.assertItemsEqual((six.b('fake_id'),), members) def test_join_group_and_leave_group(self): cfg.CONF.set_default( 'backend_url', 'zake://', 'coordination' ) coordinator = coordination.ServiceCoordinator(my_id='fake_id') coordinator.start() coordinator.join_group('fake_group') members_before = coordinator.get_members('fake_group') coordinator.leave_group('fake_group') 
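# Membership is captured both before and after leave_group() so that
# the assertions below can show the transition from one member to none.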
members_after = coordinator.get_members('fake_group') self.assertEqual(1, len(members_before)) self.assertEqual(set([six.b('fake_id')]), members_before) self.assertEqual(0, len(members_after)) self.assertEqual(set([]), members_after) class ServiceTest(base.BaseTest): def setUp(self): super(ServiceTest, self).setUp() # Re-initialize the global service coordinator object, in order to use # new coordination configuration. coordination.cleanup_service_coordinator() @mock.patch('mistral_lib.utils.get_process_identifier', return_value='fake_id') def test_register_membership(self, mock_get_identifier): cfg.CONF.set_default('backend_url', 'zake://', 'coordination') srv = coordination.Service('fake_group') srv.register_membership() self.addCleanup(srv.stop) srv_coordinator = coordination.get_service_coordinator() self.assertIsNotNone(srv_coordinator) self.assertTrue(srv_coordinator.is_active()) members = srv_coordinator.get_members('fake_group') mock_get_identifier.assert_called_once_with() self.assertEqual(set([six.b('fake_id')]), members) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/test_exception_base.py0000644000175000017500000000463200000000000024315 0ustar00coreycorey00000000000000# Copyright 2014 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six from mistral import exceptions from mistral.tests.unit import base from mistral_lib.utils import inspect_utils class ExceptionTest(base.BaseTest): """Test cases for exception code.""" def test_nf_with_message(self): exc = exceptions.DBEntityNotFoundError('check_for_this') self.assertIn('check_for_this', six.text_type(exc)) self.assertEqual(404, exc.http_code) def test_nf_with_no_message(self): exc = exceptions.DBEntityNotFoundError() self.assertIn("Object not found", six.text_type(exc)) self.assertEqual(404, exc.http_code,) def test_duplicate_obj_code(self): exc = exceptions.DBDuplicateEntryError() self.assertIn("Database object already exists", six.text_type(exc)) self.assertEqual(409, exc.http_code,) def test_default_code(self): exc = exceptions.EngineException() self.assertEqual(500, exc.http_code) def test_default_message(self): exc = exceptions.EngineException() self.assertIn("An unknown exception occurred", six.text_type(exc)) def test_one_param_initializer(self): # NOTE: this test is needed because at some places in the code we # have to assume that every class derived from MistralException # has an initializer with one "message" parameter. # Let's traverse the MistralException class hierarchy recursively # and check if it has the required initializer. 
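# Extending base_classes while iterating over it effectively performs
# a breadth-first traversal of the whole exception class hierarchy.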
base_classes = [exceptions.MistralException] for base_class in base_classes: for subclass in base_class.__subclasses__(): arg_list = inspect_utils.get_arg_list(subclass.__init__) self.assertEqual(1, len(arg_list)) self.assertEqual('message', arg_list[0]) base_classes.extend(subclass.__subclasses__()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/test_expressions.py0000644000175000017500000001305500000000000023706 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral import exceptions as exc from mistral import expressions as expr from mistral.tests.unit import base DATA = { "server": { "id": "03ea824a-aa24-4105-9131-66c48ae54acf", "name": "cloud-fedora", "status": "ACTIVE" }, "status": "OK" } SERVERS = { "servers": [ {'name': 'centos'}, {'name': 'ubuntu'}, {'name': 'fedora'} ] } class ExpressionsTest(base.BaseTest): def test_evaluate_complex_expressions(self): data = { 'a': 1, 'b': 2, 'c': 3, 'd': True, 'e': False, 'f': 10.1, 'g': 10, 'h': [1, 2, 3, 4, 5], 'i': 'We are OpenStack!', 'j': 'World', 'k': 'Mistral', 'l': 'awesome', 'm': 'the way we roll' } test_cases = [ ('<% $.a + $.b * $.c %>', 7), ('<%($.a + $.b) * $.c %>', 9), ('<% $.d and $.e %>', False), ('<% $.f > $.g %>', True), ('<% $.h.len() >= 5 %>', True), ('<% $.h.len() >= $.b + $.c %>', True), ('<% 100 in $.h %>', False), ('<% $.a in $.h%>', True), ('<% ''OpenStack'' in $.i %>', True), ('Hello, <% $.j %>!', 'Hello, World!'), ('<% $.k %> is <% $.l %>!', 'Mistral is awesome!'), ('This is <% $.m %>.', 'This is the way we roll.'), ('<% 1 + 1 = 3 %>', False) ] for expression, expected in test_cases: actual = expr.evaluate_recursively(expression, data) self.assertEqual(expected, actual) def test_evaluate_recursively(self): task_spec_dict = { 'parameters': { 'p1': 'My string', 'p2': '<% $.param2 %>', 'p3': '' }, 'publish': { 'new_key11': 'new_key1' } } modified_task = expr.evaluate_recursively( task_spec_dict, {'param2': 'val32'} ) self.assertDictEqual( { 'parameters': { 'p1': 'My string', 'p2': 'val32', 'p3': '' }, 'publish': { 'new_key11': 'new_key1' } }, modified_task ) def test_evaluate_recursively_arbitrary_dict(self): context = { "auth_token": "123", "project_id": "mistral" } data = { "parameters": { "parameter1": { "name1": "<% $.auth_token %>", "name2": "val_name2" }, "param2": [ "var1", "var2", "/servers/<% $.project_id %>/bla" ] }, "token": "<% $.auth_token %>" } applied = expr.evaluate_recursively(data, context) self.assertDictEqual( { "parameters": { "parameter1": { "name1": "123", "name2": "val_name2" }, "param2": ["var1", "var2", "/servers/mistral/bla"] }, "token": "123" }, applied ) def test_evaluate_recursively_environment(self): environment = { 'host': 'vm1234.example.com', 'db': 'test', 'timeout': 600, 'verbose': True, '__actions': { 'std.sql': { 'conn': 
'mysql://admin:secret@<% env().host %>' '/<% env().db %>' } } } context = { '__env': environment } defaults = context['__env']['__actions']['std.sql'] applied = expr.evaluate_recursively(defaults, context) expected = 'mysql://admin:secret@vm1234.example.com/test' self.assertEqual(expected, applied['conn']) def test_validate_jinja_with_yaql_context(self): self.assertRaises(exc.JinjaGrammarException, expr.validate, '{{ $ }}') def test_validate_mixing_jinja_and_yaql(self): self.assertRaises(exc.ExpressionGrammarException, expr.validate, '<% $.a %>{{ _.a }}') self.assertRaises(exc.ExpressionGrammarException, expr.validate, '{{ _.a }}<% $.a %>') def test_evaluate_mixing_jinja_and_yaql(self): actual = expr.evaluate('<% $.a %>{{ _.a }}', {'a': 'b'}) self.assertEqual('<% $.a %>b', actual) actual = expr.evaluate('{{ _.a }}<% $.a %>', {'a': 'b'}) self.assertEqual('b<% $.a %>', actual) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/test_launcher.py0000644000175000017500000000552300000000000023126 0ustar00coreycorey00000000000000# Copyright 2017 - Brocade Communications Systems, Inc. # Copyright 2018 - Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet from mistral.api import service as api_service from mistral.cmd import launch from mistral.scheduler import base as sched_base from mistral.tests.unit import base class ServiceLauncherTest(base.DbTestCase): def setUp(self): super(ServiceLauncherTest, self).setUp() self.override_config('enabled', False, group='cron_trigger') launch.reset_server_managers() sched_base.destroy_system_scheduler() def test_launch_all(self): eventlet.spawn(launch.launch_any, launch.LAUNCH_OPTIONS.keys()) for i in range(0, 50): svr_proc_mgr = launch.get_server_process_manager() svr_thrd_mgr = launch.get_server_thread_manager() if svr_proc_mgr and svr_thrd_mgr: break eventlet.sleep(0.1) self.assertIsNotNone(svr_proc_mgr) self.assertIsNotNone(svr_thrd_mgr) api_server = api_service.WSGIService('mistral_api') api_workers = api_server.workers self._await(lambda: len(svr_proc_mgr.children.keys()) == api_workers) self._await(lambda: len(svr_thrd_mgr.services.services) == 4) def test_launch_process(self): eventlet.spawn(launch.launch_any, ['api']) for i in range(0, 50): svr_proc_mgr = launch.get_server_process_manager() if svr_proc_mgr: break eventlet.sleep(0.1) svr_thrd_mgr = launch.get_server_thread_manager() self.assertIsNotNone(svr_proc_mgr) self.assertIsNone(svr_thrd_mgr) api_server = api_service.WSGIService('mistral_api') api_workers = api_server.workers self._await(lambda: len(svr_proc_mgr.children.keys()) == api_workers) def test_launch_thread(self): eventlet.spawn(launch.launch_any, ['engine']) for i in range(0, 50): svr_thrd_mgr = launch.get_server_thread_manager() if svr_thrd_mgr: break eventlet.sleep(0.1) svr_proc_mgr = launch.get_server_process_manager() self.assertIsNone(svr_proc_mgr) self.assertIsNotNone(svr_thrd_mgr) self._await(lambda: 
len(svr_thrd_mgr.services.services) == 1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/test_version.py0000644000175000017500000000140400000000000023004 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.tests.unit import base from mistral.version import version_string class VersionStringTest(base.BaseTest): def test_version(self): self.assertIsInstance(version_string, str) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.153568 mistral-10.0.0.0b3/mistral/tests/unit/utils/0000755000175000017500000000000000000000000021047 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/utils/__init__.py0000644000175000017500000000000000000000000023146 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/utils/test_filter_utils.py0000644000175000017500000000255100000000000025170 0ustar00coreycorey00000000000000# Copyright 2018 - Nokia, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.tests.unit import base from mistral.utils import filter_utils class FilterUtilsTest(base.BaseTest): def test_create_filters_with_nones(self): expected_filters = { 'key2': {'eq': 'value2'}, 'key1': {'eq': None} } filters = filter_utils.create_filters_from_request_params( none_values=['key1'], key1=None, key2='value2', key3=None, ) self.assertEqual(expected_filters, filters) del expected_filters['key1'] filters = filter_utils.create_filters_from_request_params( none_values=[], key1=None, key2='value2', key3=None, ) self.assertEqual(expected_filters, filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/utils/test_rest_utils.py0000644000175000017500000000177000000000000024662 0ustar00coreycorey00000000000000# Copyright 2018 - Nokia, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from wsme import exc as wsme_exc from mistral.tests.unit import base from mistral.utils import rest_utils class RestUtilsTest(base.BaseTest): def test_validate_fields(self): rest_utils.validate_fields(["a", "b"], ["a", "b", "c"]) e = self.assertRaises(wsme_exc.ClientSideError, rest_utils.validate_fields, ["d"], ["a"]) self.assertIn("[d]", str(e)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/utils/test_safeLoader.py0000644000175000017500000000472600000000000024536 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import TestCase from mistral.utils import safe_yaml class TestSafeLoader(TestCase): def test_safe_load(self): yaml_text = """ version: '2.0' wf1: type: direct input: - a: &a ["lol","lol","lol","lol","lol"] - b: &b [*a,*a,*a,*a,*a,*a,*a,*a,*a] - c: &c [*b,*b,*b,*b,*b,*b,*b,*b,*b] - d: &d [*c,*c,*c,*c,*c,*c,*c,*c,*c] - e: &e [*d,*d,*d,*d,*d,*d,*d,*d,*d] - f: &f [*e,*e,*e,*e,*e,*e,*e,*e,*e] - g: &g [*f,*f,*f,*f,*f,*f,*f,*f,*f] - h: &h [*g,*g,*g,*g,*g,*g,*g,*g,*g] - i: &i [*h,*h,*h,*h,*h,*h,*h,*h,*h] tasks: hello: action: std.echo output="Hello" wait-before: 1 publish: result: <% task(hello).result %> """ result = { 'version': '2.0', 'wf1': {'type': 'direct', 'input': [ {'a': '&a ["lol","lol","lol","lol","lol"]'}, {'b': '&b [*a,*a,*a,*a,*a,*a,*a,*a,*a]'}, {'c': '&c [*b,*b,*b,*b,*b,*b,*b,*b,*b]'}, {'d': '&d [*c,*c,*c,*c,*c,*c,*c,*c,*c]'}, {'e': '&e [*d,*d,*d,*d,*d,*d,*d,*d,*d]'}, {'f': '&f [*e,*e,*e,*e,*e,*e,*e,*e,*e]'}, {'g': '&g [*f,*f,*f,*f,*f,*f,*f,*f,*f]'}, {'h': '&h [*g,*g,*g,*g,*g,*g,*g,*g,*g]'}, {'i': '&i [*h,*h,*h,*h,*h,*h,*h,*h,*h]'}], 'tasks': {'hello': { 'action': 'std.echo output="Hello"', 'wait-before': 1, 'publish': {'result': '<% task(hello).result %>'} }} } } self.assertEqual(result, safe_yaml.load(yaml_text)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/utils/test_utils.py0000644000175000017500000000314200000000000023620 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2015 - Huawei Technologies Co. Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral import exceptions as exc from mistral.tests.unit import base from mistral.utils import ssh_utils from mistral_lib import utils class UtilsTest(base.BaseTest): def test_itersubclasses(self): class A(object): pass class B(A): pass class C(A): pass class D(C): pass self.assertEqual([B, C, D], list(utils.iter_subclasses(A))) def test_paramiko_to_private_key(self): self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, "../dir" ) self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, "..\\dir" ) self.assertIsNone( ssh_utils._to_paramiko_private_key(private_key_filename=None, password='pass') ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.153568 mistral-10.0.0.0b3/mistral/tests/unit/workflow/0000755000175000017500000000000000000000000021561 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/workflow/__init__.py0000644000175000017500000000000000000000000023660 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/workflow/test_direct_workflow.py0000644000175000017500000001167700000000000026412 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
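# The controller test below drives DirectWorkflowController through a
# complete run: continue_workflow() is expected to first emit a command
# for the start task, then, once task1 succeeds with res1 = 'Hey', a
# command for task2 only (the on-complete condition for task3 does not
# match), and finally no commands at all.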
import mock from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral.lang import parser as spec_parser from mistral.services import workflows as wf_service from mistral.tests.unit import base from mistral.workflow import direct_workflow as d_wf from mistral.workflow import states class DirectWorkflowControllerTest(base.DbTestCase): def _prepare_test(self, wf_text): wfs = wf_service.create_workflows(wf_text) wf_spec = spec_parser.get_workflow_spec_by_definition_id( wfs[0].id, wfs[0].updated_at ) wf_ex = models.WorkflowExecution( id='1-2-3-4', spec=wf_spec.to_dict(), state=states.RUNNING, workflow_id=wfs[0].id, input={}, params={}, context={} ) self.wf_ex = wf_ex self.wf_spec = wf_spec return wf_ex def _create_task_execution(self, name, state): tasks_spec = self.wf_spec.get_tasks() task_ex = models.TaskExecution( id=self.getUniqueString('id'), name=name, spec=tasks_spec[name].to_dict(), state=state ) self.wf_ex.task_executions.append(task_ex) return task_ex @mock.patch.object(db_api, 'get_workflow_execution') @mock.patch.object(db_api, 'get_task_execution') def test_continue_workflow(self, get_task_execution, get_workflow_execution): wf_text = """--- version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Hey" publish: res1: <% $.task1 %> on-complete: - task2: <% $.res1 = 'Hey' %> - task3: <% $.res1 = 'Not Hey' %> task2: action: std.echo output="Hi" task3: action: std.echo output="Hoy" """ wf_ex = self._prepare_test(wf_text) get_workflow_execution.return_value = wf_ex wf_ctrl = d_wf.DirectWorkflowController(wf_ex) # Workflow execution is in initial step. No running tasks. cmds = wf_ctrl.continue_workflow() self.assertEqual(1, len(cmds)) cmd = cmds[0] self.assertIs(wf_ctrl.wf_ex, cmd.wf_ex) self.assertIsNotNone(cmd.task_spec) self.assertEqual('task1', cmd.task_spec.get_name()) self.assertEqual(states.RUNNING, self.wf_ex.state) # Assume that 'task1' completed successfully. task1_ex = self._create_task_execution('task1', states.SUCCESS) task1_ex.published = {'res1': 'Hey'} get_task_execution.return_value = task1_ex task1_ex.action_executions.append( models.ActionExecution( name='std.echo', workflow_name='wf', state=states.SUCCESS, output={'result': 'Hey'}, accepted=True, runtime_context={'index': 0} ) ) cmds = wf_ctrl.continue_workflow() task1_ex.processed = True self.assertEqual(1, len(cmds)) self.assertEqual('task2', cmds[0].task_spec.get_name()) self.assertEqual(states.RUNNING, self.wf_ex.state) self.assertEqual(states.SUCCESS, task1_ex.state) # Now assume that 'task2' completed successfully. task2_ex = self._create_task_execution('task2', states.SUCCESS) task2_ex.action_executions.append( models.ActionExecution( name='std.echo', workflow_name='wf', state=states.SUCCESS, output={'result': 'Hi'}, accepted=True ) ) cmds = wf_ctrl.continue_workflow() task2_ex.processed = True self.assertEqual(0, len(cmds)) def test_continue_workflow_no_start_tasks(self): wf_text = """--- version: '2.0' wf: description: > Invalid workflow that doesn't have start tasks (tasks with no inbound connections). 
type: direct tasks: task1: on-complete: task2 task2: on-complete: task1 """ self.assertRaises(exc.DSLParsingException, self._prepare_test, wf_text) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/workflow/test_reverse_workflow.py0000644000175000017500000001103300000000000026575 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.db.v2 import api as db_api from mistral.db.v2.sqlalchemy import models from mistral import exceptions as exc from mistral.lang import parser as spec_parser from mistral.services import workbooks as wb_service from mistral.tests.unit import base from mistral.workflow import reverse_workflow as reverse_wf from mistral.workflow import states # TODO(rakhmerov): This workflow is too simple. Add more complicated one. WB = """ --- version: '2.0' name: my_wb workflows: wf: type: reverse tasks: task1: action: std.echo output="Hey" task2: action: std.echo output="Hi!" requires: [task1] """ class ReverseWorkflowControllerTest(base.DbTestCase): def setUp(self): super(ReverseWorkflowControllerTest, self).setUp() wb_service.create_workbook_v2(WB) self.wb_spec = spec_parser.get_workbook_spec_from_yaml(WB) def _create_workflow_execution(self, params): wf_def = db_api.get_workflow_definitions()[0] self.wf_ex = db_api.create_workflow_execution({ 'id': '1-2-3-4', 'spec': self.wb_spec.get_workflows().get('wf').to_dict(), 'state': states.RUNNING, 'params': params, 'workflow_id': wf_def.id }) def _create_task_execution(self, name, state): tasks_spec = self.wb_spec.get_workflows()['wf'].get_tasks() return db_api.create_task_execution({ 'name': name, 'spec': tasks_spec[name].to_dict(), 'state': state, 'workflow_execution_id': self.wf_ex.id }) def test_start_workflow_task2(self): with db_api.transaction(): self._create_workflow_execution({'task_name': 'task2'}) wf_ctrl = reverse_wf.ReverseWorkflowController(self.wf_ex) cmds = wf_ctrl.continue_workflow() self.assertEqual(1, len(cmds)) self.assertEqual('task1', cmds[0].task_spec.get_name()) def test_start_workflow_task1(self): with db_api.transaction(): self._create_workflow_execution({'task_name': 'task1'}) wf_ctrl = reverse_wf.ReverseWorkflowController(self.wf_ex) cmds = wf_ctrl.continue_workflow() self.assertEqual(1, len(cmds)) self.assertEqual('task1', cmds[0].task_spec.get_name()) def test_start_workflow_without_task(self): with db_api.transaction(): self._create_workflow_execution({}) wf_ctrl = reverse_wf.ReverseWorkflowController(self.wf_ex) self.assertRaises(exc.WorkflowException, wf_ctrl.continue_workflow) def test_continue_workflow(self): with db_api.transaction(): self._create_workflow_execution({'task_name': 'task2'}) wf_ctrl = reverse_wf.ReverseWorkflowController(self.wf_ex) # Assume task1 completed. 
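# In a reverse workflow the controller works backwards from the target
# task ('task2' here), so completing task1 should unblock exactly one
# command, for task2.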
task1_ex = self._create_task_execution('task1', states.SUCCESS) task1_ex.executions.append( models.ActionExecution( name='std.echo', workflow_name='wf', state=states.SUCCESS, output={'result': 'Hey'}, accepted=True ) ) cmds = wf_ctrl.continue_workflow() task1_ex.processed = True self.assertEqual(1, len(cmds)) self.assertEqual('task2', cmds[0].task_spec.get_name()) # Now assume task2 completed. task2_ex = self._create_task_execution('task2', states.SUCCESS) task2_ex.executions.append( models.ActionExecution( name='std.echo', workflow_name='wf', state=states.SUCCESS, output={'result': 'Hi!'}, accepted=True ) ) cmds = wf_ctrl.continue_workflow() task1_ex.processed = True self.assertEqual(0, len(cmds)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/workflow/test_states.py0000644000175000017500000000747100000000000024506 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.tests.unit import base from mistral.workflow import states as s class StatesModuleTest(base.BaseTest): def test_is_valid_transition(self): # From IDLE self.assertTrue(s.is_valid_transition(s.IDLE, s.IDLE)) self.assertTrue(s.is_valid_transition(s.IDLE, s.RUNNING)) self.assertTrue(s.is_valid_transition(s.IDLE, s.ERROR)) self.assertFalse(s.is_valid_transition(s.IDLE, s.PAUSED)) self.assertFalse(s.is_valid_transition(s.IDLE, s.RUNNING_DELAYED)) self.assertFalse(s.is_valid_transition(s.IDLE, s.SUCCESS)) # From RUNNING self.assertTrue(s.is_valid_transition(s.RUNNING, s.RUNNING)) self.assertTrue(s.is_valid_transition(s.RUNNING, s.ERROR)) self.assertTrue(s.is_valid_transition(s.RUNNING, s.PAUSED)) self.assertTrue(s.is_valid_transition(s.RUNNING, s.RUNNING_DELAYED)) self.assertTrue(s.is_valid_transition(s.RUNNING, s.SUCCESS)) self.assertFalse(s.is_valid_transition(s.RUNNING, s.IDLE)) # From PAUSED self.assertTrue(s.is_valid_transition(s.PAUSED, s.PAUSED)) self.assertTrue(s.is_valid_transition(s.PAUSED, s.RUNNING)) self.assertTrue(s.is_valid_transition(s.PAUSED, s.ERROR)) self.assertFalse(s.is_valid_transition(s.PAUSED, s.RUNNING_DELAYED)) self.assertFalse(s.is_valid_transition(s.PAUSED, s.SUCCESS)) self.assertFalse(s.is_valid_transition(s.PAUSED, s.IDLE)) # From DELAYED self.assertTrue( s.is_valid_transition(s.RUNNING_DELAYED, s.RUNNING_DELAYED) ) self.assertTrue(s.is_valid_transition(s.RUNNING_DELAYED, s.RUNNING)) self.assertTrue(s.is_valid_transition(s.RUNNING_DELAYED, s.ERROR)) self.assertFalse(s.is_valid_transition(s.RUNNING_DELAYED, s.PAUSED)) self.assertFalse(s.is_valid_transition(s.RUNNING_DELAYED, s.SUCCESS)) self.assertFalse(s.is_valid_transition(s.RUNNING_DELAYED, s.IDLE)) # From SUCCESS self.assertTrue(s.is_valid_transition(s.SUCCESS, s.SUCCESS)) self.assertFalse(s.is_valid_transition(s.SUCCESS, s.RUNNING)) self.assertFalse(s.is_valid_transition(s.SUCCESS, s.ERROR)) self.assertFalse(s.is_valid_transition(s.SUCCESS, s.PAUSED)) 
self.assertFalse(s.is_valid_transition(s.SUCCESS, s.RUNNING_DELAYED)) self.assertFalse(s.is_valid_transition(s.SUCCESS, s.IDLE)) # From ERROR self.assertTrue(s.is_valid_transition(s.ERROR, s.ERROR)) self.assertTrue(s.is_valid_transition(s.ERROR, s.RUNNING)) self.assertFalse(s.is_valid_transition(s.ERROR, s.PAUSED)) self.assertFalse(s.is_valid_transition(s.ERROR, s.RUNNING_DELAYED)) self.assertFalse(s.is_valid_transition(s.ERROR, s.SUCCESS)) self.assertFalse(s.is_valid_transition(s.ERROR, s.IDLE)) # From WAITING self.assertTrue(s.is_valid_transition(s.WAITING, s.RUNNING)) self.assertFalse(s.is_valid_transition(s.WAITING, s.SUCCESS)) self.assertFalse(s.is_valid_transition(s.WAITING, s.PAUSED)) self.assertFalse(s.is_valid_transition(s.WAITING, s.RUNNING_DELAYED)) self.assertFalse(s.is_valid_transition(s.WAITING, s.IDLE)) self.assertFalse(s.is_valid_transition(s.WAITING, s.ERROR)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/tests/unit/workflow/test_workflow_base.py0000644000175000017500000000411700000000000026041 0ustar00coreycorey00000000000000# Copyright 2015 - Huawei Technologies Co. Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral.lang import parser as spec_parser from mistral.lang.v2 import workflows from mistral.tests.unit import base from mistral.workflow import base as wf_base from mistral.workflow import commands from mistral.workflow import direct_workflow as direct_wf from mistral.workflow import reverse_workflow as reverse_wf from mistral.db.v2.sqlalchemy import models as db_models DIRECT_WF = """ --- version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Hey" """ REVERSE_WF = """ --- version: '2.0' wf: type: reverse tasks: task1: action: std.echo output="Hey" """ class WorkflowControllerTest(base.BaseTest): def test_get_controller_direct(self): wf_spec = spec_parser.get_workflow_list_spec_from_yaml(DIRECT_WF)[0] wf_ex = db_models.WorkflowExecution(spec=wf_spec.to_dict()) self.assertIsInstance( wf_base.get_controller(wf_ex, wf_spec), direct_wf.DirectWorkflowController ) def test_get_controller_reverse(self): wf_spec = spec_parser.get_workflow_list_spec_from_yaml(REVERSE_WF)[0] wf_ex = db_models.WorkflowExecution(spec=wf_spec.to_dict()) self.assertIsInstance( wf_base.get_controller(wf_ex, wf_spec), reverse_wf.ReverseWorkflowController ) def test_all_engine_commands_have_implementation(self): for command in workflows.ENGINE_COMMANDS: self.assertIsNotNone(commands.get_command_class(command)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1575682 mistral-10.0.0.0b3/mistral/utils/0000755000175000017500000000000000000000000016726 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/utils/__init__.py0000644000175000017500000001070300000000000021040 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. 
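# Editor's sketch, not part of the distribution: a minimal illustration of
# the controller dispatch the tests above verify. The workflow text below
# is hypothetical; names come from the imports of this test module.
def _example_controller_dispatch():
    """get_controller() dispatches on the workflow 'type' attribute."""
    wf_text = """---
    version: '2.0'

    wf:
      type: reverse
      tasks:
        task1:
          action: std.noop
    """

    wf_spec = spec_parser.get_workflow_list_spec_from_yaml(wf_text)[0]
    wf_ex = db_models.WorkflowExecution(spec=wf_spec.to_dict())

    # 'type: reverse' selects the reverse workflow controller.
    assert isinstance(
        wf_base.get_controller(wf_ex, wf_spec),
        reverse_wf.ReverseWorkflowController
    )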
# ---- mistral-10.0.0.0b3/mistral/utils/__init__.py ----

# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import inspect
import os
import shutil
import tempfile
import threading

from oslo_concurrency import processutils
from oslo_serialization import jsonutils

from mistral import exceptions as exc


# Thread local storage.
_th_loc_storage = threading.local()

# TODO(rakhmerov): these two constants are misplaced. Utility methods
# should not be Mistral specific. They should be generic enough to be
# moved to any other project w/o changes.
ACTION_TASK_TYPE = 'ACTION'
WORKFLOW_TASK_TYPE = 'WORKFLOW'


@contextlib.contextmanager
def tempdir(**kwargs):
    argdict = kwargs.copy()

    if 'dir' not in argdict:
        argdict['dir'] = '/tmp/'

    tmpdir = tempfile.mkdtemp(**argdict)

    try:
        yield tmpdir
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError as e:
            raise exc.DataAccessException(
                "Failed to delete temp dir %(dir)s (reason: %(reason)s)" %
                {'dir': tmpdir, 'reason': e}
            )


def save_text_to(text, file_path, overwrite=False):
    if os.path.exists(file_path) and not overwrite:
        raise exc.DataAccessException(
            "Cannot save data to file. File %s already exists." % file_path
        )

    with open(file_path, 'w') as f:
        f.write(text)


def generate_key_pair(key_length=2048):
    """Create RSA key pair with specified number of bits in key.

    Returns tuple of private and public keys.
    """
    with tempdir() as tmpdir:
        keyfile = os.path.join(tmpdir, 'tempkey')
        args = [
            'ssh-keygen',
            '-q',                          # quiet
            '-N', '',                      # w/o passphrase
            '-t', 'rsa',                   # create key of rsa type
            '-f', keyfile,                 # filename of the key file
            '-C', 'Generated-by-Mistral'   # key comment
        ]

        if key_length is not None:
            args.extend(['-b', key_length])

        processutils.execute(*args)

        if not os.path.exists(keyfile):
            raise exc.DataAccessException(
                "Private key file hasn't been created"
            )

        private_key = open(keyfile).read()

        public_key_path = keyfile + '.pub'

        if not os.path.exists(public_key_path):
            raise exc.DataAccessException(
                "Public key file hasn't been created"
            )

        public_key = open(public_key_path).read()

    return private_key, public_key


def to_json_str(obj):
    """Serializes an object into a JSON string.

    :param obj: Object to serialize.
    :return: JSON string.
    """
    if obj is None:
        return None

    def _fallback(value):
        if inspect.isgenerator(value):
            result = list(value)

            # The result of the generator call may be again not primitive
            # so we need to call "to_primitive" again with the same fallback
            # function. Note that the endless recursion here is not a problem
            # because "to_primitive" limits the depth for custom classes,
            # if they are present in the object graph being traversed.
            return jsonutils.to_primitive(
                result,
                convert_instances=True,
                fallback=_fallback
            )

        return value

    # We need to convert the root of the given object graph into
    # a primitive by hand so that we also enable conversion of
    # objects of custom classes into primitives. Otherwise, they are
    # ignored by the "json" lib.
    return jsonutils.dumps(
        jsonutils.to_primitive(obj, convert_instances=True, fallback=_fallback)
    )
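# Editor's sketch, not part of the distribution: illustrates the generator
# fallback above, assuming default jsonutils behavior.
def _example_to_json_str():
    """Generators in the object graph are materialized into lists."""
    return to_json_str({'squares': (i * i for i in range(3))})
    # -> '{"squares": [0, 1, 4]}'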
def from_json_str(json_str):
    """Reconstructs an object from a JSON string.

    :param json_str: A JSON string.
    :return: Deserialized object.
    """
    if json_str is None:
        return None

    return jsonutils.loads(json_str)


# ---- mistral-10.0.0.0b3/mistral/utils/filter_utils.py ----

# Copyright 2016 NEC Corporation. All rights reserved.
# Copyright 2019 Nokia Software. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six

EQUALS = 'eq'
NOT_EQUAL = 'neq'
LESS_THAN = 'lt'
LESS_THAN_EQUALS = 'lte'
GREATER_THAN = 'gt'
GREATER_THAN_EQUALS = 'gte'
IN = 'in'
NOT_IN = 'nin'
HAS = 'has'

ALL = (GREATER_THAN_EQUALS, GREATER_THAN, LESS_THAN_EQUALS, HAS,
       NOT_EQUAL, LESS_THAN, IN, EQUALS, NOT_IN)


def create_filters_from_request_params(none_values=None, **params):
    """Create filters from REST request parameters.

    :param none_values: field names, where the value is required to be None.
    :param params: REST request parameters.
    :return: filters dictionary.
    """
    none_values = none_values or []
    filters = {}

    for column, data in params.items():
        if (data is None and column in none_values) or data is not None:
            if isinstance(data, six.string_types):
                f_type, value = extract_filter_type_and_value(data)

                create_or_update_filter(column, value, f_type, filters)
            else:
                create_or_update_filter(column, data, _filter=filters)

    return filters


def create_or_update_filter(column, value, filter_type='eq', _filter=None):
    """Create or update filter.

    :param column: Column name by which user wants to filter.
    :param value: Column value.
    :param filter_type: Filter type. Filter type can be
        'eq', 'neq', 'gt', 'gte', 'lte', 'in', 'lt', 'nin'.
        Default is 'eq'.
    :param _filter: Optional. If provided, the same filter dictionary will
        be updated.
    :return: filter dictionary.
    """
    if _filter is None:
        _filter = {}

    _filter[column] = {filter_type: value}

    return _filter


def extract_filter_type_and_value(data):
    """Extract filter type and its value from the data.

    :param data: REST parameter value from which filter type and value
        can be extracted. It should be in the format 'filter_type:value'.
    :return: filter type and value.
    """
    if has_filters(data):
        filter_type, value = data.split(':', 1)
        value = six.text_type(value)

        if data.startswith((IN, NOT_IN)):
            value = list(value.split(","))
    else:
        value = data
        filter_type = EQUALS

    return filter_type, value


def has_filters(value):
    for filter_type in ALL:
        if value.startswith(filter_type + ':'):
            return True

    return False
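# Editor's sketch, not part of the distribution: how the helpers above turn
# REST query parameters into a filters dictionary (parameter names are
# hypothetical).
def _example_create_filters():
    filters = create_filters_from_request_params(
        name='in:wf1,wf2',    # explicit 'in' filter with a list value
        state='eq:SUCCESS',   # explicit 'eq' filter
        project_id='p-1'      # no prefix defaults to 'eq'
    )

    assert filters == {
        'name': {'in': ['wf1', 'wf2']},
        'state': {'eq': 'SUCCESS'},
        'project_id': {'eq': 'p-1'}
    }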
# ---- mistral-10.0.0.0b3/mistral/utils/javascript.py ----

# Copyright 2015 - Mirantis, Inc.
# Copyright 2020 - Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc

from mistral import config as cfg
from mistral import exceptions as exc
from mistral import utils
from oslo_utils import importutils
from stevedore import driver
from stevedore import extension

_PYV8 = importutils.try_import('PyV8')
_V8EVAL = importutils.try_import('v8eval')
_PY_MINI_RACER = importutils.try_import('py_mini_racer.py_mini_racer')

_EVALUATOR = None


class JSEvaluator(object):
    @classmethod
    @abc.abstractmethod
    def evaluate(cls, script, context):
        """Executes given JavaScript.

        :param script: The text of the JavaScript snippet that needs to
            be executed.
        :param context: This object will be assigned to the $ javascript
            variable.
        :return: Result of the evaluated javascript code.
        :raise MistralException: if the corresponding js library is not
            installed.
        """
        pass


class PyV8Evaluator(JSEvaluator):
    @classmethod
    def evaluate(cls, script, ctx):
        if not _PYV8:
            raise exc.MistralException(
                "PyV8 module is not available. Please install PyV8."
            )

        with _PYV8.JSContext() as js_ctx:
            # Prepare data context and way for interaction with it.
            js_ctx.eval('$ = %s' % utils.to_json_str(ctx))

            result = js_ctx.eval(script)

            return _PYV8.convert(result)


class V8EvalEvaluator(JSEvaluator):
    @classmethod
    def evaluate(cls, script, ctx):
        if not _V8EVAL:
            raise exc.MistralException(
                "v8eval module is not available. Please install v8eval."
            )

        v8 = _V8EVAL.V8()

        ctx_str = utils.to_json_str(ctx)

        return v8.eval(
            ('$ = %s; %s' % (ctx_str, script)).encode(encoding='UTF-8')
        )


class PyMiniRacerEvaluator(JSEvaluator):
    @classmethod
    def evaluate(cls, script, ctx):
        if not _PY_MINI_RACER:
            raise exc.MistralException(
                "PyMiniRacer module is not available. Please install "
                "PyMiniRacer."
            )

        js_ctx = _PY_MINI_RACER.MiniRacer()

        return js_ctx.eval(
            '$ = {}; {}'.format(utils.to_json_str(ctx), script)
        )


_mgr = extension.ExtensionManager(
    namespace='mistral.expression.evaluators',
    invoke_on_load=False
)


def get_js_evaluator():
    global _EVALUATOR

    if not _EVALUATOR:
        mgr = driver.DriverManager(
            'mistral.js.implementation',
            cfg.CONF.js_implementation,
            invoke_on_load=True
        )

        _EVALUATOR = mgr.driver

    return _EVALUATOR


def evaluate(script, ctx):
    return get_js_evaluator().evaluate(script, ctx)
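# Editor's sketch, not part of the distribution: evaluating a snippet
# against a data context. Assumes a JS implementation (e.g. py_mini_racer)
# is installed and selected via the 'js_implementation' config option.
def _example_js_evaluate():
    """The context dict is exposed to the snippet as '$'."""
    return evaluate('$.a + $.b', {'a': 1, 'b': 2})  # -> 3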
# ---- mistral-10.0.0.0b3/mistral/utils/openstack/__init__.py (empty) ----

# ---- mistral-10.0.0.0b3/mistral/utils/openstack/keystone.py ----

# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from keystoneauth1 import loading
from keystoneclient.v3 import client as ks_client
from oslo_config import cfg

from mistral import context

CONF = cfg.CONF


def client():
    ctx = context.ctx()

    auth_url = ctx.auth_uri or CONF.keystone_authtoken.www_authenticate_uri

    cl = ks_client.Client(
        user_id=ctx.user_id,
        token=ctx.auth_token,
        tenant_id=ctx.project_id,
        auth_url=auth_url
    )

    cl.management_url = auth_url

    return cl


def client_for_admin():
    return _admin_client()


def client_for_trusts(trust_id):
    return _admin_client(trust_id=trust_id)


def _admin_client(trust_id=None):
    if CONF.keystone_authtoken.auth_type is None:
        auth_url = CONF.keystone_authtoken.www_authenticate_uri
        project_name = CONF.keystone_authtoken.admin_tenant_name

        # You can't use trust and project together.
        if trust_id:
            project_name = None

        cl = ks_client.Client(
            username=CONF.keystone_authtoken.admin_user,
            password=CONF.keystone_authtoken.admin_password,
            project_name=project_name,
            auth_url=auth_url,
            trusts=trust_id
        )

        cl.management_url = auth_url

        return cl
    else:
        kwargs = {}

        if trust_id:
            # Remove domain_id, domain_name, project_name and project_id,
            # since we need a trust scoped auth object.
            kwargs['domain_id'] = None
            kwargs['domain_name'] = None
            kwargs['project_name'] = None
            kwargs['project_domain_name'] = None
            kwargs['project_id'] = None
            kwargs['trust_id'] = trust_id

        auth = loading.load_auth_from_conf_options(
            CONF,
            'keystone_authtoken',
            **kwargs
        )

        sess = loading.load_session_from_conf_options(
            CONF,
            'keystone',
            auth=auth
        )

        return ks_client.Client(session=sess)


# ---- mistral-10.0.0.0b3/mistral/utils/profiler.py ----

# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import datetime
import json

from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.profiler
import osprofiler.web

from mistral_lib import utils

PROFILER_LOG = logging.getLogger(cfg.CONF.profiler.profiler_log_name)


def log_to_file(info, context=None):
    attrs = [
        str(info['timestamp']),
        info['base_id'],
        info['parent_id'],
        info['trace_id'],
        info['name']
    ]

    th_local_name = '_profiler_trace_%s_start_time_' % info['trace_id']

    if info['name'].endswith('-start'):
        utils.set_thread_local(
            th_local_name,
            datetime.datetime.utcnow()
        )

        # Insert a blank sequence for a trace start.
        attrs.insert(1, ' ' * 8)

    if info['name'].endswith('-stop'):
        delta = (
            datetime.datetime.utcnow() -
            utils.get_thread_local(th_local_name)
        ).total_seconds()

        utils.set_thread_local(th_local_name, None)

        # Insert the measured duration for a trace stop.
        attrs.insert(1, str(delta))

        if delta > 0.5:
            attrs.append(' <- !!!')

    if 'info' in info and 'db' in info['info']:
        db_info = copy.deepcopy(info['info']['db'])

        db_info['params'] = {
            k: str(v) if isinstance(v, datetime.datetime) else v
            for k, v in db_info.get('params', {}).items()
        }

        attrs.append(json.dumps(db_info))

    PROFILER_LOG.info(' '.join(attrs))


def setup(binary, host):
    if cfg.CONF.profiler.enabled:
        osprofiler.notifier.set(log_to_file)
        osprofiler.web.enable(cfg.CONF.profiler.hmac_keys)


# ---- mistral-10.0.0.0b3/mistral/utils/rest_utils.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
# Copyright 2018 - Nokia, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import json

from oslo_db import exception as db_exc
from oslo_log import log as logging
import pecan
import six
import sqlalchemy as sa
import tenacity
import webob
from wsme import exc as wsme_exc

from mistral import context as auth_ctx
from mistral.db import utils as db_utils
from mistral.db.v2.sqlalchemy import api as db_api
from mistral import exceptions as exc

LOG = logging.getLogger(__name__)


def wrap_wsme_controller_exception(func):
    """Decorator for controller methods.

    This decorator wraps controller methods to manage wsme exceptions:
    in case of an expected error it aborts the request with a specific
    status code.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (exc.MistralException, exc.MistralError) as e:
            pecan.response.translatable_error = e

            LOG.error('Error during API call: %s', str(e))

            raise wsme_exc.ClientSideError(
                msg=six.text_type(e),
                status_code=e.http_code
            )

    return wrapped


def wrap_pecan_controller_exception(func):
    """Decorator for controller methods.

    This decorator wraps controller methods to manage pecan exceptions:
    in case of an expected error it aborts the request with a specific
    status code.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (exc.MistralException, exc.MistralError) as e:
            LOG.error('Error during API call: %s', str(e))

            return webob.Response(
                status=e.http_code,
                content_type='application/json',
                body=json.dumps(dict(faultstring=six.text_type(e))),
                charset='UTF-8'
            )

    return wrapped


def validate_query_params(limit, sort_keys, sort_dirs):
    if limit is not None and limit <= 0:
        raise wsme_exc.ClientSideError("Limit must be positive.")

    if len(sort_keys) < len(sort_dirs):
        raise wsme_exc.ClientSideError(
            "Length of sort_keys must be equal to or greater than sort_dirs."
        )

    if len(sort_keys) > len(sort_dirs):
        sort_dirs.extend(['asc'] * (len(sort_keys) - len(sort_dirs)))

    for sort_dir in sort_dirs:
        if sort_dir not in ['asc', 'desc']:
            raise wsme_exc.ClientSideError(
                "Unknown sort direction, must be 'desc' or 'asc'."
            )


def validate_fields(fields, object_fields):
    """Check for requested non-existent fields.

    Check if the user requested non-existent fields.

    :param fields: A list of fields requested by the user.
    :param object_fields: A list of fields supported by the object.
    """
    if not fields:
        return

    invalid_fields = set(fields) - set(object_fields)

    if invalid_fields:
        raise wsme_exc.ClientSideError(
            'Some fields do not exist [%s], please choose from [%s]' %
            (', '.join(invalid_fields), ', '.join(object_fields))
        )


def filters_to_dict(**kwargs):
    """Return only non-null values.

    :param kwargs: All possible filters.
    :type kwargs: dict
    :return: Actual filters.
    :rtype: dict
    """
    return {k: v for k, v in kwargs.items() if v is not None}


def get_all(list_cls, cls, get_all_function, get_function,
            resource_function=None, marker=None, limit=None,
            sort_keys=None, sort_dirs=None, fields=None,
            all_projects=False, **filters):
    """Return a list of cls.

    :param list_cls: REST Resource collection class (e.g.: Actions,
        Workflows, ...)
    :param cls: REST Resource class (e.g.: Action, Workflow, ...)
    :param get_all_function: Request function to get all elements with
        filtering (limit, marker, sort_keys, sort_dirs, fields)
    :param get_function: Function used to fetch the marker
    :param resource_function: Optional, function used to fetch additional
        data
    :param marker: Optional. Pagination marker for large data sets.
    :param limit: Optional. Maximum number of resources to return in a
        single result. Default value is None for backward compatibility.
    :param sort_keys: Optional. List of columns to sort results by.
        Default: ['created_at'].
    :param sort_dirs: Optional. List of directions to sort corresponding
        to sort_keys, "asc" or "desc" can be chosen. Default: ['asc'].
    :param fields: Optional. A specified list of fields of the resource
        to be returned. 'id' will be included automatically in fields
        if it's provided, since it will be used when constructing the
        'next' link.
    :param filters: Optional. A specified dictionary of filters to match.
    :param all_projects: Optional. Get resources of all projects.
    """
    sort_keys = ['created_at'] if sort_keys is None else sort_keys
    sort_dirs = ['asc'] if sort_dirs is None else sort_dirs
    fields = [] if fields is None else fields

    if fields and 'id' not in fields:
        fields.insert(0, 'id')

    validate_query_params(limit, sort_keys, sort_dirs)
    validate_fields(fields, cls.get_fields())

    # An admin user can get all tenants' resources, no matter whether they
    # are private or public.
    insecure = False

    if (all_projects or
            (auth_ctx.ctx().is_admin and filters.get('project_id', ''))):
        insecure = True

    marker_obj = None

    if marker:
        marker_obj = get_function(marker)

    def _get_all_function():
        with db_api.transaction():
            db_models = get_all_function(
                limit=limit,
                marker=marker_obj,
                sort_keys=sort_keys,
                sort_dirs=sort_dirs,
                insecure=insecure,
                **filters
            )

            for db_model in db_models:
                if resource_function:
                    rest_resource = resource_function(db_model)
                else:
                    rest_resource = cls.from_db_model(db_model)

                rest_resources.append(rest_resource)

    rest_resources = []

    r = create_db_retry_object()

    # If only certain fields are requested then we ignore the
    # "resource_function" parameter because it doesn't make sense anymore.
    if fields:
        # Use retries to prevent possible failures.
        db_list = r.call(
            get_all_function,
            limit=limit,
            marker=marker_obj,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            fields=fields,
            insecure=insecure,
            **filters
        )

        for obj_values in db_list:
            # Note: in case only certain fields have been requested,
            # "db_list" contains tuples with values of db objects.
            rest_resources.append(
                cls.from_tuples(zip(fields, obj_values))
            )
    else:
        r.call(_get_all_function)

    return list_cls.convert_with_links(
        rest_resources,
        limit,
        pecan.request.application_url,
        sort_keys=','.join(sort_keys),
        sort_dirs=','.join(sort_dirs),
        fields=','.join(fields) if fields else '',
        **filters
    )


class MistralRetrying(tenacity.Retrying):
    def call(self, fn, *args, **kwargs):
        try:
            return super(MistralRetrying, self).call(fn, *args, **kwargs)
        except tenacity.RetryError:
            raise exc.MistralError("The service is temporarily unavailable")


def create_db_retry_object():
    return MistralRetrying(
        retry=tenacity.retry_if_exception_type(
            (
                sa.exc.OperationalError,
                db_exc.DBDeadlock,
                db_exc.DBConnectionError
            )
        ),
        stop=tenacity.stop_after_attempt(10),
        wait=tenacity.wait_incrementing(increment=0.5)  # 0.5 seconds
    )


def rest_retry_on_db_error(func):
    return db_utils.retry_on_db_error(func, create_db_retry_object())


def load_deferred_fields(ex, fields):
    if not ex:
        return ex

    # We need to refer to lazy-loaded fields explicitly in
    # order to make sure that they are correctly loaded.
    for f in fields:
        hasattr(ex, f)

    return ex


# ---- mistral-10.0.0.0b3/mistral/utils/safe_yaml.py ----

# Copyright 2019 - Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import yaml
from yaml import *  # noqa

yaml.SafeDumper.ignore_aliases = lambda *args: True


class SafeLoader(yaml.SafeLoader):
    """Treat '@', '&', '*' as plain strings.

    Anchors are not used in Mistral workflows, so it's better to disable
    them completely. Anchors can be exploited for a denial-of-service
    attack through expansion ("billion laughs", see
    https://en.wikipedia.org/wiki/Billion_laughs_attack).
    This module also uses the safe loader by default, which is always
    the better loader.

    When using the yaml module to load a YAML file or a string, use this
    module instead of yaml.
    Example:

        import mistral.utils.safe_yaml as safe_yaml
        ...
        safe_yaml.load(...)
    """

    def fetch_alias(self):
        return self.fetch_plain()

    def fetch_anchor(self):
        return self.fetch_plain()

    def check_plain(self):
        # Modified: allow '@'.
        if self.peek() == '@':
            return True
        else:
            return super(SafeLoader, self).check_plain()


def load(stream):
    return yaml.load(stream, SafeLoader)


def safe_load(stream):
    return load(stream)
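# Editor's sketch, not part of the distribution: with the SafeLoader above,
# '@' starts a plain scalar and anchors/aliases are read as plain strings,
# so alias expansion can't be used to blow up memory.
def _example_safe_load():
    return load("key: @value")  # -> {'key': '@value'}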
# ---- mistral-10.0.0.0b3/mistral/utils/ssh_utils.py ----

# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from os import path

import six

from oslo_log import log as logging
import paramiko

from mistral import exceptions as exc

KEY_PATH = path.expanduser("~/.ssh/")

LOG = logging.getLogger(__name__)


def _read_paramiko_stream(recv_func):
    result = b''

    buf = recv_func(1024)

    while buf != b'':
        result += buf
        buf = recv_func(1024)

    return result.decode('utf-8')


def _to_paramiko_private_key(private_key_filename, private_key=None,
                             password=None):
    if private_key:
        return paramiko.RSAKey.from_private_key(
            file_obj=six.StringIO(private_key),
            password=password
        )

    if private_key_filename:
        if '../' in private_key_filename or '..\\' in private_key_filename:
            raise exc.DataAccessException(
                "Private key filename must not contain '..'. "
                "Actual: %s" % private_key_filename
            )

        if private_key_filename.startswith('/'):
            private_key_path = private_key_filename
        else:
            private_key_path = KEY_PATH + private_key_filename

        return paramiko.RSAKey(
            filename=private_key_path,
            password=password
        )

    return None


def _connect(host, username, password=None, pkey=None, proxy=None):
    LOG.debug('Creating SSH connection to %s', host)

    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    ssh_client.connect(
        host,
        username=username,
        password=password,
        pkey=pkey,
        sock=proxy
    )

    return ssh_client


def _cleanup(ssh_client):
    ssh_client.close()


def _execute_command(ssh_client, cmd, get_stderr=False,
                     raise_when_error=True):
    try:
        chan = ssh_client.get_transport().open_session()

        chan.exec_command(cmd)

        # TODO(nmakhotkin): that could hang if stderr buffer overflows.
        stdout = _read_paramiko_stream(chan.recv)
        stderr = _read_paramiko_stream(chan.recv_stderr)

        ret_code = chan.recv_exit_status()

        if ret_code and raise_when_error:
            raise RuntimeError("Cmd: %s\nReturn code: %s\nstdout: %s" %
                               (cmd, ret_code, stdout))

        if get_stderr:
            return ret_code, stdout, stderr
        else:
            return ret_code, stdout
    finally:
        _cleanup(ssh_client)


def execute_command_via_gateway(cmd, host, username, private_key_filename,
                                gateway_host, gateway_username=None,
                                proxy_command=None, password=None,
                                private_key=None):
    LOG.debug('Creating SSH connection')

    private_key = _to_paramiko_private_key(
        private_key_filename,
        private_key,
        password
    )

    proxy = None

    if proxy_command:
        LOG.debug('Creating proxy using command: %s', proxy_command)

        proxy = paramiko.ProxyCommand(proxy_command)

    _proxy_ssh_client = paramiko.SSHClient()
    _proxy_ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    LOG.debug('Connecting to proxy gateway at: %s', gateway_host)

    if not gateway_username:
        gateway_username = username

    _proxy_ssh_client.connect(
        gateway_host,
        username=gateway_username,
        pkey=private_key,
        sock=proxy
    )

    proxy = _proxy_ssh_client.get_transport().open_session()
    proxy.exec_command("nc {0} 22".format(host))

    ssh_client = _connect(
        host,
        username=username,
        pkey=private_key,
        proxy=proxy
    )

    try:
        return _execute_command(
            ssh_client,
            cmd,
            get_stderr=False,
            raise_when_error=True
        )
    finally:
        _cleanup(_proxy_ssh_client)


def execute_command(cmd, host, username, password=None,
                    private_key_filename=None, private_key=None,
                    get_stderr=False, raise_when_error=True):
    LOG.debug('Creating SSH connection')

    private_key = _to_paramiko_private_key(
        private_key_filename,
        private_key,
        password
    )

    ssh_client = _connect(host, username, password, private_key)

    LOG.debug("Executing command %s", cmd)

    return _execute_command(ssh_client, cmd, get_stderr, raise_when_error)


# ---- mistral-10.0.0.0b3/mistral/utils/wf_trace.py ----

# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 - Mirantis, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from oslo_log import log as logging

from mistral.db.v2.sqlalchemy import models

cfg.CONF.import_opt('workflow_trace_log_name', 'mistral.config')

WF_TRACE = logging.getLogger(cfg.CONF.workflow_trace_log_name)


def info(obj, msg, *args, **kvargs):
    """Logs a workflow trace record for an Execution or a Task.

    :param obj: If the type is TaskExecution or WorkflowExecution, appends
        execution_id and task_id to the log message. The rest of the
        parameters follow logger.info(...)
    """
    debug_info = ''

    if type(obj) is models.TaskExecution:
        exec_id = obj.workflow_execution_id
        task_id = obj.id

        debug_info = '(execution_id=%s task_id=%s)' % (exec_id, task_id)
    elif type(obj) is models.WorkflowExecution:
        debug_info = '(execution_id=%s)' % obj.id

    if debug_info:
        msg = '%s %s' % (msg, debug_info)

    WF_TRACE.info(msg, *args, **kvargs)
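# Editor's sketch, not part of the distribution: info() above appends
# execution and task identifiers automatically for known model types.
# The ids here are hypothetical.
def _example_wf_trace():
    task_ex = models.TaskExecution(id='t-1', workflow_execution_id='w-1')

    # Logs: "Task started (execution_id=w-1 task_id=t-1)"
    info(task_ex, 'Task started')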
# ---- mistral-10.0.0.0b3/mistral/version.py ----

# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pbr import version

version_info = version.VersionInfo('mistral')
version_string = version_info.version_string()


# ---- mistral-10.0.0.0b3/mistral/workflow/__init__.py (empty) ----

# ---- mistral-10.0.0.0b3/mistral/workflow/base.py ----

# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc

from oslo_log import log as logging
from osprofiler import profiler

from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral.workflow import commands
from mistral.workflow import data_flow
from mistral.workflow import states
from mistral_lib import utils as u

LOG = logging.getLogger(__name__)


@profiler.trace('wf-controller-get-controller', hide_args=True)
def get_controller(wf_ex, wf_spec=None):
    """Gets a workflow controller instance by the given workflow execution.

    :param wf_ex: Workflow execution object.
    :param wf_spec: Workflow specification object. If passed, the method
        works faster.
    :returns: Workflow controller instance.
    """
    if not wf_spec:
        wf_spec = spec_parser.get_workflow_spec_by_execution_id(wf_ex.id)

    wf_type = wf_spec.get_type()

    ctrl_cls = None

    for cls in u.iter_subclasses(WorkflowController):
        if cls.__workflow_type__ == wf_type:
            ctrl_cls = cls
            break

    if not ctrl_cls:
        raise exc.MistralError(
            'Failed to find a workflow controller [type=%s]' % wf_type
        )

    return ctrl_cls(wf_ex, wf_spec)


class TaskLogicalState(object):
    """Task logical state.

    This data structure describes what state a task should have according
    to the logic of the workflow type and the state of other tasks.
    """

    def __init__(self, state, state_info=None, cardinality=0,
                 triggered_by=None):
        self.state = state
        self.state_info = state_info
        self.cardinality = cardinality
        self.triggered_by = triggered_by or []

    def get_state(self):
        return self.state

    def get_state_info(self):
        return self.state_info

    def get_cardinality(self):
        return self.cardinality

    def get_triggered_by(self):
        return self.triggered_by


class WorkflowController(object):
    """Workflow Controller base class.

    Different workflow controllers implement different workflow algorithms.
    In practice it may actually mean that there may be multiple ways of
    describing workflow models (and even languages) that will be supported
    by Mistral.
    """

    def __init__(self, wf_ex, wf_spec=None):
        """Creates a new workflow controller.

        :param wf_ex: Workflow execution.
        :param wf_spec: Workflow specification.
        """
        self.wf_ex = wf_ex

        if wf_spec is None:
            wf_spec = spec_parser.get_workflow_spec_by_execution_id(wf_ex.id)

        self.wf_spec = wf_spec

    @profiler.trace('workflow-controller-continue-workflow', hide_args=True)
    def continue_workflow(self, task_ex=None):
        """Calculates a list of commands to continue the workflow.

        Given a workflow specification, this method makes the required
        analysis according to this workflow type's rules and identifies a
        list of commands needed to continue the workflow.

        :param task_ex: Task execution that caused workflow continuation.
            Optional. If not specified, it means that no certain task caused
            this operation (e.g. the workflow has just been started or
            resumed manually).
        :return: List of workflow commands (instances of
            mistral.workflow.commands.WorkflowCommand).
        """
        if self._is_completed():
            return []

        return self._find_next_commands(task_ex)

    def rerun_tasks(self, task_execs, reset=True):
        """Gets commands to rerun existing task executions.

        :param task_execs: List of task executions.
        :param reset: If true, then purge action executions for the tasks.
        :return: List of workflow commands.
        """
        if self._is_paused_or_completed():
            return []

        cmds = [
            commands.RunExistingTask(self.wf_ex, self.wf_spec, t_e, reset,
                                     rerun=True)
            for t_e in task_execs
        ]

        LOG.debug("Commands to rerun workflow tasks: %s", cmds)

        return cmds

    @abc.abstractmethod
    def get_logical_task_state(self, task_ex):
        """Determines a logical state of the given task.

        :param task_ex: Task execution.
        :return: Tuple (state, state_info, cardinality) where 'state' and
            'state_info' are the corresponding values which the given task
            should have according to workflow rules and the current states
            of other tasks. 'cardinality' gives the estimation on the
            number of preconditions that are not yet met in case the state
            is WAITING. This number can be used to estimate how frequently
            we can refresh the state of this task.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def find_indirectly_affected_task_executions(self, task_name):
        """Get a set of task executions indirectly affected by the given.

        :param task_name: Task name.
        :return: Task executions that can be indirectly affected by a task
            identified by the given name.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def is_error_handled_for(self, task_ex):
        """Determines if an error is handled for the specific task.

        :param task_ex: Task execution.
        :return: True if either there is no error at all or the error is
            considered handled.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def all_errors_handled(self):
        """Determines if all errors (if any) are handled.

        :return: True if either there aren't errors at all or all errors
            are considered handled.
        """
        raise NotImplementedError

    def any_cancels(self):
        """Determines if there are any task cancellations.

        :return: True if there is one or more tasks in cancelled state.
        """
        return db_api.get_task_executions_count(
            workflow_execution_id=self.wf_ex.id,
            state=states.CANCELLED
        ) > 0

    @abc.abstractmethod
    def evaluate_workflow_final_context(self):
        """Evaluates the final workflow context assuming it has finished.

        :return: Final workflow context.
        """
        raise NotImplementedError

    def get_task_inbound_context(self, task_spec):
        # TODO(rakhmerov): This method should also be able to work with
        # task_ex to cover the 'split' (aka 'merge') use case.
        upstream_task_execs = self._get_upstream_task_executions(task_spec)

        return data_flow.evaluate_upstream_context(upstream_task_execs)

    @abc.abstractmethod
    def _get_upstream_task_executions(self, task_spec):
        """Gets workflow upstream tasks for the given task.

        :param task_spec: Task specification.
        :return: List of upstream task executions for the given task spec.
        """
        raise NotImplementedError

    def may_complete_workflow(self, task_ex):
        """Determines if the task execution may lead to workflow completion."""
        return states.is_completed(task_ex.state)

    @abc.abstractmethod
    def _find_next_commands(self, task_ex):
        """Finds commands that should run next.

        A concrete algorithm of finding such tasks depends on a concrete
        workflow controller.

        :return: List of workflow commands.
        """
        # If a task execution was passed then we should make all
        # calculations only based on it.
        if task_ex:
            return []

        # Add all tasks in IDLE state.
        return [
            commands.RunExistingTask(self.wf_ex, self.wf_spec, t)
            for t in self._get_task_executions(state=states.IDLE)
        ]

    def _is_completed(self):
        return states.is_completed(self.wf_ex.state)

    def _is_paused_or_completed(self):
        return states.is_paused_or_completed(self.wf_ex.state)

    def _get_task_executions(self, **kwargs):
        return db_api.get_task_executions(
            workflow_execution_id=self.wf_ex.id,
            sort_keys=[],  # disable sorting
            **kwargs
        )


# ---- mistral-10.0.0.0b3/mistral/workflow/commands.py ----

# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mistral.lang import parser as spec_parser
from mistral.lang.v2 import workflows
from mistral.workflow import states


class WorkflowCommand(object):
    """Workflow command.

    A set of workflow commands forms a communication protocol between the
    workflow controller and its clients. When a workflow controller makes
    a decision about how to continue a workflow, it returns a set of
    commands so that a caller knows what to do next.
    """

    def __init__(self, wf_ex, wf_spec, task_spec, ctx, triggered_by=None,
                 handles_error=False):
        self.wf_ex = wf_ex
        self.wf_spec = wf_spec
        self.task_spec = task_spec
        self.ctx = ctx or {}
        self.triggered_by = triggered_by
        self.handles_error = handles_error

    def to_dict(self):
        return {
            'task_name': self.task_spec.get_name(),
            'ctx': self.ctx,
            'triggered_by': self.triggered_by
        }


class Noop(WorkflowCommand):
    """No-operation command."""

    def __repr__(self):
        return "NOOP [workflow=%s]" % self.wf_ex.name

    def to_dict(self):
        d = super(Noop, self).to_dict()

        d['cmd_name'] = 'noop'

        return d


class RunTask(WorkflowCommand):
    """Instruction to run a workflow task."""

    def __init__(self, wf_ex, wf_spec, task_spec, ctx, triggered_by=None,
                 handles_error=False):
        super(RunTask, self).__init__(
            wf_ex,
            wf_spec,
            task_spec,
            ctx,
            triggered_by=triggered_by,
            handles_error=handles_error
        )

        self.wait = False
        self.unique_key = None

    def is_waiting(self):
        return self.wait

    def get_unique_key(self):
        return self.unique_key

    def __repr__(self):
        return (
            "Run task [workflow=%s, task=%s, wait_flag=%s, triggered_by=%s]"
            % (
                self.wf_ex.name,
                self.task_spec.get_name(),
                self.wait,
                self.triggered_by
            )
        )

    def to_dict(self):
        d = super(RunTask, self).to_dict()

        d['cmd_name'] = self.task_spec.get_name()
        d['wait'] = self.wait
        d['unique_key'] = self.unique_key

        return d


class RunExistingTask(WorkflowCommand):
    """Command to run an existing workflow task."""

    def __init__(self, wf_ex, wf_spec, task_ex, reset=True,
                 triggered_by=None, handles_error=False, rerun=False):
        super(RunExistingTask, self).__init__(
            wf_ex,
            wf_spec,
            spec_parser.get_task_spec(task_ex.spec),
            task_ex.in_context,
            triggered_by=triggered_by,
            handles_error=handles_error
        )

        self.task_ex = task_ex
        self.reset = reset
        self.unique_key = task_ex.unique_key
        self.rerun = rerun

    def to_dict(self):
        d = super(RunExistingTask, self).to_dict()

        d['cmd_name'] = 'run_existing_task'
        d['task_ex_id'] = self.task_ex.id
        d['reset'] = self.reset
        d['unique_key'] = self.unique_key

        return d


class SetWorkflowState(WorkflowCommand):
    """Instruction to change a workflow state."""

    def __init__(self, wf_ex, wf_spec, task_spec, ctx, new_state, msg=None,
                 triggered_by=None, handles_error=False):
        super(SetWorkflowState, self).__init__(
            wf_ex,
            wf_spec,
            task_spec,
            ctx,
            triggered_by=triggered_by,
            handles_error=handles_error
        )

        self.new_state = new_state
        self.msg = msg

    def to_dict(self):
        d = super(SetWorkflowState, self).to_dict()

        d['new_state'] = self.new_state
        d['msg'] = self.msg

        return d


class FailWorkflow(SetWorkflowState):
    """Instruction to fail a workflow."""

    def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None,
                 triggered_by=None, handles_error=False):
        super(FailWorkflow, self).__init__(
            wf_ex,
            wf_spec,
            task_spec,
            ctx,
            states.ERROR,
            msg=msg,
            triggered_by=triggered_by,
            handles_error=handles_error
        )

    def __repr__(self):
        return "Fail [workflow=%s]" % self.wf_ex.name

    def to_dict(self):
        d = super(FailWorkflow, self).to_dict()

        d['cmd_name'] = 'fail'

        return d


class SucceedWorkflow(SetWorkflowState):
    """Instruction to succeed a workflow."""

    def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None,
                 triggered_by=None, handles_error=False):
        super(SucceedWorkflow, self).__init__(
            wf_ex,
            wf_spec,
            task_spec,
            ctx,
            states.SUCCESS,
            msg=msg,
            triggered_by=triggered_by,
            handles_error=handles_error
        )

    def __repr__(self):
        return "Succeed [workflow=%s]" % self.wf_ex.name

    def to_dict(self):
        d = super(SucceedWorkflow, self).to_dict()

        d['cmd_name'] = 'succeed'

        return d


class PauseWorkflow(SetWorkflowState):
    """Instruction to pause a workflow."""

    def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None,
                 triggered_by=None, handles_error=False):
        super(PauseWorkflow, self).__init__(
            wf_ex,
            wf_spec,
            task_spec,
            ctx,
            states.PAUSED,
            msg=msg,
            triggered_by=triggered_by,
            handles_error=handles_error
        )

    def __repr__(self):
        return "Pause [workflow=%s]" % self.wf_ex.name

    def to_dict(self):
        d = super(PauseWorkflow, self).to_dict()

        d['cmd_name'] = 'pause'

        return d


ENGINE_CMD_CLS = {
    workflows.NOOP_COMMAND: Noop,
    workflows.FAIL_COMMAND: FailWorkflow,
    workflows.SUCCEED_COMMAND: SucceedWorkflow,
    workflows.PAUSE_COMMAND: PauseWorkflow
}


def is_engine_command(cmd):
    return cmd is not None and isinstance(cmd, (SetWorkflowState, Noop))


def get_command_class(cmd_name):
    return ENGINE_CMD_CLS[cmd_name] if cmd_name in ENGINE_CMD_CLS else None


# TODO(rakhmerov): IMO the way we instantiate commands is weird.
# If we look at how we implement the logic of saving commands to
# dicts (to_dict) and restoring back from dicts, then we'll see
# the lack of symmetry and a unified way to do that depending on a
# command. Also, RunExistingTask turns out to be a special case that
# is not processed with this method at all. Might be a 'bad smell'.
# This all makes me think that we need to do some refactoring here.
def create_command(cmd_name, wf_ex, wf_spec, task_spec, ctx, params=None,
                   triggered_by=None, handles_error=False):
    cmd_cls = get_command_class(cmd_name) or RunTask

    if issubclass(cmd_cls, SetWorkflowState):
        return cmd_cls(
            wf_ex,
            wf_spec,
            task_spec,
            ctx,
            msg=params.get('msg'),
            triggered_by=triggered_by,
            handles_error=handles_error
        )
    else:
        return cmd_cls(
            wf_ex,
            wf_spec,
            task_spec,
            ctx,
            triggered_by=triggered_by,
            handles_error=handles_error
        )


def restore_command_from_dict(wf_ex, cmd_dict):
    cmd_name = cmd_dict['cmd_name']

    wf_spec = spec_parser.get_workflow_spec_by_execution_id(wf_ex.id)
    task_spec = wf_spec.get_tasks()[cmd_dict['task_name']]
    ctx = cmd_dict['ctx']
    params = {'msg': cmd_dict.get('msg')} if 'msg' in cmd_dict else None
    triggered_by = cmd_dict.get('triggered_by')

    return create_command(
        cmd_name,
        wf_ex,
        wf_spec,
        task_spec,
        ctx,
        params,
        triggered_by
    )


# ---- mistral-10.0.0.0b3/mistral/workflow/data_flow.py ----

# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from oslo_log import log as logging
from osprofiler import profiler

from mistral import context as auth_ctx
from mistral.db.v2.sqlalchemy import models
from mistral import exceptions as exc
from mistral import expressions as expr
from mistral.lang import parser as spec_parser
from mistral.workflow import states
from mistral_lib import utils
from mistral_lib.utils import inspect_utils

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class ContextView(dict):
    """Workflow context view.

    It's essentially an immutable composite structure providing fast lookup
    over multiple dictionaries w/o having to merge those dictionaries every
    time. The lookup algorithm simply iterates over the provided
    dictionaries one by one and returns a value taken from the first
    dictionary where the provided key exists. This means that these
    dictionaries must be provided in the order of decreasing priorities.

    Note: Although this class extends the built-in 'dict', it shouldn't be
    considered a normal dictionary because it may not implement all
    methods and account for all corner cases. It's only a read-only view.
    """

    def __init__(self, *dicts):
        super(ContextView, self).__init__()

        self.dicts = dicts or []

    def __getitem__(self, key):
        for d in self.dicts:
            if key in d:
                return d[key]

        raise KeyError(key)

    def get(self, key, default=None):
        for d in self.dicts:
            if key in d:
                return d[key]

        return default

    def __contains__(self, key):
        return any(key in d for d in self.dicts)

    def keys(self):
        keys = set()

        for d in self.dicts:
            keys.update(d.keys())

        return keys

    def items(self):
        return [(k, self[k]) for k in self.keys()]

    def values(self):
        return [self[k] for k in self.keys()]

    def iteritems(self):
        # NOTE: This is for compatibility with Python 2.7.
        # YAQL converts output objects after they are evaluated
        # to basic types and it uses six.iteritems() internally,
        # which calls d.items() in case of Python 3 and d.iteritems()
        # for Python 2.7.
        return iter(self.items())

    def iterkeys(self):
        # NOTE: This is for compatibility with Python 2.7.
        # See the comment for iteritems().
        return iter(self.keys())

    def itervalues(self):
        # NOTE: This is for compatibility with Python 2.7.
        # See the comment for iteritems().
        return iter(self.values())

    def __len__(self):
        return len(self.keys())

    @staticmethod
    def _raise_immutable_error():
        raise exc.MistralError('Context view is immutable.')

    def __setitem__(self, key, value):
        self._raise_immutable_error()

    def update(self, E=None, **F):
        self._raise_immutable_error()

    def clear(self):
        self._raise_immutable_error()

    def pop(self, k, d=None):
        self._raise_immutable_error()

    def popitem(self):
        self._raise_immutable_error()

    def __delitem__(self, key):
        self._raise_immutable_error()

    def __repr__(self):
        return ''.join(
            ['{', ', '.join([str(d)[1:-1] for d in self.dicts]), '}']
        )


def evaluate_upstream_context(upstream_task_execs):
    published_vars = {}
    ctx = {}

    for t_ex in upstream_task_execs:
        # TODO(rakhmerov): These two merges look confusing. So it's a
        # temporary solution. There's still the bug
        # https://bugs.launchpad.net/mistral/+bug/1424461 that needs to be
        # fixed using context variable versioning.
        published_vars = utils.merge_dicts(
            published_vars,
            t_ex.published
        )

        utils.merge_dicts(ctx, evaluate_task_outbound_context(t_ex))

    return utils.merge_dicts(ctx, published_vars)


def _extract_execution_result(ex):
    if isinstance(ex, models.WorkflowExecution):
        return ex.output

    if ex.output:
        return ex.output['result']


def invalidate_task_execution_result(task_ex):
    for ex in task_ex.executions:
        ex.accepted = False
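# Editor's sketch, not part of the distribution: the ContextView lookup
# order described above follows the constructor arguments - the first
# dictionary containing a key wins, and the view itself is immutable.
def _example_context_view():
    view = ContextView({'x': 'task'}, {'x': 'workflow', 'y': 1})

    assert view['x'] == 'task'   # the first dict has priority
    assert view['y'] == 1        # falls through to the second dict

    try:
        view['x'] = 'boom'       # any mutation raises MistralError
    except exc.MistralError:
        pass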
with_items_ctx = task_ex.runtime_context.get('with_items') if with_items_ctx and with_items_ctx.get('count') > 0: return results else: return [] return results[0] if len(results) == 1 else results def publish_variables(task_ex, task_spec): if task_ex.state not in [states.SUCCESS, states.ERROR]: return wf_ex = task_ex.workflow_execution expr_ctx = ContextView( get_current_task_dict(task_ex), task_ex.in_context, get_workflow_environment_dict(wf_ex), wf_ex.context, wf_ex.input ) if task_ex.name in expr_ctx: LOG.warning( 'Shadowing context variable with task name while ' 'publishing: %s', task_ex.name ) publish_spec = task_spec.get_publish(task_ex.state) if not publish_spec: return # Publish branch variables. branch_vars = publish_spec.get_branch() task_ex.published = expr.evaluate_recursively(branch_vars, expr_ctx) # Publish global variables. global_vars = publish_spec.get_global() utils.merge_dicts( task_ex.workflow_execution.context, expr.evaluate_recursively(global_vars, expr_ctx) ) # TODO(rakhmerov): # 1. Publish atomic variables. # 2. Add the field "publish" in TaskExecution model similar to "published" # but containing info as # {'branch': {vars}, 'global': {vars}, 'atomic': {vars}} @profiler.trace( 'data-flow-evaluate-task-outbound-context', hide_args=True ) def evaluate_task_outbound_context(task_ex): """Evaluates task outbound Data Flow context. This method assumes that complete task output (after publisher etc.) has already been evaluated. :param task_ex: DB task. :return: Outbound task Data Flow context. """ # NOTE(rakhmerov): 'task_ex.in_context' has the SQLAlchemy specific # type MutableDict. So we need to create a shallow copy using dict(...) # initializer and use it. It's enough to be safe in order to manipulate # with entries of the result dictionary, like adding more entries. # However, we must not change values themselves because they are # shared between the original dictionary and the newly created. # It's better to avoid using the method copy.deepcopy() because on # dictionaries with many entries it significantly increases memory # footprint and reduces performance. in_context = ( dict(task_ex.in_context) if getattr(task_ex, 'in_context', None) is not None else {} ) return utils.update_dict(in_context, getattr(task_ex, 'published', {})) def evaluate_workflow_output(wf_ex, wf_output, ctx): """Evaluates workflow output. :param wf_ex: Workflow execution. :param wf_output: Workflow output. :param ctx: Final Data Flow context (cause task's outbound context). """ # Evaluate workflow 'output' clause using the final workflow context. ctx_view = ContextView( ctx, get_workflow_environment_dict(wf_ex), wf_ex.context, wf_ex.input ) output = expr.evaluate_recursively(wf_output, ctx_view) # TODO(rakhmerov): Many don't like that we return the whole context # if 'output' is not explicitly defined. return output or ctx def get_current_task_dict(task_ex): return { '__task_execution': { 'id': task_ex.id, 'name': task_ex.name } } def add_openstack_data_to_context(wf_ex): wf_ex.context = wf_ex.context or {} if CONF.pecan.auth_enable: exec_ctx = auth_ctx.ctx() if exec_ctx: wf_ex.context.update({'openstack': exec_ctx.to_dict()}) def add_execution_to_context(wf_ex): wf_ex.context = wf_ex.context or {} wf_ex.context['__execution'] = {'id': wf_ex.id} def add_workflow_variables_to_context(wf_ex, wf_spec): wf_ex.context = wf_ex.context or {} # The context for calculating workflow variables is workflow input # and other data already stored in workflow initial context. 
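# --- Editor's illustrative sketch (not part of the original source): the
# two publishing scopes handled by publish_variables() above, modelled
# with plain dicts. Real values come from expr.evaluate_recursively()
# against the context view; here they are precomputed for brevity.

task_published = {}                 # stands in for task_ex.published
workflow_context = {'x': 1}         # stands in for wf_ex.context

branch_vars = {'status': 'ok'}      # from publish_spec.get_branch()
global_vars = {'last_task': 't1'}   # from publish_spec.get_global()

# Branch variables become visible only to tasks downstream of this
# branch; global variables are merged into the workflow-wide context.
task_published.update(branch_vars)
workflow_context.update(global_vars)

assert task_published == {'status': 'ok'}
assert workflow_context == {'x': 1, 'last_task': 't1'}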
ctx_view = ContextView( get_workflow_environment_dict(wf_ex), wf_ex.context, wf_ex.input ) wf_vars = expr.evaluate_recursively(wf_spec.get_vars(), ctx_view) utils.merge_dicts(wf_ex.context, wf_vars) def evaluate_object_fields(obj, context): fields = inspect_utils.get_public_fields(obj) evaluated_fields = expr.evaluate_recursively(fields, context) for k, v in evaluated_fields.items(): setattr(obj, k, v) def get_workflow_environment_dict(wf_ex): if not wf_ex: return {} if wf_ex.root_execution_id: return get_workflow_environment_dict(wf_ex.root_execution) env_dict = wf_ex.params['env'] if 'env' in wf_ex.params else {} return {'__env': env_dict} def get_workflow_execution_published_global(wf_ex): res = {} # Variables that get published globally are stored in the # workflow execution "context" field. So we just need to # copy its content excluding all internally used keys and # workflow variables defined under "vars" section in the # workflow text. exclude = {'__execution', 'openstack'} exclude = exclude.union(wf_ex.spec.get('vars', {})) for k, v in wf_ex.context.items(): if k not in exclude: res[k] = v return res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/workflow/direct_workflow.py0000644000175000017500000004347300000000000023221 0ustar00coreycorey00000000000000# Copyright 2015 - Mirantis, Inc. # Copyright 2020 - NetCracker Technology Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from osprofiler import profiler from mistral.db.v2 import api as db_api from mistral import exceptions as exc from mistral import expressions as expr from mistral.workflow import base from mistral.workflow import commands from mistral.workflow import data_flow from mistral.workflow import states from mistral_lib import utils LOG = logging.getLogger(__name__) MAX_SEARCH_DEPTH = 5 class DirectWorkflowController(base.WorkflowController): """'Direct workflow' controller. This handler implements the workflow pattern which is based on direct transitions between tasks, i.e. after each task completion a decision should be made which tasks should run next based on the result of task execution. Note that tasks can run in parallel. For example, if there's a workflow consisting of three tasks 'A', 'B' and 'C' where 'A' starts first then 'B' and 'C' can start second if certain conditions associated with transitions 'A'->'B' and 'A'->'C' evaluate to true.
""" __workflow_type__ = "direct" def _get_upstream_task_executions(self, task_spec): t_specs_names = [t_spec.get_name() for t_spec in self.wf_spec.find_inbound_task_specs(task_spec)] if not t_specs_names: return [] if not task_spec.get_join(): return self._get_task_executions( name=t_specs_names[0], # not a join, has just one parent state={'in': (states.SUCCESS, states.ERROR, states.CANCELLED)}, processed=True ) t_execs_candidates = self._get_task_executions( name={'in': t_specs_names}, state={'in': (states.SUCCESS, states.ERROR, states.CANCELLED)}, ) t_execs = [] for t_ex in t_execs_candidates: if task_spec.get_name() in [t[0] for t in t_ex.next_tasks]: t_execs.append(t_ex) return t_execs def _find_next_commands(self, task_ex=None): cmds = super(DirectWorkflowController, self)._find_next_commands( task_ex ) # Checking if task_ex is empty is a serious optimization here # because 'self.wf_ex.task_executions' leads to initialization of # the entire collection which in case of highly-parallel workflows # may be very expensive. if not task_ex and not self.wf_ex.task_executions: return self._find_start_commands() if task_ex: task_execs = [task_ex] else: task_execs = [ t_ex for t_ex in self.wf_ex.task_executions if states.is_completed(t_ex.state) and not t_ex.processed ] for t_ex in task_execs: cmds.extend(self._find_next_commands_for_task(t_ex)) return cmds def _find_start_commands(self): return [ commands.RunTask( self.wf_ex, self.wf_spec, t_s, self.get_task_inbound_context(t_s) ) for t_s in self.wf_spec.find_start_tasks() ] @profiler.trace( 'direct-wf-controller-find-next-commands-for-task', hide_args=True ) def _find_next_commands_for_task(self, task_ex): """Finds next commands based on the state of the given task. :param task_ex: Task execution for which next commands need to be found. :return: List of workflow commands. """ cmds = [] ctx = data_flow.evaluate_task_outbound_context(task_ex) for t_n, params, event_name in self._find_next_tasks(task_ex, ctx): t_s = self.wf_spec.get_tasks()[t_n] if not (t_s or t_n in commands.ENGINE_CMD_CLS): raise exc.WorkflowException("Task '%s' not found." % t_n) elif not t_s: t_s = self.wf_spec.get_task(task_ex.name) triggered_by = [ { 'task_id': task_ex.id, 'event': event_name } ] cmd = commands.create_command( t_n, self.wf_ex, self.wf_spec, t_s, ctx, params=params, triggered_by=triggered_by, handles_error=(event_name == 'on-error') ) self._configure_if_join(cmd) cmds.append(cmd) LOG.debug("Found commands: %s", cmds) return cmds def _configure_if_join(self, cmd): if not isinstance(cmd, (commands.RunTask, commands.RunExistingTask)): return if not cmd.task_spec.get_join(): return cmd.unique_key = self._get_join_unique_key(cmd) cmd.wait = True def _get_join_unique_key(self, cmd): return 'join-task-%s-%s' % (self.wf_ex.id, cmd.task_spec.get_name()) def rerun_tasks(self, task_execs, reset=True): cmds = super(DirectWorkflowController, self).rerun_tasks( task_execs, reset ) for cmd in cmds: self._configure_if_join(cmd) return cmds # TODO(rakhmerov): Need to refactor this method to be able to pass tasks # whose contexts need to be merged. 
def evaluate_workflow_final_context(self): ctx = {} for batch in self._find_end_task_executions_as_batches(): for t_ex in batch: ctx = utils.merge_dicts( ctx, data_flow.evaluate_task_outbound_context(t_ex) ) return ctx def get_logical_task_state(self, task_ex): task_spec = self.wf_spec.get_tasks()[task_ex.name] if not task_spec.get_join(): # A simple 'non-join' task does not have any preconditions # based on state of other tasks so its logical state always # equals to its real state. return base.TaskLogicalState(task_ex.state, task_ex.state_info) return self._get_join_logical_state(task_spec) def find_indirectly_affected_task_executions(self, t_name): all_joins = {task_spec.get_name() for task_spec in self.wf_spec.get_tasks() if task_spec.get_join()} t_execs_cache = { t_ex.name: t_ex for t_ex in self._get_task_executions( fields=('id', 'name'), name={'in': all_joins} ) } if all_joins else {} visited_task_names = set() clauses = self.wf_spec.find_outbound_task_names(t_name) res = set() while clauses: visited_task_names.add(t_name) t_name = clauses.pop() # Handle cycles. if t_name in visited_task_names: continue # Encountered an engine command. if not self.wf_spec.get_tasks()[t_name]: continue if t_name in all_joins and t_name in t_execs_cache: res.add(t_execs_cache[t_name]) continue clauses.update(self.wf_spec.find_outbound_task_names(t_name)) return res def is_error_handled_for(self, task_ex): # TODO(rakhmerov): The method works in a different way than # all_errors_handled(). It doesn't evaluate expressions under # "on-error" clause. return bool(self.wf_spec.get_on_error_clause(task_ex.name)) def all_errors_handled(self): cnt = db_api.get_task_executions_count( workflow_execution_id=self.wf_ex.id, state=states.ERROR, error_handled=False ) return cnt == 0 def _find_end_task_executions_as_batches(self): batches = db_api.get_completed_task_executions_as_batches( workflow_execution_id=self.wf_ex.id, has_next_tasks=False ) for batch in batches: yield batch def may_complete_workflow(self, task_ex): res = super(DirectWorkflowController, self).may_complete_workflow( task_ex ) return res and not task_ex.has_next_tasks @profiler.trace('direct-wf-controller-find-next-tasks', hide_args=True) def _find_next_tasks(self, task_ex, ctx): t_n = task_ex.name t_s = task_ex.state ctx_view = data_flow.ContextView( data_flow.get_current_task_dict(task_ex), ctx, data_flow.get_workflow_environment_dict(self.wf_ex), self.wf_ex.context, self.wf_ex.input ) # [(task_name, params, 'on-success'|'on-error'|'on-complete'), ...] result = [] if t_s == states.ERROR: for name, cond, params in self.wf_spec.get_on_error_clause(t_n): if not cond or expr.evaluate(cond, ctx_view): params = expr.evaluate_recursively(params, ctx_view) result.append((name, params, 'on-error')) if t_s == states.SUCCESS: for name, cond, params in self.wf_spec.get_on_success_clause(t_n): if not cond or expr.evaluate(cond, ctx_view): params = expr.evaluate_recursively(params, ctx_view) result.append((name, params, 'on-success')) if states.is_completed(t_s) and not states.is_cancelled(t_s): for name, cond, params in self.wf_spec.get_on_complete_clause(t_n): if not cond or expr.evaluate(cond, ctx_view): params = expr.evaluate_recursively(params, ctx_view) result.append((name, params, 'on-complete')) return result @profiler.trace( 'direct-wf-controller-get-join-logical-state', hide_args=True ) def _get_join_logical_state(self, task_spec): """Evaluates logical state of 'join' task. :param task_spec: 'join' task specification. 
:return: TaskLogicalState (state, state_info, cardinality, triggered_by) where 'state' and 'state_info' describe the logical state of the given 'join' task and 'cardinality' gives the remaining number of unfulfilled preconditions. If logical state is not WAITING then 'cardinality' should always be 0. """ # TODO(rakhmerov): We need to use task_ex instead of task_spec # in order to cover a use case when there's more than one instance # of the same 'join' task in a workflow. join_expr = task_spec.get_join() in_task_specs = self.wf_spec.find_inbound_task_specs(task_spec) if not in_task_specs: return base.TaskLogicalState(states.RUNNING) t_execs_cache = self._prepare_task_executions_cache(task_spec) # List of tuples (task_name, task_ex, state, depth, event_name). induced_states = [] for t_s in in_task_specs: t_ex = t_execs_cache[t_s.get_name()] tup = self._get_induced_join_state( t_s, t_ex, task_spec, t_execs_cache ) induced_states.append( ( t_s.get_name(), t_ex, tup[0], tup[1], tup[2] ) ) def count(state): cnt = 0 total_depth = 0 for s in induced_states: if s[2] == state: cnt += 1 total_depth += s[3] return cnt, total_depth errors_tuple = count(states.ERROR) runnings_tuple = count(states.RUNNING) total_count = len(induced_states) def _blocked_message(): return ( 'Blocked by tasks: %s' % [s[0] for s in induced_states if s[2] == states.WAITING] ) def _failed_message(): return ( 'Failed by tasks: %s' % [s[0] for s in induced_states if s[2] == states.ERROR] ) def _triggered_by(state): return [ {'task_id': s[1].id, 'event': s[4]} for s in induced_states if s[2] == state and s[1] is not None ] # If "join" is configured as a number or 'one'. if isinstance(join_expr, int) or join_expr == 'one': spec_cardinality = 1 if join_expr == 'one' else join_expr if runnings_tuple[0] >= spec_cardinality: return base.TaskLogicalState( states.RUNNING, triggered_by=_triggered_by(states.RUNNING) ) # E.g. 'join: 3' with inbound [ERROR, ERROR, RUNNING, WAITING] # No chance to get 3 RUNNING states. if errors_tuple[0] > (total_count - spec_cardinality): return base.TaskLogicalState(states.ERROR, _failed_message()) # Calculate how many tasks need to finish to trigger this 'join'. cardinality = spec_cardinality - runnings_tuple[0] return base.TaskLogicalState( states.WAITING, _blocked_message(), cardinality=cardinality ) if join_expr == 'all': if total_count == runnings_tuple[0]: return base.TaskLogicalState( states.RUNNING, triggered_by=_triggered_by(states.RUNNING) ) if errors_tuple[0] > 0: return base.TaskLogicalState( states.ERROR, _failed_message(), triggered_by=_triggered_by(states.ERROR) ) # Remaining cardinality is just a difference between all tasks and # a number of those tasks that induce RUNNING state. cardinality = total_count - runnings_tuple[1] return base.TaskLogicalState( states.WAITING, _blocked_message(), cardinality=cardinality ) raise RuntimeError('Unexpected join expression: %s' % join_expr) # TODO(rakhmerov): Method signature is incorrect given that # we may have multiple task executions for a task. It should # accept inbound task execution rather than a spec. 
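# --- Editor's illustrative sketch (not part of the original source): the
# counting arithmetic _get_join_logical_state() applies to a numeric
# 'join'. The induced inbound states below are hypothetical.

spec_cardinality = 3                                  # 'join: 3'
induced = ['RUNNING', 'RUNNING', 'ERROR', 'WAITING']  # one per inbound task

runnings = induced.count('RUNNING')
errors = induced.count('ERROR')
total = len(induced)

if runnings >= spec_cardinality:
    state = 'RUNNING'
elif errors > (total - spec_cardinality):
    # Too many errors: reaching 3 RUNNING inducements is impossible.
    state = 'ERROR'
else:
    state = 'WAITING'

# 2 RUNNING, 1 ERROR, 1 WAITING: one more inducement may still arrive.
assert state == 'WAITING'
assert spec_cardinality - runnings == 1   # remaining cardinality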
@profiler.trace( 'direct-wf-controller-get-induced-join-state', hide_args=True ) def _get_induced_join_state(self, in_task_spec, in_task_ex, join_task_spec, t_execs_cache): join_task_name = join_task_spec.get_name() if not in_task_ex: possible, depth = self._possible_route( in_task_spec, t_execs_cache ) if possible: return states.WAITING, depth, None else: return states.ERROR, depth, 'impossible route' if not states.is_completed(in_task_ex.state): return states.WAITING, 1, None # [(task name, event name), ...] next_tasks_tuples = in_task_ex.next_tasks or [] next_tasks_dict = {tup[0]: tup[1] for tup in next_tasks_tuples} if join_task_name not in next_tasks_dict: return states.ERROR, 1, "not triggered" return states.RUNNING, 1, next_tasks_dict[join_task_name] def _possible_route(self, task_spec, t_execs_cache, depth=1): in_task_specs = self.wf_spec.find_inbound_task_specs(task_spec) if not in_task_specs: return True, depth for t_s in in_task_specs: if t_s.get_name() not in t_execs_cache: t_execs_cache.update( self._prepare_task_executions_cache(task_spec) ) t_ex = t_execs_cache.get(t_s.get_name()) if not t_ex: possible, depth = self._possible_route( t_s, t_execs_cache, depth + 1 ) if possible: return True, depth else: t_name = task_spec.get_name() if not states.is_completed(t_ex.state): return True, depth if t_name in [t[0] for t in t_ex.next_tasks]: return True, depth return False, depth def _find_all_parent_task_names(self, task_spec, depth=1): if depth == MAX_SEARCH_DEPTH: return {task_spec.get_name()} in_task_specs = self.wf_spec.find_inbound_task_specs(task_spec) if not in_task_specs: return {task_spec.get_name()} names = set() for t_s in in_task_specs: names.update(self._find_all_parent_task_names(t_s, depth + 1)) if depth > 1: names.add(task_spec.get_name()) return names def _prepare_task_executions_cache(self, task_spec): names = self._find_all_parent_task_names(task_spec) t_execs_cache = { t_ex.name: t_ex for t_ex in self._get_task_executions( fields=('id', 'name', 'state', 'next_tasks'), name={'in': names} ) } if names else {} # don't perform a db request if 'names' are empty for name in names: if name not in t_execs_cache: t_execs_cache[name] = None return t_execs_cache ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/workflow/reverse_workflow.py0000644000175000017500000001377300000000000023432 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import networkx as nx from networkx.algorithms import traversal from mistral import exceptions as exc from mistral.workflow import base from mistral.workflow import commands from mistral.workflow import data_flow from mistral.workflow import states class ReverseWorkflowController(base.WorkflowController): """'Reverse workflow controller. This controller implements the workflow pattern which is based on dependencies between tasks, i.e. each task in a workflow graph may be dependent on other tasks. 
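# --- Editor's illustrative sketch (not part of the original source): how
# _get_induced_join_state() above reads a completed inbound task's
# 'next_tasks' tuples to decide whether it triggers the join. Task and
# event names are hypothetical.

join_task_name = 'join_task'
next_tasks = [('join_task', 'on-success'), ('cleanup', 'on-error')]

next_tasks_dict = {name: event for name, event in next_tasks}

if join_task_name in next_tasks_dict:
    # The inbound task fired a transition into the join.
    induced = ('RUNNING', 1, next_tasks_dict[join_task_name])
else:
    induced = ('ERROR', 1, 'not triggered')

assert induced == ('RUNNING', 1, 'on-success')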
To run this type of workflow, the user must specify the name of a task that serves as a target node in the graph; the algorithm reaches it by resolving all of its dependencies. For example, if there's a workflow consisting of two tasks 'A' and 'B' where 'A' depends on 'B' and if we specify a target task name 'A', the controller will first run task 'B' and then, once the dependency of 'A' is resolved, run task 'A'. """ __workflow_type__ = "reverse" def _find_next_commands(self, task_ex): """Finds all tasks with resolved dependencies. This method finds all tasks with resolved dependencies and returns them in the form of workflow commands. """ cmds = super(ReverseWorkflowController, self)._find_next_commands( task_ex ) # TODO(rakhmerov): Adapt reverse workflow to non-locking model. # 1. Task search must use task_ex parameter. # 2. When a task has more than one dependency it's possible to # get into 'phantom read' phenomena and create multiple instances # of the same task. So 'unique_key' in conjunction with 'wait_flag' # must be used to prevent this. task_specs = self._find_task_specs_with_satisfied_dependencies() return cmds + [ commands.RunTask( self.wf_ex, self.wf_spec, t_s, self.get_task_inbound_context(t_s) ) for t_s in task_specs ] def _get_target_task_specification(self): task_name = self.wf_ex.params.get('task_name') task_spec = self.wf_spec.get_tasks().get(task_name) if not task_spec: raise exc.WorkflowException( 'Invalid task name [wf_spec=%s, task_name=%s]' % (self.wf_spec, task_name) ) return task_spec def _get_upstream_task_executions(self, task_spec): t_specs_names = self.wf_spec.get_task_requires(task_spec) or [] t_execs = self._get_task_executions(name={'in': t_specs_names}) return [t_ex for t_ex in t_execs if t_ex.state == states.SUCCESS] def evaluate_workflow_final_context(self): task_name = self._get_target_task_specification().get_name() task_execs = self._get_task_executions(name=task_name) # NOTE: For reverse workflow there can't be multiple # executions for one task. assert len(task_execs) <= 1 if len(task_execs) == 1: return data_flow.evaluate_task_outbound_context(task_execs[0]) else: return {} def get_logical_task_state(self, task_ex): # TODO(rakhmerov): Implement. return base.TaskLogicalState(task_ex.state, task_ex.state_info) def find_indirectly_affected_task_executions(self, task_name): return set() def is_error_handled_for(self, task_ex): return task_ex.state != states.ERROR def all_errors_handled(self): return len(self._get_task_executions(state=states.ERROR)) == 0 def _find_task_specs_with_satisfied_dependencies(self): """Given a target task name, finds tasks whose dependencies are satisfied. :return: Task specifications with satisfied dependencies. """ tasks_spec = self.wf_spec.get_tasks() graph = self._build_graph(tasks_spec) # Unwind tasks from the target task # and filter out tasks with dependencies. return [ t_s for t_s in traversal.dfs_postorder_nodes( graph.reverse(), self._get_target_task_specification() ) if self._is_satisfied_task(t_s) ] def _is_satisfied_task(self, task_spec): if self._get_task_executions(name=task_spec.get_name()): return False if not self.wf_spec.get_task_requires(task_spec): return True success_t_names = set() for t_ex in self.wf_ex.task_executions: if t_ex.state == states.SUCCESS: success_t_names.add(t_ex.name) return not ( set(self.wf_spec.get_task_requires(task_spec)) - success_t_names ) def _build_graph(self, tasks_spec): graph = nx.DiGraph() # Add graph nodes. for t in tasks_spec: graph.add_node(t) # Add graph edges.
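# --- Editor's illustrative sketch (not part of the original source): the
# graph shape _build_graph() produces and how the reversed post-order DFS
# in _find_task_specs_with_satisfied_dependencies() walks it. Task names
# are hypothetical; an edge points from a dependency to its dependent.

import networkx as nx
from networkx.algorithms import traversal

g = nx.DiGraph()
g.add_edge('B', 'A')   # 'A' requires 'B'
g.add_edge('C', 'A')   # 'A' requires 'C'

order = list(traversal.dfs_postorder_nodes(g.reverse(), 'A'))

# Post-order from the target over reversed edges yields dependencies
# before the target task itself.
assert order[-1] == 'A'
assert set(order[:-1]) == {'B', 'C'}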
for t_spec in tasks_spec: for dep_t_spec in self._get_dependency_tasks(tasks_spec, t_spec): graph.add_edge(dep_t_spec, t_spec) return graph def _get_dependency_tasks(self, tasks_spec, task_spec): dep_task_names = self.wf_spec.get_task_requires(task_spec) if len(dep_task_names) == 0: return [] dep_t_specs = set() for t_spec in tasks_spec: for t_name in dep_task_names: if t_name == t_spec.get_name(): dep_t_specs.add(t_spec) return dep_t_specs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/workflow/states.py0000644000175000017500000000532600000000000021323 0ustar00coreycorey00000000000000# Copyright 2013 - Mirantis, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Valid task and workflow states.""" IDLE = 'IDLE' """Task is not started yet.""" WAITING = 'WAITING' """ Task execution object has been created, but it is not ready to start because some preconditions are not met. NOTE: The task may never run just because some of the preconditions may never be met. """ RUNNING = 'RUNNING' """Task, action or workflow is currently being executed.""" RUNNING_DELAYED = 'DELAYED' """Task is in the running state but temporarily delayed.""" PAUSED = 'PAUSED' """Task, action or workflow has been paused.""" SUCCESS = 'SUCCESS' """Task, action or workflow has finished successfully.""" CANCELLED = 'CANCELLED' """Task, action or workflow has been cancelled.""" ERROR = 'ERROR' """Task, action or workflow has finished with an error.""" _ALL = [ IDLE, WAITING, RUNNING, RUNNING_DELAYED, PAUSED, SUCCESS, CANCELLED, ERROR ] _VALID_TRANSITIONS = { IDLE: [RUNNING, ERROR, CANCELLED], WAITING: [RUNNING], RUNNING: [PAUSED, RUNNING_DELAYED, SUCCESS, ERROR, CANCELLED], RUNNING_DELAYED: [RUNNING, ERROR, CANCELLED], PAUSED: [RUNNING, ERROR, CANCELLED], SUCCESS: [], CANCELLED: [RUNNING], ERROR: [RUNNING] } TERMINAL_STATES = {SUCCESS, ERROR, CANCELLED} def is_valid(state): return state in _ALL def is_invalid(state): return not is_valid(state) def is_completed(state): return state in [SUCCESS, ERROR, CANCELLED] def is_cancelled(state): return state == CANCELLED def is_running(state): return state in [RUNNING, RUNNING_DELAYED] def is_waiting(state): return state == WAITING def is_idle(state): return state == IDLE def is_paused(state): return state == PAUSED def is_paused_or_completed(state): return is_paused(state) or is_completed(state) def is_paused_or_idle(state): return is_paused(state) or is_idle(state) def is_valid_transition(from_state, to_state): if is_invalid(from_state) or is_invalid(to_state): return False if from_state == to_state: return True return to_state in _VALID_TRANSITIONS[from_state] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/mistral/workflow/utils.py0000644000175000017500000000150200000000000021150 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. 
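# --- Editor's illustrative sketch (not part of the original source):
# exercising the transition table defined in states.py above. Assumes
# the module is importable as mistral.workflow.states.

from mistral.workflow import states

assert states.is_valid_transition(states.RUNNING, states.SUCCESS)
assert states.is_valid_transition(states.ERROR, states.RUNNING)   # rerun
assert states.is_valid_transition(states.IDLE, states.IDLE)       # no-op
# SUCCESS is terminal: nothing transitions out of it.
assert not states.is_valid_transition(states.SUCCESS, states.RUNNING)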
# Copyright 2015 - StackStorm, Inc. # Copyright 2016 - Brocade Communications Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mistral_lib.actions import types # For backwards compatibility Result = types.Result ResultSerializer = types.ResultSerializer ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538868.101567 mistral-10.0.0.0b3/mistral.egg-info/0000755000175000017500000000000000000000000017260 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538867.0 mistral-10.0.0.0b3/mistral.egg-info/PKG-INFO0000644000175000017500000000442600000000000020363 0ustar00coreycorey00000000000000Metadata-Version: 1.1 Name: mistral Version: 10.0.0.0b3 Summary: Mistral Project Home-page: https://docs.openstack.org/mistral/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: Apache License, Version 2.0 Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/mistral.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Mistral ======= Workflow Service integrated with OpenStack. This project aims to provide a mechanism to define tasks and workflows in a simple YAML-based language, manage and execute them in a distributed environment. 
Project Resources ----------------- * `Mistral Official Documentation `_ * `User Documentation `_ * `Administrator Documentation `_ * `Developer Documentation `_ * Project status, bugs, and blueprints are tracked on `Launchpad `_ * CloudFlow: visualization tool for workflow executions on https://github.com/nokia/CloudFlow * Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/mistral/ * Source for the project can be found at: https://opendev.org/openstack/mistral Platform: UNKNOWN Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538868.0 mistral-10.0.0.0b3/mistral.egg-info/SOURCES.txt0000644000175000017500000007637700000000000021170 0ustar00coreycorey00000000000000.coveragerc .dockerignore .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt lower-constraints.txt requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/index.rst api-ref/source/v2/action.inc api-ref/source/v2/cron-trigger.inc api-ref/source/v2/execution.inc api-ref/source/v2/task.inc api-ref/source/v2/workbook.inc api-ref/source/v2/workflow.inc devstack/README.rst devstack/plugin.sh devstack/settings devstack/files/apache-mistral-api.template doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/admin/architecture.rst doc/source/admin/index.rst doc/source/admin/quickstart.rst doc/source/admin/upgrade_guide.rst doc/source/admin/configuration/config-guide.rst doc/source/admin/configuration/index.rst doc/source/admin/configuration/policy-guide.rst doc/source/admin/configuration/samples/index.rst doc/source/admin/configuration/samples/policy-yaml.rst doc/source/admin/img/mistral_architecture.png doc/source/admin/install/dashboard_guide.rst doc/source/admin/install/get_started.rst doc/source/admin/install/index.rst doc/source/admin/install/install-obs.rst doc/source/admin/install/install-rdo.rst doc/source/admin/install/install-ubuntu.rst doc/source/admin/install/install.rst doc/source/admin/install/installation_guide.rst doc/source/admin/install/mistralclient_guide.rst doc/source/admin/install/next-steps.rst doc/source/admin/install/verify.rst doc/source/developer/index.rst doc/source/developer/contributor/coding_guidelines.rst doc/source/developer/contributor/debugging_and_testing.rst doc/source/developer/contributor/devstack.rst doc/source/developer/contributor/index.rst doc/source/developer/contributor/profiling.rst doc/source/developer/contributor/troubleshooting.rst doc/source/developer/contributor/img/Pycharm_run_config_menu.png doc/source/developer/contributor/img/dashboard_debug_config.png doc/source/developer/contributor/img/dashboard_django_settings.png doc/source/developer/contributor/img/dashboard_environment_variables.png doc/source/developer/extensions/creating_custom_action.rst doc/source/developer/extensions/extending_yaql.rst doc/source/developer/extensions/index.rst 
doc/source/user/asynchronous_actions.rst doc/source/user/faq.rst doc/source/user/index.rst doc/source/user/main_features.rst doc/source/user/overview.rst doc/source/user/rest_api_v2.rst doc/source/user/wf_lang_v2.rst doc/source/user/wf_namespaces.rst doc/source/user/cli/index.rst doc/source/user/cookbooks/cloud_cron.rst doc/source/user/cookbooks/index.rst doc/source/user/cookbooks/img/cloud_cron_updating_multiple_servers.png doc/source/user/cookbooks/img/ssh_proxied.png doc/source/user/terminology/actions.rst doc/source/user/terminology/cron_triggers.rst doc/source/user/terminology/executions.rst doc/source/user/terminology/index.rst doc/source/user/terminology/workbooks.rst doc/source/user/terminology/workflows.rst doc/source/user/terminology/img/actions.png doc/source/user/terminology/img/cron_trigger.png doc/source/user/terminology/img/direct_workflow.png doc/source/user/terminology/img/reverse_workflow.png doc/source/user/terminology/img/workbook_namespacing.png doc/source/user/use_cases/index.rst doc/source/user/use_cases/long_running_business_process.rst doc/source/user/use_cases/img/long_running_business_process.png etc/README.mistral.conf etc/event_definitions.yml.sample etc/logging.conf.sample etc/logging.conf.sample.rotating etc/policy.json etc/wf_trace_logging.conf.sample etc/wf_trace_logging.conf.sample.rotating mistral/__init__.py mistral/_i18n.py mistral/config.py mistral/context.py mistral/exceptions.py mistral/messaging.py mistral/version.py mistral.egg-info/PKG-INFO mistral.egg-info/SOURCES.txt mistral.egg-info/dependency_links.txt mistral.egg-info/entry_points.txt mistral.egg-info/not-zip-safe mistral.egg-info/pbr.json mistral.egg-info/requires.txt mistral.egg-info/top_level.txt mistral/actions/__init__.py mistral/actions/action_factory.py mistral/actions/std_actions.py mistral/api/__init__.py mistral/api/access_control.py mistral/api/app.py mistral/api/service.py mistral/api/wsgi.py mistral/api/controllers/__init__.py mistral/api/controllers/resource.py mistral/api/controllers/root.py mistral/api/controllers/v2/__init__.py mistral/api/controllers/v2/action.py mistral/api/controllers/v2/action_execution.py mistral/api/controllers/v2/cron_trigger.py mistral/api/controllers/v2/environment.py mistral/api/controllers/v2/event_trigger.py mistral/api/controllers/v2/execution.py mistral/api/controllers/v2/execution_report.py mistral/api/controllers/v2/member.py mistral/api/controllers/v2/resources.py mistral/api/controllers/v2/root.py mistral/api/controllers/v2/service.py mistral/api/controllers/v2/sub_execution.py mistral/api/controllers/v2/task.py mistral/api/controllers/v2/types.py mistral/api/controllers/v2/validation.py mistral/api/controllers/v2/workbook.py mistral/api/controllers/v2/workflow.py mistral/api/hooks/__init__.py mistral/api/hooks/content_type.py mistral/auth/__init__.py mistral/auth/keycloak.py mistral/auth/keystone.py mistral/cmd/__init__.py mistral/cmd/launch.py mistral/db/__init__.py mistral/db/utils.py mistral/db/sqlalchemy/__init__.py mistral/db/sqlalchemy/base.py mistral/db/sqlalchemy/model_base.py mistral/db/sqlalchemy/sqlite_lock.py mistral/db/sqlalchemy/types.py mistral/db/sqlalchemy/migration/__init__.py mistral/db/sqlalchemy/migration/alembic.ini mistral/db/sqlalchemy/migration/cli.py mistral/db/sqlalchemy/migration/alembic_migrations/README.md mistral/db/sqlalchemy/migration/alembic_migrations/__init__.py mistral/db/sqlalchemy/migration/alembic_migrations/env.py mistral/db/sqlalchemy/migration/alembic_migrations/script.py.mako 
mistral/db/sqlalchemy/migration/alembic_migrations/versions/001_kilo.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/002_kilo.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/003_cron_trigger_constraints.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/004_add_description_for_execution.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/005_increase_execution_columns_size.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/006_add_processed_to_delayed_calls_v2.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/007_move_system_flag_to_base_definition.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/008_increase_size_of_state_info_column.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/009_add_database_indices.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/010_add_resource_members_v2_table.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/011_add_workflow_id_for_execution.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/012_add_event_triggers_v2_table.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/013_split_execution_table_increase_names.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/014_fix_past_scripts_discrepancies.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/015_add_unique_keys_for_non_locking_model.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/016_increase_size_of_task_unique_key.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/017_add_named_lock_table.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/018_increate_task_execution_unique_key_size.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/019_change_scheduler_schema.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/020_add_type_to_task_execution.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/021_increase_env_columns_size.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/022_namespace_support.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/023_add_root_execution_id.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/024_add_composite_index_workflow_execution_id_name.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/025_fix_length_task_name.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/026_optimize_task_expression_func.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/027_add_last_heartbeat_to_action_execution.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/028_add_namespace_column_to_workbooks.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/029_workbook_empty_namespace.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/030_increase_delayed_calls_v2_auth_context.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/031_add_started_at_and_finished_at_to_task_execution.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/032_add_has_next_tasks_and_error_handled_to_task_execution.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/033_add_next_tasks_to_task_execution.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/034_add_scheduled_jobs_table.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/035_namespace_support_postgresql.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/036_namespace_support_for_workbooks_postgresql.py 
mistral/db/sqlalchemy/migration/alembic_migrations/versions/037_add_namespace_column_to_action_definitions.py mistral/db/sqlalchemy/migration/alembic_migrations/versions/__init__.py mistral/db/v2/__init__.py mistral/db/v2/api.py mistral/db/v2/sqlalchemy/__init__.py mistral/db/v2/sqlalchemy/api.py mistral/db/v2/sqlalchemy/filters.py mistral/db/v2/sqlalchemy/models.py mistral/engine/__init__.py mistral/engine/action_handler.py mistral/engine/actions.py mistral/engine/base.py mistral/engine/default_engine.py mistral/engine/dispatcher.py mistral/engine/engine_server.py mistral/engine/policies.py mistral/engine/post_tx_queue.py mistral/engine/task_handler.py mistral/engine/tasks.py mistral/engine/utils.py mistral/engine/workflow_handler.py mistral/engine/workflows.py mistral/event_engine/__init__.py mistral/event_engine/base.py mistral/event_engine/default_event_engine.py mistral/event_engine/event_engine_server.py mistral/executors/__init__.py mistral/executors/base.py mistral/executors/default_executor.py mistral/executors/executor_server.py mistral/executors/remote_executor.py mistral/expressions/__init__.py mistral/expressions/base.py mistral/expressions/jinja_expression.py mistral/expressions/std_functions.py mistral/expressions/yaql_expression.py mistral/ext/__init__.py mistral/ext/pygmentplugin.py mistral/hacking/__init__.py mistral/hacking/checks.py mistral/lang/__init__.py mistral/lang/base.py mistral/lang/parser.py mistral/lang/types.py mistral/lang/v2/__init__.py mistral/lang/v2/actions.py mistral/lang/v2/base.py mistral/lang/v2/on_clause.py mistral/lang/v2/policies.py mistral/lang/v2/publish.py mistral/lang/v2/retry_policy.py mistral/lang/v2/task_defaults.py mistral/lang/v2/tasks.py mistral/lang/v2/workbook.py mistral/lang/v2/workflows.py mistral/notifiers/__init__.py mistral/notifiers/base.py mistral/notifiers/default_notifier.py mistral/notifiers/notification_events.py mistral/notifiers/notification_server.py mistral/notifiers/remote_notifier.py mistral/notifiers/publishers/__init__.py mistral/notifiers/publishers/noop.py mistral/notifiers/publishers/webhook.py mistral/policies/__init__.py mistral/policies/action.py mistral/policies/action_executions.py mistral/policies/base.py mistral/policies/cron_trigger.py mistral/policies/environment.py mistral/policies/event_trigger.py mistral/policies/execution.py mistral/policies/member.py mistral/policies/service.py mistral/policies/task.py mistral/policies/workbook.py mistral/policies/workflow.py mistral/resources/actions/wait_ssh.yaml mistral/rpc/__init__.py mistral/rpc/base.py mistral/rpc/clients.py mistral/rpc/kombu/__init__.py mistral/rpc/kombu/base.py mistral/rpc/kombu/kombu_client.py mistral/rpc/kombu/kombu_hosts.py mistral/rpc/kombu/kombu_listener.py mistral/rpc/kombu/kombu_server.py mistral/rpc/kombu/examples/__init__.py mistral/rpc/kombu/examples/client.py mistral/rpc/kombu/examples/server.py mistral/rpc/oslo/__init__.py mistral/rpc/oslo/oslo_client.py mistral/rpc/oslo/oslo_server.py mistral/scheduler/__init__.py mistral/scheduler/base.py mistral/scheduler/default_scheduler.py mistral/scheduler/scheduler_server.py mistral/service/__init__.py mistral/service/base.py mistral/service/coordination.py mistral/services/__init__.py mistral/services/action_heartbeat_checker.py mistral/services/action_heartbeat_sender.py mistral/services/action_manager.py mistral/services/actions.py mistral/services/expiration_policy.py mistral/services/legacy_scheduler.py mistral/services/periodic.py mistral/services/security.py 
mistral/services/triggers.py mistral/services/workbooks.py mistral/services/workflows.py mistral/tests/__init__.py mistral/tests/releasenotes/notes/return-errors-for-std-mistral-http-b852b6d8f0034477.yaml mistral/tests/resources/action_jinja.yaml mistral/tests/resources/action_v2.yaml mistral/tests/resources/single_wf.yaml mistral/tests/resources/wb_v1.yaml mistral/tests/resources/wb_v2.yaml mistral/tests/resources/wb_with_nested_wf.yaml mistral/tests/resources/wf_action_ex_concurrency.yaml mistral/tests/resources/wf_jinja.yaml mistral/tests/resources/wf_task_ex_concurrency.yaml mistral/tests/resources/wf_v2.yaml mistral/tests/resources/for_wf_namespace/lowest_level_wf.yaml mistral/tests/resources/for_wf_namespace/middle_wf.yaml mistral/tests/resources/for_wf_namespace/top_level_wf.yaml mistral/tests/resources/workbook/v2/my_workbook.yaml mistral/tests/resources/workbook/v2/workbook_schema_test.yaml mistral/tests/unit/__init__.py mistral/tests/unit/base.py mistral/tests/unit/config.py mistral/tests/unit/test_command_dispatcher.py mistral/tests/unit/test_context.py mistral/tests/unit/test_coordination.py mistral/tests/unit/test_exception_base.py mistral/tests/unit/test_expressions.py mistral/tests/unit/test_launcher.py mistral/tests/unit/test_version.py mistral/tests/unit/actions/__init__.py mistral/tests/unit/actions/test_action_manager.py mistral/tests/unit/actions/test_javascript_action.py mistral/tests/unit/actions/test_std_echo_action.py mistral/tests/unit/actions/test_std_email_action.py mistral/tests/unit/actions/test_std_fail_action.py mistral/tests/unit/actions/test_std_http_action.py mistral/tests/unit/actions/test_std_mistral_http_action.py mistral/tests/unit/actions/test_std_ssh_action.py mistral/tests/unit/actions/test_std_test_dict_action.py mistral/tests/unit/actions/test_types.py mistral/tests/unit/api/__init__.py mistral/tests/unit/api/base.py mistral/tests/unit/api/test_access_control.py mistral/tests/unit/api/test_auth.py mistral/tests/unit/api/test_cors_middleware.py mistral/tests/unit/api/test_oslo_middleware.py mistral/tests/unit/api/test_resource_base.py mistral/tests/unit/api/test_resource_list.py mistral/tests/unit/api/test_service.py mistral/tests/unit/api/v2/__init__.py mistral/tests/unit/api/v2/test_action_executions.py mistral/tests/unit/api/v2/test_actions.py mistral/tests/unit/api/v2/test_cron_triggers.py mistral/tests/unit/api/v2/test_environment.py mistral/tests/unit/api/v2/test_event_trigger.py mistral/tests/unit/api/v2/test_execution_report.py mistral/tests/unit/api/v2/test_executions.py mistral/tests/unit/api/v2/test_global_publish.py mistral/tests/unit/api/v2/test_keycloak_auth.py mistral/tests/unit/api/v2/test_members.py mistral/tests/unit/api/v2/test_root.py mistral/tests/unit/api/v2/test_services.py mistral/tests/unit/api/v2/test_sub_execution.py mistral/tests/unit/api/v2/test_tasks.py mistral/tests/unit/api/v2/test_workbooks.py mistral/tests/unit/api/v2/test_workflows.py mistral/tests/unit/db/__init__.py mistral/tests/unit/db/v2/__init__.py mistral/tests/unit/db/v2/test_db_model.py mistral/tests/unit/db/v2/test_locking.py mistral/tests/unit/db/v2/test_sqlalchemy_db_api.py mistral/tests/unit/db/v2/test_sqlite_transactions.py mistral/tests/unit/db/v2/test_transactions.py mistral/tests/unit/engine/__init__.py mistral/tests/unit/engine/base.py mistral/tests/unit/engine/test_action_caching.py mistral/tests/unit/engine/test_action_context.py mistral/tests/unit/engine/test_action_defaults.py mistral/tests/unit/engine/test_action_heartbeat_checker.py 
mistral/tests/unit/engine/test_action_heartbeat_sender.py mistral/tests/unit/engine/test_adhoc_actions.py mistral/tests/unit/engine/test_commands.py mistral/tests/unit/engine/test_cron_trigger.py mistral/tests/unit/engine/test_dataflow.py mistral/tests/unit/engine/test_default_engine.py mistral/tests/unit/engine/test_direct_workflow.py mistral/tests/unit/engine/test_direct_workflow_rerun.py mistral/tests/unit/engine/test_direct_workflow_rerun_cancelled.py mistral/tests/unit/engine/test_direct_workflow_with_cycles.py mistral/tests/unit/engine/test_disabled_yaql_conversion.py mistral/tests/unit/engine/test_environment.py mistral/tests/unit/engine/test_error_handling.py mistral/tests/unit/engine/test_error_result.py mistral/tests/unit/engine/test_execution_fields_size_limitation.py mistral/tests/unit/engine/test_execution_params.py mistral/tests/unit/engine/test_integrity_check.py mistral/tests/unit/engine/test_javascript_action.py mistral/tests/unit/engine/test_join.py mistral/tests/unit/engine/test_names_validation.py mistral/tests/unit/engine/test_noop_task.py mistral/tests/unit/engine/test_policies.py mistral/tests/unit/engine/test_profiler.py mistral/tests/unit/engine/test_race_condition.py mistral/tests/unit/engine/test_reverse_workflow.py mistral/tests/unit/engine/test_reverse_workflow_rerun.py mistral/tests/unit/engine/test_reverse_workflow_rerun_cancelled.py mistral/tests/unit/engine/test_run_action.py mistral/tests/unit/engine/test_safe_rerun.py mistral/tests/unit/engine/test_set_state.py mistral/tests/unit/engine/test_state_info.py mistral/tests/unit/engine/test_subworkflows.py mistral/tests/unit/engine/test_subworkflows_pause_resume.py mistral/tests/unit/engine/test_task_cancel.py mistral/tests/unit/engine/test_task_defaults.py mistral/tests/unit/engine/test_task_pause_resume.py mistral/tests/unit/engine/test_task_publish.py mistral/tests/unit/engine/test_task_started_finished_at.py mistral/tests/unit/engine/test_tasks_function.py mistral/tests/unit/engine/test_with_items.py mistral/tests/unit/engine/test_with_items_task.py mistral/tests/unit/engine/test_workflow_cancel.py mistral/tests/unit/engine/test_workflow_resume.py mistral/tests/unit/engine/test_workflow_stop.py mistral/tests/unit/engine/test_workflow_variables.py mistral/tests/unit/engine/test_yaql_functions.py mistral/tests/unit/executors/__init__.py mistral/tests/unit/executors/base.py mistral/tests/unit/executors/test_local_executor.py mistral/tests/unit/executors/test_server_plugins.py mistral/tests/unit/expressions/__init__.py mistral/tests/unit/expressions/test_jinja_expression.py mistral/tests/unit/expressions/test_yaql_expression.py mistral/tests/unit/expressions/test_yaql_json_serialization.py mistral/tests/unit/hacking/__init__.py mistral/tests/unit/hacking/test_checks.py mistral/tests/unit/lang/__init__.py mistral/tests/unit/lang/test_spec_caching.py mistral/tests/unit/lang/v2/__init__.py mistral/tests/unit/lang/v2/base.py mistral/tests/unit/lang/v2/test_actions.py mistral/tests/unit/lang/v2/test_tasks.py mistral/tests/unit/lang/v2/test_workbook.py mistral/tests/unit/lang/v2/test_workflows.py mistral/tests/unit/mstrlfixtures/__init__.py mistral/tests/unit/mstrlfixtures/hacking.py mistral/tests/unit/mstrlfixtures/policy_fixtures.py mistral/tests/unit/notifiers/__init__.py mistral/tests/unit/notifiers/base.py mistral/tests/unit/notifiers/test_notifier_servers.py mistral/tests/unit/notifiers/test_notify.py mistral/tests/unit/policies/__init__.py mistral/tests/unit/policies/test_actions.py 
mistral/tests/unit/policies/test_workflows.py mistral/tests/unit/rpc/__init__.py mistral/tests/unit/rpc/kombu/__init__.py mistral/tests/unit/rpc/kombu/base.py mistral/tests/unit/rpc/kombu/fake_kombu.py mistral/tests/unit/rpc/kombu/test_kombu_client.py mistral/tests/unit/rpc/kombu/test_kombu_hosts.py mistral/tests/unit/rpc/kombu/test_kombu_listener.py mistral/tests/unit/rpc/kombu/test_kombu_server.py mistral/tests/unit/scheduler/__init__.py mistral/tests/unit/scheduler/test_default_scheduler.py mistral/tests/unit/services/__init__.py mistral/tests/unit/services/test_action_manager.py mistral/tests/unit/services/test_action_service.py mistral/tests/unit/services/test_event_engine.py mistral/tests/unit/services/test_expiration_policy.py mistral/tests/unit/services/test_legacy_scheduler.py mistral/tests/unit/services/test_trigger_service.py mistral/tests/unit/services/test_workbook_service.py mistral/tests/unit/services/test_workflow_service.py mistral/tests/unit/utils/__init__.py mistral/tests/unit/utils/test_filter_utils.py mistral/tests/unit/utils/test_rest_utils.py mistral/tests/unit/utils/test_safeLoader.py mistral/tests/unit/utils/test_utils.py mistral/tests/unit/workflow/__init__.py mistral/tests/unit/workflow/test_direct_workflow.py mistral/tests/unit/workflow/test_reverse_workflow.py mistral/tests/unit/workflow/test_states.py mistral/tests/unit/workflow/test_workflow_base.py mistral/utils/__init__.py mistral/utils/filter_utils.py mistral/utils/javascript.py mistral/utils/profiler.py mistral/utils/rest_utils.py mistral/utils/safe_yaml.py mistral/utils/ssh_utils.py mistral/utils/wf_trace.py mistral/utils/openstack/__init__.py mistral/utils/openstack/keystone.py mistral/workflow/__init__.py mistral/workflow/base.py mistral/workflow/commands.py mistral/workflow/data_flow.py mistral/workflow/direct_workflow.py mistral/workflow/reverse_workflow.py mistral/workflow/states.py mistral/workflow/utils.py playbooks/docker-buildimage/post.yaml playbooks/docker-buildimage/run.yaml playbooks/legacy/mistral-ha/run.yaml rally-jobs/README.rst rally-jobs/task-mistral.yaml rally-jobs/extra/README.rst rally-jobs/extra/mistral_wb.yaml rally-jobs/extra/nested_wb.yaml rally-jobs/extra/scenarios/complex_wf/complex_wf_params.json rally-jobs/extra/scenarios/complex_wf/complex_wf_wb.yaml rally-jobs/extra/scenarios/join/join_100_wb.yaml rally-jobs/extra/scenarios/join/join_500_wb.yaml rally-jobs/extra/scenarios/with_items/count_100_concurrency_10.json rally-jobs/extra/scenarios/with_items/wb.yaml rally-jobs/plugins/README.rst rally-jobs/plugins/__init__.py rally-jobs/plugins/mistral_expressions_scenario.py releasenotes/notes/.placeholder releasenotes/notes/add-action-region-to-actions-353f6c4b10f76677.yaml releasenotes/notes/add-execution-event-notifications-0f77c1c3eb1d6929.yaml releasenotes/notes/add-json-dump-deprecate-json-pp-252c6c495fd2dea1.yaml releasenotes/notes/add-missing-tacker-actions-dddcf77ddd90192f.yaml releasenotes/notes/add-publicize-policy-d3b44590286c7fdd.yaml releasenotes/notes/add-py-mini-racer-javascript-evaluator-9d8f9e0e36504d72.yaml releasenotes/notes/add-task_execution_id-indexes-16edc58085e47663.yaml releasenotes/notes/add_action_definition_caching-78d4446d61c6d739.yaml releasenotes/notes/add_config_option_for_oslo_rpc_executor-44afe1f728afdcb2.yaml releasenotes/notes/add_more_logging_for_sending_actions-c2ddd97027803ecd.yaml releasenotes/notes/add_public_event_triggers-ab6249ca85fd5497.yaml releasenotes/notes/add_root_execution_id_to_jinja-90b67c69a50370b5.yaml 
releasenotes/notes/add_skip_validation-9e8b906c45bdb89f.yaml releasenotes/notes/add_yaql_conver_output_data_config_option-4a0fa926a736de7e.yaml releasenotes/notes/add_yaql_convert_input_data_config_property-09822dee1f46eb8e.yaml releasenotes/notes/add_yaql_engine_options-200fdcfda04683ca.yaml releasenotes/notes/allow_none_for_workflow_execution_params-f25b752e207d51d7.yaml releasenotes/notes/alternative-rpc-layer-21ca7f6171c8f628.yaml releasenotes/notes/changing-context-in-delayed-calls-78d8e9a622fe3fe9.yaml releasenotes/notes/changing-isolation-level-to-read-committed-7080833ad284b901.yaml releasenotes/notes/cleanup-rpc-cleints-transport-eaa90fef070b81fd.yaml releasenotes/notes/clone_cached_action_definitions-e8b6005b467f35f2.yaml releasenotes/notes/close-stuck-running-action-executions-b67deda65d117cee.yaml releasenotes/notes/create-and-run-workflows-within-namespaces-e4fba869a889f55f.yaml releasenotes/notes/drop-ceilometerclient-b33330a28906759e.yaml releasenotes/notes/drop-py-2-7-d6ce46d3dc571c01.yaml releasenotes/notes/evaluate_env_parameter-14baa54c860da11c.yaml releasenotes/notes/external_openstack_action_mapping_support-5cec5d9d5192feb7.yaml releasenotes/notes/fix-auth-context-with-big-catalog-7647a07d616e653f.yaml releasenotes/notes/fix-event-engines-ha-cc78f341095cdabf.yaml releasenotes/notes/fix-jinja-expression-handling-135451645d7a4e6f.yaml releasenotes/notes/fix-next-url-formatting-2cc0d8a27625c73a.yaml releasenotes/notes/fix-regression-when-logging-58faa35f02cefb34.yaml releasenotes/notes/fix_error_validate_token_when_run_cron_trigger-7beffc06b75294fb.yaml releasenotes/notes/fix_has_next_tasks_field_calculation-5717f93d7adcd9b0.yaml releasenotes/notes/fix_join_when_last_finished_indirect_error-b0e5adf99cde9a58.yaml releasenotes/notes/fix_pause_command-58294f613488511c.yaml releasenotes/notes/fix_task_function-04b83ada20a71f12.yaml releasenotes/notes/fix_task_state_info_assignment-e25481ce8c3193ba.yaml releasenotes/notes/fix_workflow_output-cee5df431679de6b.yaml releasenotes/notes/force-stop-executions-00cd67dbbc9b5483.yaml releasenotes/notes/function-called-tasks-available-in-an-expression-17ca83d797ffb3ab.yaml releasenotes/notes/http-proxy-to-wsgi-oslo-middleware-f66f1b9533ea1e8a.yaml releasenotes/notes/improve_std_html_action-eca10df5bf934be8.yaml releasenotes/notes/include-output-paramter-in-action-execution-list-c946f1b38dc5a052.yaml releasenotes/notes/include_root_cause_of_action_error_first-4a730a7cbc36f375.yaml releasenotes/notes/ironic-api-newton-9397da8135bb97b4.yaml releasenotes/notes/keycloak-auth-support-74131b49e2071762.yaml releasenotes/notes/load-keystoneauth-option-d9657d3052e82125.yaml releasenotes/notes/magnum-actions-support-b131fa942b937fa5.yaml releasenotes/notes/make_integrity_checker_work_with_batches-56c1cd94200d4c38.yaml releasenotes/notes/mistral-aodh-actions-e4c2b7598d2e39ef.yaml releasenotes/notes/mistral-api-server-https-716a6d741893dd23.yaml releasenotes/notes/mistral-customize-authorization-d6b9a965f3056f09.yaml releasenotes/notes/mistral-docker-image-9d6e04ac928289dd.yaml releasenotes/notes/mistral-engine-scale-in-bd348f9237f32481.yaml releasenotes/notes/mistral-gnocchi-actions-f26fd76b8a4df40e.yaml releasenotes/notes/mistral-murano-actions-2250f745aaf8536a.yaml releasenotes/notes/mistral-senlin-actions-f3fe359c4e91de01.yaml releasenotes/notes/mistral-tempest-plugin-2f6dcbceb4d27eb0.yaml releasenotes/notes/mistral-vitrage-actions-a205b8ea82b43cab.yaml releasenotes/notes/move_openstack_actions_from_mistral_to_mistral_extra-b3f7bc71ffd72c6e.yaml 
releasenotes/notes/namespace_for_adhoc_actions.yaml releasenotes/notes/namespace_for_workbooks.yaml releasenotes/notes/new-service-actions-support-47279bd649732632.yaml releasenotes/notes/optimize_adhoc_actions_scheduling-e324f66f962ae409.yaml releasenotes/notes/policy-and-doc-in-code-9f1737c474998991.yaml releasenotes/notes/refactor_action_heartbeats_without_scheduler-9c3500d6a2b25a4d.yaml releasenotes/notes/region-name-support-9e4b4ccd963ace88.yaml releasenotes/notes/remove_polling_from_join-3a7921c4af741822.yaml releasenotes/notes/remove_redundant_persistent_data_from_task_context-c5281a5f5ae688f1.yaml releasenotes/notes/remove_unnecessary_workflow_execution_update-bdc9526bd39539c4.yaml releasenotes/notes/role-based-resource-access-control-3579714be15d9b0b.yaml releasenotes/notes/safe-rerun-in-task-defaults-87a4cbe12558bc6d.yaml releasenotes/notes/set_security_context_for_action_execution_checker-eee7fb697fb213d1.yaml releasenotes/notes/simplify_workflow_and_join_completion_check-77a47c5d8953096d.yaml releasenotes/notes/std-ssh-add-pkey-2c665a81ff9fbdfd.yaml releasenotes/notes/std.email-reply-to-c283770c798db7d0.yaml releasenotes/notes/sub_execution_api.yaml releasenotes/notes/support-created-at-yaql-function-execution-6ece8eaf34664c38.yaml releasenotes/notes/support-env-in-adhoc-actions-20c98598893aa19f.yaml releasenotes/notes/support-manage-cron-trigger-by-id-ab544e8068b84967.yaml releasenotes/notes/support-manila-action-8af256d5fadd1ac5.yaml releasenotes/notes/support-qinling-action-99cd323d4df36d48.yaml releasenotes/notes/support-zun-action-3263350334d1d34f.yaml releasenotes/notes/tacket-actions-support-2b4cee2644313cb3.yaml releasenotes/notes/transition-message-8dc4dd99240bd0f7.yaml releasenotes/notes/update-mistral-docker-image-0c6294fc021545e0.yaml releasenotes/notes/update-retry-policy-fb5e73ce717ed066.yaml releasenotes/notes/use-workflow-uuid-30d5e51c6ac57f1d.yaml releasenotes/notes/use_mapped_entity_for_root_execution-1af6af12ee437282.yaml releasenotes/notes/using_passive_deletes_in_sqlalchemy-4b3006b3aba55155.yaml releasenotes/notes/validate-ad-hoc-action-api-added-6d7eaaedbe8129a7.yaml releasenotes/notes/wf_final_context_evaluation_with_batches-6292ab64c131dfcc.yaml releasenotes/notes/workflow-create-instance-YaqlEvaluationException-e22afff26a193c4f.yaml releasenotes/notes/workflow-sharing-746255cda20c48d2.yaml releasenotes/notes/workflow_environment_optimizations-deb8868df3f0dc36.yaml releasenotes/notes/x-target-insecure-values-4b2bdbfd42526abc.yaml releasenotes/notes/yaml-json-parse-53217627a647dc1d.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/cover.sh tools/generate_mistralclient_help.sh tools/install_venv.py tools/install_venv_common.py tools/rank_profiled_methods.py tools/sync_db.py tools/sync_db.sh tools/test-setup.sh tools/update_env_deps tools/with_venv.sh tools/config/check_uptodate.sh tools/config/config-generator.mistral.conf tools/config/policy-generator.mistral.conf tools/cookiecutter-mistral-custom/README.rst tools/cookiecutter-mistral-custom/cookiecutter.json tools/cookiecutter-mistral-custom/run_cookiecutter.sh 
tools/cookiecutter-mistral-custom/update_actions.sh tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/LICENSE tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/README.rst tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/requirements.txt tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/setup.cfg tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/setup.py tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/tox.ini tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/__init__.py tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/actions.py tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/expression_functions.py tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/tests/__init__.py tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/tests/test_action.py tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/tests/test_expressions.py tools/docker/DOCKER_README.rst tools/docker/Dockerfile tools/docker/start.sh tools/docker/docker-compose/auth.json tools/docker/docker-compose/infrastructure.yaml tools/docker/docker-compose/mistral-multi-node.yaml tools/docker/docker-compose/mistral-single-node.yaml tools/docker/docker-compose/mistral.env tools/wf_generators/generate_parallel_wf.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538867.0 mistral-10.0.0.0b3/mistral.egg-info/dependency_links.txt0000644000175000017500000000000100000000000023326 0ustar00coreycorey00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538867.0 mistral-10.0.0.0b3/mistral.egg-info/entry_points.txt0000644000175000017500000000652100000000000022562 0ustar00coreycorey00000000000000[console_scripts] mistral-db-manage = mistral.db.sqlalchemy.migration.cli:main mistral-server = mistral.cmd.launch:main [kombu_driver.executors] blocking = futurist:SynchronousExecutor eventlet = futurist:GreenThreadPoolExecutor threading = futurist:ThreadPoolExecutor [mistral.actions] std.async_noop = mistral.actions.std_actions:AsyncNoOpAction std.echo = mistral.actions.std_actions:EchoAction std.email = mistral.actions.std_actions:SendEmailAction std.fail = mistral.actions.std_actions:FailAction std.http = mistral.actions.std_actions:HTTPAction std.javascript = mistral.actions.std_actions:JavaScriptAction std.js = mistral.actions.std_actions:JavaScriptAction std.mistral_http = mistral.actions.std_actions:MistralHTTPAction std.noop = mistral.actions.std_actions:NoOpAction std.sleep = mistral.actions.std_actions:SleepAction std.ssh = mistral.actions.std_actions:SSHAction std.ssh_proxied = mistral.actions.std_actions:SSHProxiedAction std.test_dict = mistral.actions.std_actions:TestDictAction [mistral.auth] keycloak-oidc = mistral.auth.keycloak:KeycloakAuthHandler keystone = mistral.auth.keystone:KeystoneAuthHandler [mistral.executors] local = mistral.executors.default_executor:DefaultExecutor remote = mistral.executors.remote_executor:RemoteExecutor [mistral.expression.evaluators] jinja = mistral.expressions.jinja_expression:InlineJinjaEvaluator yaql = mistral.expressions.yaql_expression:InlineYAQLEvaluator [mistral.expression.functions] env = mistral.expressions.std_functions:env_ execution = mistral.expressions.std_functions:execution_ 
executions = mistral.expressions.std_functions:executions_ global = mistral.expressions.std_functions:global_ json_dump = mistral.expressions.std_functions:json_dump_ json_parse = mistral.expressions.std_functions:json_parse_ json_pp = mistral.expressions.std_functions:json_pp_ task = mistral.expressions.std_functions:task_ tasks = mistral.expressions.std_functions:tasks_ uuid = mistral.expressions.std_functions:uuid_ yaml_dump = mistral.expressions.std_functions:yaml_dump_ yaml_parse = mistral.expressions.std_functions:yaml_parse_ [mistral.js.implementation] py_mini_racer = mistral.utils.javascript:PyMiniRacerEvaluator pyv8 = mistral.utils.javascript:PyV8Evaluator v8eval = mistral.utils.javascript:V8EvalEvaluator [mistral.notification.publishers] noop = mistral.notifiers.publishers.noop:NoopPublisher webhook = mistral.notifiers.publishers.webhook:WebhookPublisher [mistral.notifiers] local = mistral.notifiers.default_notifier:DefaultNotifier remote = mistral.notifiers.remote_notifier:RemoteNotifier [mistral.rpc.backends] kombu_client = mistral.rpc.kombu.kombu_client:KombuRPCClient kombu_server = mistral.rpc.kombu.kombu_server:KombuRPCServer oslo_client = mistral.rpc.oslo.oslo_client:OsloRPCClient oslo_server = mistral.rpc.oslo.oslo_server:OsloRPCServer [mistral.schedulers] default = mistral.scheduler.default_scheduler:DefaultScheduler legacy = mistral.services.legacy_scheduler:LegacyScheduler [oslo.config.opts] mistral.config = mistral.config:list_opts [oslo.config.opts.defaults] mistral.config = mistral.config:set_cors_middleware_defaults [oslo.policy.enforcer] mistral = mistral.api.access_control:get_enforcer [oslo.policy.policies] mistral = mistral.policies:list_rules [pygments.lexers] mistral = mistral.ext.pygmentplugin:MistralLexer [wsgi_scripts] mistral-wsgi-api = mistral.api.app:init_wsgi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538867.0 mistral-10.0.0.0b3/mistral.egg-info/not-zip-safe0000644000175000017500000000000100000000000021506 0ustar00coreycorey00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538867.0 mistral-10.0.0.0b3/mistral.egg-info/pbr.json0000644000175000017500000000005700000000000020740 0ustar00coreycorey00000000000000{"git_version": "a7da00d7", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538867.0 mistral-10.0.0.0b3/mistral.egg-info/requires.txt0000644000175000017500000000141500000000000021661 0ustar00coreycorey00000000000000Babel!=2.4.0,>=2.3.4 Jinja2>=2.10 PyJWT>=1.5 PyYAML>=5.1 SQLAlchemy>=1.2.5 WSME>=0.8.0 alembic>=0.9.6 cachetools>=2.0.0 croniter>=0.3.4 dogpile.cache>=0.6.2 eventlet!=0.20.1,!=0.21.0,!=0.23.0,!=0.25.0,>=0.20.0 jsonschema>=2.6.0 keystonemiddleware>=4.18.0 kombu!=4.0.2,>=4.6.1 mistral-lib>=1.4.0 networkx>=2.3 oslo.concurrency>=3.26.0 oslo.config>=5.2.0 oslo.context>=2.20.0 oslo.db>=4.40.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.messaging>=5.29.0 oslo.middleware>=3.31.0 oslo.policy>=1.30.0 oslo.serialization>=2.21.1 oslo.service!=1.28.1,>=1.24.0 oslo.utils>=3.37.0 osprofiler>=1.4.0 paramiko>=2.4.1 pbr!=2.1.0,>=2.0.0 pecan>=1.2.1 requests>=2.14.2 six>=1.10.0 stevedore>=1.20.0 tenacity>=5.0.1 tooz>=1.58.0 yaql>=1.1.3 zake>=0.1.6 [:(python_version<'3.0')] networkx<2.3,>=1.10 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538867.0 
mistral-10.0.0.0b3/mistral.egg-info/top_level.txt0000644000175000017500000000001000000000000022001 0ustar00coreycorey00000000000000mistral ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0775666 mistral-10.0.0.0b3/playbooks/0000755000175000017500000000000000000000000016116 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1575682 mistral-10.0.0.0b3/playbooks/docker-buildimage/0000755000175000017500000000000000000000000021465 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/playbooks/docker-buildimage/post.yaml0000644000175000017500000000122700000000000023340 0ustar00coreycorey00000000000000- hosts: all tasks: - name: Ensure artifacts directory exists file: path: '{{ zuul.executor.work_root }}/artifacts' state: directory delegate_to: localhost - name: Copy files from {{ ansible_user_dir }}/src/{{ zuul.project.canonical_name }} on node synchronize: src: '{{ ansible_user_dir }}/src/{{ zuul.project.canonical_name }}/' dest: '{{ zuul.executor.work_root }}/artifacts/images' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/mistral-docker.tar.gz - --include=*/ - --exclude=* - --prune-empty-dirs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/playbooks/docker-buildimage/run.yaml0000644000175000017500000000136400000000000023161 0ustar00coreycorey00000000000000- hosts: all vars: work_dir: '{{ ansible_user_dir }}/src/{{ zuul.project.canonical_name }}' tasks: - name: Install Docker shell: curl -fsSL https://get.docker.com/ | sh args: chdir: '{{ work_dir }}' become: yes - name: Restart Docker service service: name: docker state: restarted become: yes - name: Install docker-py pip: name: docker-py become: yes - name: Build and archive a Mistral image docker_image: name: mistral path: '{{ work_dir }}' dockerfile: '{{ work_dir }}/tools/docker/Dockerfile' archive_path: '{{ work_dir}}/mistral-docker.tar.gz' buildargs: BUILD_V8EVAL: false become: yes ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0775666 mistral-10.0.0.0b3/playbooks/legacy/0000755000175000017500000000000000000000000017362 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1575682 mistral-10.0.0.0b3/playbooks/legacy/mistral-ha/0000755000175000017500000000000000000000000021423 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/playbooks/legacy/mistral-ha/run.yaml0000644000175000017500000000401300000000000023111 0ustar00coreycorey00000000000000- hosts: all name: Autoconverted job legacy-mistral-ha from old job gate-mistral-ha-ubuntu-xenial-nv roles: - role: bindep bindep_dir: "{{ ansible_user_dir }}/workspace" tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x CLONEMAP=`mktemp` function cleanup { # In cases where zuul-cloner is aborted during a git # clone operation, git will remove the git work tree in # its cleanup. 
The work tree in these jobs is the # workspace directory, which means that subsequent # jenkins post-build actions cannot run because the # workspace has been removed. # To reduce the likelihood of this having an impact, # recreate the workspace directory if needed mkdir -p $WORKSPACE rm -f $CLONEMAP } trap cleanup EXIT cat > $CLONEMAP << EOF clonemap: - name: $ZUUL_PROJECT dest: . EOF /usr/zuul-env/bin/zuul-cloner -m $CLONEMAP --cache-dir /opt/git \ https://opendev.org $ZUUL_PROJECT executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | # TODO: this is a temporary solution that puts all installation # code into a script residing in the mistral repo just for more # convenient debugging (since we will be able to send patchsets to # mistral with "check experimental" and trigger the gate). After # it's ready, it'll be better to create a special builder in this # file. export DEVSTACK_GATE_USE_PYTHON3=True ha_gate/install.sh ha_gate/run_tests.sh chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1575682 mistral-10.0.0.0b3/rally-jobs/0000755000175000017500000000000000000000000016171 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/README.rst0000644000175000017500000000166000000000000017663 0ustar00coreycorey00000000000000Rally job related files ======================= This directory contains Rally tasks and plugins that are run by OpenStack CI. Structure --------- * task-mistral.yaml is a task that will be run in gates against OpenStack deployed by DevStack with Rally & Mistral installed. * plugins - directory where you can add Rally plugins. Almost everything in Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks, generic cleanup resources, and so on. * extra - all files from this directory will be copied to the gates, so you are able to use absolute paths in Rally tasks. Files will be in ~/.rally/extra/* Useful links ------------ * More about Rally: https://rally.readthedocs.org/en/latest/ * How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html * About plugins: https://rally.readthedocs.org/en/latest/plugins.html * Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1575682 mistral-10.0.0.0b3/rally-jobs/extra/0000755000175000017500000000000000000000000017314 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/extra/README.rst0000644000175000017500000000025400000000000021004 0ustar00coreycorey00000000000000Extra files =========== All files from this directory will be copied to the gates, so you are able to use absolute paths in Rally tasks.
Files will be in ~/.rally/extra/* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/extra/mistral_wb.yaml0000644000175000017500000000025000000000000022340 0ustar00coreycorey00000000000000--- version: "2.0" name: wb workflows: wf1: type: direct tasks: hello: action: std.echo output="Hello" publish: result: $ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/extra/nested_wb.yaml0000644000175000017500000000255500000000000022161 0ustar00coreycorey00000000000000--- version: "2.0" name: wb workflows: wrapping_wf: type: direct tasks: call_inner_wf_1: workflow: inner_wf call_inner_wf_2: workflow: inner_wf call_inner_wf_3: workflow: inner_wf call_inner_wf_4: workflow: inner_wf inner_wf: type: direct tasks: hello1: action: std.echo output="Hello" publish: result: $ hello2: action: std.echo output="Hello" publish: result: $ on-success: - world hello3: action: std.echo output="Hello" publish: result: $ on-success: - world hello4: action: std.echo output="Hello" publish: result: $ on-success: - world world: action: std.echo output="World" join: all publish: result: $ on-success: - test1 - test2 - test3 - test4 test1: action: std.echo output="Test!!" publish: result: $ test2: action: std.echo output="Test!!" publish: result: $ test3: action: std.echo output="Test!!" publish: result: $ test4: action: std.echo output="Test!!" publish: result: $ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0775666 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/0000755000175000017500000000000000000000000021302 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1575682 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/complex_wf/0000755000175000017500000000000000000000000023445 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/complex_wf/complex_wf_params.json0000644000175000017500000000040300000000000030043 0ustar00coreycorey00000000000000{ "env": { "env_param_01": { "env_param_01_nested_01": "xyz" }, "env_param_02": { "env_param_02_nested_01": "xyz" }, "working_url": "http://httpstat.us/200", "return_error_code_url": "https://httpbin.org/status/418" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/complex_wf/complex_wf_wb.yaml0000644000175000017500000007134000000000000027171 0ustar00coreycorey00000000000000--- version: "2.0" name: very_big_wb actions: my_action: input: - action_parameter_05: xyz - action_parameter_06: "xyz" - env - action_parameter_07: xyz - action_parameter_04: null - action_parameter_03: "xyz" - action_parameter_02: "" - action_parameter_08: "" - action_parameter_09: xyz - action_param_01: "" - action_parameter_01: "xyz" - action_parameter_10: "xyz" - output: output base: std.http base-input: url: <% $.env.working_url %> allow_redirects: true verify: 'xyz' headers: "Content-Type": "application/json" "Header1": <% $.env.env_param_01.env_param_01_nested_01 %> "Header2": <% $.env.env_param_01.env_param_01_nested_01 + ' ' + $.env.env_param_02.env_param_02_nested_01 %> method: PATCH body: xyz: <% $.action_parameter_05 %> workflows: my_workflow: input: - 
workflow_parameter_04: "workflow_parameter_04" - workflow_parameter_03: "workflow_parameter_03" - workflow_parameter_02: "workflow_parameter_02" - workflow_parameter_01: null - workflow_parameter_05: "workflow_parameter_05" - workflow_parameter_06: "workflow_parameter_06" output: wf_output_01: xyz wf_output_02: <% env().env_param_01 %> task-defaults: on-error: - wf_default_on_error tasks: wf_task_01: action: my_action input: env: "<% env() %>" action_parameter_02: "" action_parameter_03: xyz action_parameter_04: '<% env().env_param_02.env_param_02_nested_01 %>' on-error: - fail on-success: - wf_task_02 publish: wf_published_01: nested_01: "xyz" wf_task_02: action: std.http input: url: <% env().working_url %> method: POST allow_redirects: true headers: "Content-Type": "application/json" "X-Flow-ID": <% env().env_param_02.env_param_02_nested_01 %> "xyz": <% $.wf_published_01.nested_01 %> "Authorization": <% env().env_param_02.env_param_02_nested_01 + ' ' + env().env_param_02.env_param_02_nested_01 %> body: env_param_01_nested_01: <% env().env_param_01.env_param_01_nested_01 %> aaaaa: a1: <% $.workflow_parameter_03 %> a2: <% $.workflow_parameter_02 %> a3: <% $.workflow_parameter_01 %> a4: <% env().env_param_02 %> a5: <% $.workflow_parameter_04 %> on-success: - wf_task_05 wf_task_03: action: std.echo input: output: "<% $ %>" wf_task_04: action: std.echo input: output: "<% $ %>" on-complete: - fail wf_default_on_error: action: my_action input: action_parameter_02: '' action_parameter_01: "<% $.wf_published_01.nested_01 %>" action_parameter_03: fxyz env: "<% env() %>" on-error: - wf_task_04 on-success: - wf_task_04 wf_task_05: action: my_action input: env: "<% env() %>" action_parameter_02: "" action_parameter_01: "<% $.wf_published_01.nested_01 %>" action_parameter_03: xyz on-error: - wf_task_03 top_level_workflow: tasks: task_01_top_level: action: "std.echo" input: output: "<% $ %>" on-success: - "do_work" do_work: workflow: "big_wf" on-success: - more_work - make_sure_no_errors task_12: action: "my_action" input: env: "<% env() %>" action_parameter_06: "xxx" action_parameter_03: "<% env().env_param_01.env_param_01_nested_01 %>" action_parameter_10: "ggg" on-complete: - make_sure_no_errors more_work: action: "my_action" input: env: "<% env() %>" action_parameter_06: "xxx" action_parameter_03: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_12" make_sure_no_errors: action: std.noop publish: tasks_in_error: "<% tasks(execution().id, true, ERROR) %>" on-success: - do_fail: "<% len($.tasks_in_error) > 0 %>" do_fail: action: std.fail big_wf: input: - attribute_01: "attribute_01" - attribute_02: "attribute_02" - attribute_03: "attribute_03" - attribute_04: "attribute_04" - attribute_05: "attribute_05" - attribute_06: "attribute_06" - attribute_07: "attribute_07" - attribute_08: "attribute_08" - attribute_09: "attribute_09" - attribute_10: "attribute_10" - attribute_11: "attribute_11" - attribute_12: "attribute_12" - attribute_13: "attribute_13" - attribute_14: "attribute_14" - attribute_15: "attribute_15" - attribute_16: "attribute_16" - attribute_17: "attribute_17" - attribute_18: "attribute_18" - attribute_19: "attribute_19" - attribute_20: "attribute_20" - attribute_21: "attribute_21" - attribute_22: "attribute_22" - attribute_23: "attribute_23" - attribute_24: "attribute_24" - attribute_25: "attribute_25" - attribute_26: "attribute_26" - attribute_27: "attribute_27" - attribute_28: "attribute_28" - attribute_29: "attribute_29" - input_01: "input_01" - input_02: "input_02" - 
input_03: "input_03" - input_04: "input_04" - input_05: "input_05" - input_06: "input_06" - input_07: "input_07" - input_08: "input_08" - property_01: null - property_02: "*" - property_03: null - property_04: "xyz" - property_05: null - property_06: null - property_07: null - property_08: "1232" - property_09: "xyz" - property_10: "*" - property_11: "*" - property_12: "xyz" - property_13: false - property_14: null - property_15: "xyz" - property_16: null - property_17: "xyz" - property_18: null - property_19: "xyz" - property_20: "1" - property_21: "xyz" - property_22: 123 - property_23: false task-defaults: on-error: - "system_task_on_error" tasks: task_01: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_30) %>" publish: attribute_31: "<% $.get(attribute_30) %>" on-success: - "task_08" - "task_18" - "task_23" - "task_25" task_02: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_30) %>" publish: attribute_63: "<% $.get(attribute_30) %>" on-success: - "task_08" - "task_18" - "task_23" - "task_25" task_03: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_30) %>" publish: attribute_32: "<% $.get(attribute_30) %>" on-success: - "task_08" - "task_18" - "task_23" - "task_25" task_04: workflow: "my_workflow" input: workflow_parameter_01: workflow_parameter_01_nested_param: "<% $.get(input_01) %>" workflow_parameter_02: "<% $.get(attribute_01) %>" workflow_parameter_03: "<% $.get(attribute_03) %>" workflow_parameter_04: "<% $.get(attribute_02) %>" publish: task_04_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" published_02: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_30: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_33: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_01" - "task_02" - "task_03" - "task_04_2" task_04_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_04_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" task_05: workflow: "my_workflow" input: workflow_parameter_01: xyz_01: "<% $.get(property_04) %>" xyz_02: "<% $.get(attribute_34) %>" xyz_03: "<% $.get(attribute_35) %>" xyz_04: "<% $.get(attribute_36) %>" xyz_05: "<% $.get(property_06) %>" xyz_06: "<% $.get(attribute_37) %>" xyz_07: "<% $.get(property_07) %>" xyz_08: "<% $.get(attribute_38) %>" xyz_09: "<% $.get(attribute_39) %>" xyz_10: "<% $.get(attribute_40) %>" xyz_11: "<% $.get(attribute_41) %>" xyz_12: "<% $.get(attribute_42) %>" xyz_13: "<% $.get(property_02) %>" xyz_14: "<% $.get(property_01) %>" xyz_15: "<% $.get(property_05) %>" xyz_16: "<% $.get(attribute_43) %>" xyz_17: "<% $.get(attribute_44) %>" xyz_18: "<% $.get(property_03) %>" workflow_parameter_02: "<% $.get(attribute_04) %>" workflow_parameter_03: "<% $.get(attribute_06) %>" workflow_parameter_04: "<% $.get(attribute_05) %>" publish: task_05_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" published_02: "<% env().env_param_01.env_param_01_nested_01 %>" 
attribute_45: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_46: "<% env().env_param_01.env_param_01_nested_01 %>" join: "all" on-success: - "task_05_2" - "task_23" task_05_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_05_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" task_06: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(property_11) %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(property_10) %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(property_09) %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(property_08) %>" publish: attribute_37: "<% $.get(property_08) %>" attribute_34: "<% $.get(property_09) %>" attribute_42: "<% $.get(property_10) %>" attribute_38: "<% $.get(property_11) %>" on-success: - "task_05" - "task_08" - "task_18" - "task_23" task_07: workflow: "my_workflow" input: workflow_parameter_01: xyz_11: "<% $.get(attribute_53) %>" workflow_parameter_02: "<% $.get(property_18) %>" workflow_parameter_03: "<% $.get(attribute_19) %>" workflow_parameter_04: "<% $.get(property_17) %>" workflow_parameter_05: "<% $.get(property_19) %>" workflow_parameter_06: "<% $.get(property_20) %>" workflow_parameter_07: "<% $.get(attribute_20) %>" publish: task_07_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" outputs: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_65: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_66: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_07_2" - "task_08" - "task_18" task_07_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_07_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" task_08: workflow: "my_workflow" input: workflow_parameter_01: xyz_01: "<% $.get(property_12) %>" xyz_02: "<% $.get(attribute_65) %>" xyz_03: "<% $.get(attribute_32) %>" xyz_04: "<% $.get(property_13) %>" workflow_parameter_02: "<% $.get(attribute_07) %>" workflow_parameter_03: "<% $.get(attribute_09) %>" workflow_parameter_04: "<% $.get(attribute_08) %>" publish: task_08_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" published_02: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_67: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_68: "<% env().env_param_01.env_param_01_nested_01 %>" join: "all" on-success: - "task_05" - "task_08_2" - "task_09" task_08_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_08_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" task_09: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - 
xyz_30: "xyz" xyznm: "xyz" xyznmtv: "xyz" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_67) %>" publish: attribute_43: "xyz" attribute_44: "<% $.get(attribute_67) %>" on-success: - "task_05" - "task_23" task_10: workflow: "my_workflow" input: workflow_parameter_01: xyz_01: "<% $.get(attribute_45) %>" xyz_11: "<% $.get(attribute_46) %>" xyz_21: "<% $.get(input_01) %>" xyz_22: "<% $.get(attribute_47) %>" xyz_23: "<% $.get(attribute_48) %>" workflow_parameter_02: "<% $.get(attribute_10) %>" workflow_parameter_03: "<% $.get(attribute_12) %>" workflow_parameter_04: "<% $.get(attribute_11) %>" publish: task_10_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" published_02: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_49: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_50: "<% env().env_param_01.env_param_01_nested_01 %>" join: "all" on-success: - "task_07" - "task_10_2" task_10_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_10_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" task_11: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_51) %>" publish: attribute_46: "<% $.get(attribute_51) %>" on-success: - "task_10" - "task_23" task_12: workflow: "my_workflow" input: workflow_parameter_02: "<% $.get(attribute_13) %>" workflow_parameter_03: "<% $.get(attribute_15) %>" workflow_parameter_04: "<% $.get(attribute_14) %>" publish: task_12_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" published_02: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_51: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_52: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_53: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_11" - "task_12_2" task_12_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_12_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" task_13: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_54) %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(property_15) %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(input_02) %>" publish: attribute_47: "<% $.get(input_02) %>" attribute_45: "<% $.get(property_15) %>" attribute_48: "<% $.get(attribute_54) %>" on-success: - "task_05" - "task_10" - "task_17" - "task_23" task_14: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_54) %>" publish: attribute_55: "<% $.get(attribute_54) %>" on-success: - "task_05" - "task_10" - "task_17" - "task_23" task_15: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_17) %>" - xyz_30: 
"xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_54) %>" publish: attribute_39: "<% $.get(attribute_54) %>" attribute_35: "<% $.get(attribute_17) %>" on-success: - "task_05" - "task_10" - "task_17" - "task_23" task_16: workflow: "my_workflow" input: workflow_parameter_01: xyz_01: "<% $.get(property_15) %>" xyz_02: "<% $.get(input_03) %>" xyz_03: "<% $.get(attribute_56) %>" xyz_04: "<% $.get(property_16) %>" xyz_05: "<% $.get(property_14) %>" xyz_06: "<% $.get(input_02) %>" workflow_parameter_02: "<% $.get(attribute_16) %>" workflow_parameter_03: "<% $.get(attribute_18) %>" workflow_parameter_04: "<% $.get(attribute_17) %>" publish: task_16_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" published_02: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_54: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_57: "<% env().env_param_01.env_param_01_nested_01 %>" join: "all" on-success: - "task_05" - "task_10" - "task_13" - "task_14" - "task_15" - "task_16_2" - "task_17" task_16_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_16_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" task_17: workflow: "my_workflow" input: workflow_parameter_01: xyz_01: "<% $.get(input_05) %>" xyz_02: "<% $.get(input_06) %>" xyz_03: "<% $.get(property_21) %>" xyz_04: "<% $.get(input_08) %>" xyz_05: "<% $.get(input_04) %>" xyz_06: "<% $.get(input_07) %>" xyz_07: "<% $.get(attribute_55) %>" workflow_parameter_02: "<% $.get(attribute_21) %>" workflow_parameter_03: "<% $.get(attribute_23) %>" workflow_parameter_04: "<% $.get(attribute_22) %>" publish: attribute_58: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_59: "<% env().env_param_01.env_param_01_nested_01 %>" task_17_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" published_02: "<% env().env_param_01.env_param_01_nested_01 %>" join: "all" on-success: - "task_17_2" - "task_23" task_17_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_17_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" task_18: workflow: "my_workflow" input: workflow_parameter_01: xyz_15: "<% $.get(property_22) %>" xyz_16: "<% $.get(attribute_31) %>" workflow_parameter_02: "<% $.get(attribute_24) %>" workflow_parameter_03: "<% $.get(attribute_26) %>" workflow_parameter_04: "<% $.get(attribute_25) %>" publish: attribute_60: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_61: "<% env().env_param_01.env_param_01_nested_01 %>" task_18_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" published_02: "<% env().env_param_01.env_param_01_nested_01 %>" join: "all" on-success: - "task_05" - "task_18_2" - "task_19" task_18_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_18_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% 
env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" task_19: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_60) %>" publish: attribute_40: "<% $.get(attribute_60) %>" on-success: - "task_05" - "task_23" task_20: action: "my_action" input: output: "<% $ %>" on-complete: - "fail" task_21: workflow: "my_workflow" on-success: - "task_04" - "task_06" - "task_12" problematic_task: action: "my_action" input: env: "<% env() %>" action_parameter_02: "problematic_task" action_parameter_01: "<% env().env_param_02.env_param_02_nested_01 %>" action_parameter_03: "xyz" on-success: - "task_20" on-error: - "task_20" task_22: action: "my_action" input: env: "<% env() %>" action_parameter_02: "" action_parameter_01: "<% env().env_param_02.env_param_02_nested_01 %>" action_parameter_03: "xyz" on-error: - "task_20" task_23: workflow: "my_workflow" input: env: "<% env() %>" workflow_parameter_05: "xyz" workflow_parameter_06: "xyz" join: "all" on-success: - "task_22" system_task_on_error: workflow: "my_workflow" input: env: "<% env() %>" workflow_parameter_05: "xyz" workflow_parameter_06: "xyz" join: 1 on-complete: - "problematic_task" task_24: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% $.get(attribute_62) %>" publish: attribute_56: "<% $.get(attribute_62) %>" on-success: - "task_16" - "task_23" task_25: workflow: "my_workflow" input: workflow_parameter_01: xyz_01: "<% $.get(attribute_63) %>" xyz_02: "<% $.get(property_23) %>" workflow_parameter_02: "<% $.get(attribute_27) %>" workflow_parameter_03: "<% $.get(attribute_29) %>" workflow_parameter_04: "<% $.get(attribute_28) %>" publish: attribute_62: "<% env().env_param_01.env_param_01_nested_01 %>" attribute_64: "<% env().env_param_01.env_param_01_nested_01 %>" task_25_workflow_outputs: published_01: "<% env().env_param_01.env_param_01_nested_01 %>" published_02: "<% env().env_param_01.env_param_01_nested_01 %>" join: "all" on-success: - "task_16" - "task_24" - "task_25_2" task_25_2: action: "my_action" input: env: "<% env() %>" action_param_01: action_param_01_nested_02: xyz: "xyz" xyzn: "xyz" xyznm: "xyz" xyznmt: "<% $.task_25_workflow_outputs %>" action_param_01_nested_param: - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" - xyz_30: "xyz" xyznm: "xyz" xyznmtv: "<% env().env_param_01.env_param_01_nested_01 %>" on-success: - "task_23" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1575682 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/join/0000755000175000017500000000000000000000000022241 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/join/join_100_wb.yaml0000644000175000017500000001106200000000000025134 0ustar00coreycorey00000000000000--- version: '2.0' name: join_100_wb workflows: wf: description: contains "join" that joins 100 parallel tasks tasks: join_task: join: all task_1: on-success: join_task task_2: on-success: join_task task_3: on-success: join_task task_4: on-success: join_task task_5: on-success: join_task task_6: on-success: join_task task_7: on-success: join_task task_8: on-success: join_task task_9: on-success: 
join_task task_10: on-success: join_task task_11: on-success: join_task task_12: on-success: join_task task_13: on-success: join_task task_14: on-success: join_task task_15: on-success: join_task task_16: on-success: join_task task_17: on-success: join_task task_18: on-success: join_task task_19: on-success: join_task task_20: on-success: join_task task_21: on-success: join_task task_22: on-success: join_task task_23: on-success: join_task task_24: on-success: join_task task_25: on-success: join_task task_26: on-success: join_task task_27: on-success: join_task task_28: on-success: join_task task_29: on-success: join_task task_30: on-success: join_task task_31: on-success: join_task task_32: on-success: join_task task_33: on-success: join_task task_34: on-success: join_task task_35: on-success: join_task task_36: on-success: join_task task_37: on-success: join_task task_38: on-success: join_task task_39: on-success: join_task task_40: on-success: join_task task_41: on-success: join_task task_42: on-success: join_task task_43: on-success: join_task task_44: on-success: join_task task_45: on-success: join_task task_46: on-success: join_task task_47: on-success: join_task task_48: on-success: join_task task_49: on-success: join_task task_50: on-success: join_task task_51: on-success: join_task task_52: on-success: join_task task_53: on-success: join_task task_54: on-success: join_task task_55: on-success: join_task task_56: on-success: join_task task_57: on-success: join_task task_58: on-success: join_task task_59: on-success: join_task task_60: on-success: join_task task_61: on-success: join_task task_62: on-success: join_task task_63: on-success: join_task task_64: on-success: join_task task_65: on-success: join_task task_66: on-success: join_task task_67: on-success: join_task task_68: on-success: join_task task_69: on-success: join_task task_70: on-success: join_task task_71: on-success: join_task task_72: on-success: join_task task_73: on-success: join_task task_74: on-success: join_task task_75: on-success: join_task task_76: on-success: join_task task_77: on-success: join_task task_78: on-success: join_task task_79: on-success: join_task task_80: on-success: join_task task_81: on-success: join_task task_82: on-success: join_task task_83: on-success: join_task task_84: on-success: join_task task_85: on-success: join_task task_86: on-success: join_task task_87: on-success: join_task task_88: on-success: join_task task_89: on-success: join_task task_90: on-success: join_task task_91: on-success: join_task task_92: on-success: join_task task_93: on-success: join_task task_94: on-success: join_task task_95: on-success: join_task task_96: on-success: join_task task_97: on-success: join_task task_98: on-success: join_task task_99: on-success: join_task task_100: on-success: join_task ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/join/join_500_wb.yaml0000644000175000017500000005502200000000000025144 0ustar00coreycorey00000000000000--- version: '2.0' name: join_500_wb workflows: wf: description: contains "join" that joins 500 parallel tasks tasks: join_task: join: all task_1: on-success: join_task task_2: on-success: join_task task_3: on-success: join_task task_4: on-success: join_task task_5: on-success: join_task task_6: on-success: join_task task_7: on-success: join_task task_8: on-success: join_task task_9: on-success: join_task task_10: on-success: join_task task_11: on-success: 
join_task task_12: on-success: join_task task_13: on-success: join_task task_14: on-success: join_task task_15: on-success: join_task task_16: on-success: join_task task_17: on-success: join_task task_18: on-success: join_task task_19: on-success: join_task task_20: on-success: join_task task_21: on-success: join_task task_22: on-success: join_task task_23: on-success: join_task task_24: on-success: join_task task_25: on-success: join_task task_26: on-success: join_task task_27: on-success: join_task task_28: on-success: join_task task_29: on-success: join_task task_30: on-success: join_task task_31: on-success: join_task task_32: on-success: join_task task_33: on-success: join_task task_34: on-success: join_task task_35: on-success: join_task task_36: on-success: join_task task_37: on-success: join_task task_38: on-success: join_task task_39: on-success: join_task task_40: on-success: join_task task_41: on-success: join_task task_42: on-success: join_task task_43: on-success: join_task task_44: on-success: join_task task_45: on-success: join_task task_46: on-success: join_task task_47: on-success: join_task task_48: on-success: join_task task_49: on-success: join_task task_50: on-success: join_task task_51: on-success: join_task task_52: on-success: join_task task_53: on-success: join_task task_54: on-success: join_task task_55: on-success: join_task task_56: on-success: join_task task_57: on-success: join_task task_58: on-success: join_task task_59: on-success: join_task task_60: on-success: join_task task_61: on-success: join_task task_62: on-success: join_task task_63: on-success: join_task task_64: on-success: join_task task_65: on-success: join_task task_66: on-success: join_task task_67: on-success: join_task task_68: on-success: join_task task_69: on-success: join_task task_70: on-success: join_task task_71: on-success: join_task task_72: on-success: join_task task_73: on-success: join_task task_74: on-success: join_task task_75: on-success: join_task task_76: on-success: join_task task_77: on-success: join_task task_78: on-success: join_task task_79: on-success: join_task task_80: on-success: join_task task_81: on-success: join_task task_82: on-success: join_task task_83: on-success: join_task task_84: on-success: join_task task_85: on-success: join_task task_86: on-success: join_task task_87: on-success: join_task task_88: on-success: join_task task_89: on-success: join_task task_90: on-success: join_task task_91: on-success: join_task task_92: on-success: join_task task_93: on-success: join_task task_94: on-success: join_task task_95: on-success: join_task task_96: on-success: join_task task_97: on-success: join_task task_98: on-success: join_task task_99: on-success: join_task task_100: on-success: join_task task_101: on-success: join_task task_102: on-success: join_task task_103: on-success: join_task task_104: on-success: join_task task_105: on-success: join_task task_106: on-success: join_task task_107: on-success: join_task task_108: on-success: join_task task_109: on-success: join_task task_110: on-success: join_task task_111: on-success: join_task task_112: on-success: join_task task_113: on-success: join_task task_114: on-success: join_task task_115: on-success: join_task task_116: on-success: join_task task_117: on-success: join_task task_118: on-success: join_task task_119: on-success: join_task task_120: on-success: join_task task_121: on-success: join_task task_122: on-success: join_task task_123: on-success: join_task task_124: on-success: join_task task_125: 
on-success: join_task task_126: on-success: join_task task_127: on-success: join_task task_128: on-success: join_task task_129: on-success: join_task task_130: on-success: join_task task_131: on-success: join_task task_132: on-success: join_task task_133: on-success: join_task task_134: on-success: join_task task_135: on-success: join_task task_136: on-success: join_task task_137: on-success: join_task task_138: on-success: join_task task_139: on-success: join_task task_140: on-success: join_task task_141: on-success: join_task task_142: on-success: join_task task_143: on-success: join_task task_144: on-success: join_task task_145: on-success: join_task task_146: on-success: join_task task_147: on-success: join_task task_148: on-success: join_task task_149: on-success: join_task task_150: on-success: join_task task_151: on-success: join_task task_152: on-success: join_task task_153: on-success: join_task task_154: on-success: join_task task_155: on-success: join_task task_156: on-success: join_task task_157: on-success: join_task task_158: on-success: join_task task_159: on-success: join_task task_160: on-success: join_task task_161: on-success: join_task task_162: on-success: join_task task_163: on-success: join_task task_164: on-success: join_task task_165: on-success: join_task task_166: on-success: join_task task_167: on-success: join_task task_168: on-success: join_task task_169: on-success: join_task task_170: on-success: join_task task_171: on-success: join_task task_172: on-success: join_task task_173: on-success: join_task task_174: on-success: join_task task_175: on-success: join_task task_176: on-success: join_task task_177: on-success: join_task task_178: on-success: join_task task_179: on-success: join_task task_180: on-success: join_task task_181: on-success: join_task task_182: on-success: join_task task_183: on-success: join_task task_184: on-success: join_task task_185: on-success: join_task task_186: on-success: join_task task_187: on-success: join_task task_188: on-success: join_task task_189: on-success: join_task task_190: on-success: join_task task_191: on-success: join_task task_192: on-success: join_task task_193: on-success: join_task task_194: on-success: join_task task_195: on-success: join_task task_196: on-success: join_task task_197: on-success: join_task task_198: on-success: join_task task_199: on-success: join_task task_200: on-success: join_task task_201: on-success: join_task task_202: on-success: join_task task_203: on-success: join_task task_204: on-success: join_task task_205: on-success: join_task task_206: on-success: join_task task_207: on-success: join_task task_208: on-success: join_task task_209: on-success: join_task task_210: on-success: join_task task_211: on-success: join_task task_212: on-success: join_task task_213: on-success: join_task task_214: on-success: join_task task_215: on-success: join_task task_216: on-success: join_task task_217: on-success: join_task task_218: on-success: join_task task_219: on-success: join_task task_220: on-success: join_task task_221: on-success: join_task task_222: on-success: join_task task_223: on-success: join_task task_224: on-success: join_task task_225: on-success: join_task task_226: on-success: join_task task_227: on-success: join_task task_228: on-success: join_task task_229: on-success: join_task task_230: on-success: join_task task_231: on-success: join_task task_232: on-success: join_task task_233: on-success: join_task task_234: on-success: join_task task_235: on-success: join_task task_236: 
on-success: join_task task_237: on-success: join_task task_238: on-success: join_task task_239: on-success: join_task task_240: on-success: join_task task_241: on-success: join_task task_242: on-success: join_task task_243: on-success: join_task task_244: on-success: join_task task_245: on-success: join_task task_246: on-success: join_task task_247: on-success: join_task task_248: on-success: join_task task_249: on-success: join_task task_250: on-success: join_task task_251: on-success: join_task task_252: on-success: join_task task_253: on-success: join_task task_254: on-success: join_task task_255: on-success: join_task task_256: on-success: join_task task_257: on-success: join_task task_258: on-success: join_task task_259: on-success: join_task task_260: on-success: join_task task_261: on-success: join_task task_262: on-success: join_task task_263: on-success: join_task task_264: on-success: join_task task_265: on-success: join_task task_266: on-success: join_task task_267: on-success: join_task task_268: on-success: join_task task_269: on-success: join_task task_270: on-success: join_task task_271: on-success: join_task task_272: on-success: join_task task_273: on-success: join_task task_274: on-success: join_task task_275: on-success: join_task task_276: on-success: join_task task_277: on-success: join_task task_278: on-success: join_task task_279: on-success: join_task task_280: on-success: join_task task_281: on-success: join_task task_282: on-success: join_task task_283: on-success: join_task task_284: on-success: join_task task_285: on-success: join_task task_286: on-success: join_task task_287: on-success: join_task task_288: on-success: join_task task_289: on-success: join_task task_290: on-success: join_task task_291: on-success: join_task task_292: on-success: join_task task_293: on-success: join_task task_294: on-success: join_task task_295: on-success: join_task task_296: on-success: join_task task_297: on-success: join_task task_298: on-success: join_task task_299: on-success: join_task task_300: on-success: join_task task_301: on-success: join_task task_302: on-success: join_task task_303: on-success: join_task task_304: on-success: join_task task_305: on-success: join_task task_306: on-success: join_task task_307: on-success: join_task task_308: on-success: join_task task_309: on-success: join_task task_310: on-success: join_task task_311: on-success: join_task task_312: on-success: join_task task_313: on-success: join_task task_314: on-success: join_task task_315: on-success: join_task task_316: on-success: join_task task_317: on-success: join_task task_318: on-success: join_task task_319: on-success: join_task task_320: on-success: join_task task_321: on-success: join_task task_322: on-success: join_task task_323: on-success: join_task task_324: on-success: join_task task_325: on-success: join_task task_326: on-success: join_task task_327: on-success: join_task task_328: on-success: join_task task_329: on-success: join_task task_330: on-success: join_task task_331: on-success: join_task task_332: on-success: join_task task_333: on-success: join_task task_334: on-success: join_task task_335: on-success: join_task task_336: on-success: join_task task_337: on-success: join_task task_338: on-success: join_task task_339: on-success: join_task task_340: on-success: join_task task_341: on-success: join_task task_342: on-success: join_task task_343: on-success: join_task task_344: on-success: join_task task_345: on-success: join_task task_346: on-success: join_task task_347: 
on-success: join_task task_348: on-success: join_task task_349: on-success: join_task task_350: on-success: join_task task_351: on-success: join_task task_352: on-success: join_task task_353: on-success: join_task task_354: on-success: join_task task_355: on-success: join_task task_356: on-success: join_task task_357: on-success: join_task task_358: on-success: join_task task_359: on-success: join_task task_360: on-success: join_task task_361: on-success: join_task task_362: on-success: join_task task_363: on-success: join_task task_364: on-success: join_task task_365: on-success: join_task task_366: on-success: join_task task_367: on-success: join_task task_368: on-success: join_task task_369: on-success: join_task task_370: on-success: join_task task_371: on-success: join_task task_372: on-success: join_task task_373: on-success: join_task task_374: on-success: join_task task_375: on-success: join_task task_376: on-success: join_task task_377: on-success: join_task task_378: on-success: join_task task_379: on-success: join_task task_380: on-success: join_task task_381: on-success: join_task task_382: on-success: join_task task_383: on-success: join_task task_384: on-success: join_task task_385: on-success: join_task task_386: on-success: join_task task_387: on-success: join_task task_388: on-success: join_task task_389: on-success: join_task task_390: on-success: join_task task_391: on-success: join_task task_392: on-success: join_task task_393: on-success: join_task task_394: on-success: join_task task_395: on-success: join_task task_396: on-success: join_task task_397: on-success: join_task task_398: on-success: join_task task_399: on-success: join_task task_400: on-success: join_task task_401: on-success: join_task task_402: on-success: join_task task_403: on-success: join_task task_404: on-success: join_task task_405: on-success: join_task task_406: on-success: join_task task_407: on-success: join_task task_408: on-success: join_task task_409: on-success: join_task task_410: on-success: join_task task_411: on-success: join_task task_412: on-success: join_task task_413: on-success: join_task task_414: on-success: join_task task_415: on-success: join_task task_416: on-success: join_task task_417: on-success: join_task task_418: on-success: join_task task_419: on-success: join_task task_420: on-success: join_task task_421: on-success: join_task task_422: on-success: join_task task_423: on-success: join_task task_424: on-success: join_task task_425: on-success: join_task task_426: on-success: join_task task_427: on-success: join_task task_428: on-success: join_task task_429: on-success: join_task task_430: on-success: join_task task_431: on-success: join_task task_432: on-success: join_task task_433: on-success: join_task task_434: on-success: join_task task_435: on-success: join_task task_436: on-success: join_task task_437: on-success: join_task task_438: on-success: join_task task_439: on-success: join_task task_440: on-success: join_task task_441: on-success: join_task task_442: on-success: join_task task_443: on-success: join_task task_444: on-success: join_task task_445: on-success: join_task task_446: on-success: join_task task_447: on-success: join_task task_448: on-success: join_task task_449: on-success: join_task task_450: on-success: join_task task_451: on-success: join_task task_452: on-success: join_task task_453: on-success: join_task task_454: on-success: join_task task_455: on-success: join_task task_456: on-success: join_task task_457: on-success: join_task task_458: 
on-success: join_task task_459: on-success: join_task task_460: on-success: join_task task_461: on-success: join_task task_462: on-success: join_task task_463: on-success: join_task task_464: on-success: join_task task_465: on-success: join_task task_466: on-success: join_task task_467: on-success: join_task task_468: on-success: join_task task_469: on-success: join_task task_470: on-success: join_task task_471: on-success: join_task task_472: on-success: join_task task_473: on-success: join_task task_474: on-success: join_task task_475: on-success: join_task task_476: on-success: join_task task_477: on-success: join_task task_478: on-success: join_task task_479: on-success: join_task task_480: on-success: join_task task_481: on-success: join_task task_482: on-success: join_task task_483: on-success: join_task task_484: on-success: join_task task_485: on-success: join_task task_486: on-success: join_task task_487: on-success: join_task task_488: on-success: join_task task_489: on-success: join_task task_490: on-success: join_task task_491: on-success: join_task task_492: on-success: join_task task_493: on-success: join_task task_494: on-success: join_task task_495: on-success: join_task task_496: on-success: join_task task_497: on-success: join_task task_498: on-success: join_task task_499: on-success: join_task task_500: on-success: join_task ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1575682 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/with_items/0000755000175000017500000000000000000000000023456 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/with_items/count_100_concurrency_10.json0000644000175000017500000000005000000000000030766 0ustar00coreycorey00000000000000{ "count": 100, "concurrency": 10 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/extra/scenarios/with_items/wb.yaml0000644000175000017500000000041300000000000024750 0ustar00coreycorey00000000000000--- version: '2.0' name: with_items_wb workflows: wf: input: - count: 10 - concurrency: 0 tasks: task1: with-items: i in <% range(0, $.count) %> action: std.echo output=<% $.i %> concurrency: <% $.concurrency %> ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1615682 mistral-10.0.0.0b3/rally-jobs/plugins/0000755000175000017500000000000000000000000017652 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/plugins/README.rst0000644000175000017500000000060600000000000021343 0ustar00coreycorey00000000000000Rally plugins ============= All *.py modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need of any extra configuration and there is no difference between writing them here and in rally code base. Note that it is better to push all interesting and useful benchmarks to Rally code base, this simplifies administration for Operators. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/plugins/__init__.py0000644000175000017500000000000000000000000021751 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/plugins/mistral_expressions_scenario.py0000644000175000017500000001017600000000000026231 0ustar00coreycorey00000000000000# Copyright 2020 - Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import math import random import string from rally.task import validation from rally_openstack import consts from rally_openstack import scenario from rally_openstack.scenarios.mistral import utils def random_string(length=10): """Generate a random string of given length """ letters = string.ascii_lowercase return ''.join(random.choices(letters, k=length)) class MistralExpressionScenario(utils.MistralScenario): def run(self, tasks_number=50, params_size_mb=5): wf_text, wf_name = self.create_wf_string(tasks_number) params = self.create_params(params_size_mb) self._create_workflow(wf_text) self._create_execution(wf_name, **params) def create_params(self, size_mb): block_size_mb = 0.2 rand_string = random_string(105400) number_of_fields = math.floor(size_mb / block_size_mb) data_list = '' # each one of these blocks is 200kb data_template = """ "data%s": { "dummy": 5470438, "data_value": -796997888, "sub_data": { "meta_data": { "value": "%s", "text": "dummy text" }, "field1": "%s", "field2": false, "field3": { "value1": -1081872761.2081857, "value2": -1081872761.2081857 } } }, """ wf_params = """ { "field": "some Value", "data": { {{{__DATA_LIST__}}} } } """ for i in range(1, int(number_of_fields + 1)): data_list += data_template % (i, rand_string, rand_string) data_list = data_list[:-2] wf_params = wf_params.replace('{{{__DATA_LIST__}}}', data_list) params = json.loads(wf_params) return params def get_query(self): raise NotImplementedError def create_wf_string(self, tasks_number): wf_tasks = '' wf_name = 'wf_{}'.format(random_string(5)) query = self.get_query() wf_text = """ version: '2.0' {}: tasks: task0: action: std.noop {{{__TASK_LIST__}}} """ task_template = """ task{}: action: std.noop publish: output{}: {} """ for i in range(1, tasks_number + 1): wf_tasks += task_template.format(i, i, query) wf_text = wf_text.replace('{{{__TASK_LIST__}}}', wf_tasks) wf_text = wf_text.format(wf_name) return wf_text, wf_name @validation.add("required_platform", platform="openstack", users=True) @validation.add("required_services", services=[consts.Service.MISTRAL]) @scenario.configure(name="MistralExecutions.YaqlExpression", platform="openstack") class YaqlExpressionScenario(MistralExpressionScenario): def get_query(self): return '<% data %>' @validation.add("required_platform", platform="openstack", users=True) @validation.add("required_services", services=[consts.Service.MISTRAL]) 
@scenario.configure(name="MistralExecutions.JinjaExpression", platform="openstack") class JinjaExpressionScenario(MistralExpressionScenario): def get_query(self): return '{{ data }} ' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/rally-jobs/task-mistral.yaml0000644000175000017500000000772300000000000021501 0ustar00coreycorey00000000000000{% set extra_dir = "~/.rally/extra" %} --- MistralWorkbooks.list_workbooks: - runner: type: "constant" times: 50 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 MistralWorkbooks.create_workbook: - args: definition: "{{ extra_dir }}/mistral_wb.yaml" runner: type: "constant" times: 50 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 - args: definition: "{{ extra_dir }}/mistral_wb.yaml" do_delete: true runner: type: "constant" times: 50 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 MistralExecutions.list_executions: - runner: type: "constant" times: 50 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 MistralExecutions.create_execution_from_workbook: - args: definition: "{{ extra_dir }}/mistral_wb.yaml" do_delete: true runner: type: "constant" times: 20 concurrency: 5 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 - args: definition: "{{ extra_dir }}/nested_wb.yaml" workflow_name: "wrapping_wf" do_delete: true runner: type: "constant" times: 20 concurrency: 5 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 - args: definition: "{{ extra_dir }}/scenarios/complex_wf/complex_wf_wb.yaml" workflow_name: "top_level_workflow" params: "{{ extra_dir }}/scenarios/complex_wf/complex_wf_params.json" do_delete: true runner: type: "constant" times: 20 concurrency: 5 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 - args: definition: "{{ extra_dir }}/scenarios/with_items/wb.yaml" params: "{{ extra_dir }}/scenarios/with_items/count_100_concurrency_10.json" do_delete: true runner: type: "constant" times: 20 concurrency: 5 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 - args: definition: "{{ extra_dir }}/scenarios/join/join_500_wb.yaml" do_delete: true runner: type: "constant" times: 10 concurrency: 2 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 - args: definition: "{{ extra_dir }}/scenarios/join/join_500_wb.yaml" do_delete: true runner: type: "constant" times: 10 concurrency: 1 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 MistralExecutions.YaqlExpression: - args: tasks_number: 30 params_size_mb: 4 runner: type: "constant" times: 50 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 MistralExecutions.JinjaExpression: - args: tasks_number: 30 params_size_mb: 4 runner: type: "constant" times: 50 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.0775666 mistral-10.0.0.0b3/releasenotes/0000755000175000017500000000000000000000000016604 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1735685 mistral-10.0.0.0b3/releasenotes/notes/0000755000175000017500000000000000000000000017734 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/.placeholder0000644000175000017500000000000000000000000022205 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add-action-region-to-actions-353f6c4b10f76677.yaml0000644000175000017500000000063600000000000030254 0ustar00coreycorey00000000000000--- features: - Support to specify 'action_region' for OpenStack actions so that it's possible to operate different resources in different regions in one single workflow. upgrade: - Run ``python tools/sync_db.py --config-file `` to re-populate database. deprecations: - The config option 'os-actions-endpoint-type' is moved from DEFAULT group to 'openstack_actions' group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add-execution-event-notifications-0f77c1c3eb1d6929.yaml0000644000175000017500000000042200000000000031556 0ustar00coreycorey00000000000000--- features: - | Introduce execution events and notification server and plugins for publishing these events for consumers. Event notification is defined per workflow execution and can be configured to notify on all the events or only for specific events. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add-json-dump-deprecate-json-pp-252c6c495fd2dea1.yaml0000644000175000017500000000055400000000000031077 0ustar00coreycorey00000000000000--- features: - | A new YAQL/jinja2 expression function has been added for outputting JSON. It is json_dump and accepts one argument, which is the object to be serialised to JSON. deprecations: - | The YAQL/jinja2 expression function ``json_pp`` has been deprecated and will be removed in the S cycle. ``json_dump`` should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add-missing-tacker-actions-dddcf77ddd90192f.yaml0000644000175000017500000000072100000000000030373 0ustar00coreycorey00000000000000--- features: - | Add missing Tacker actions to Mistral that includes vnf forwarding graph (vnffg), vnffg descriptor, network service (ns) and ns descriptor actions - vnffgd actions: create_vnffgd, delete_vnffgd, list_vnffgds, show_vnffgd - vnffg actions: create_vnffg, update_vnffg, delete_vnffg, list_vnffgs, show_vnffg - nsd actions: create_nsd, delete_nsd, list_nsds, show_nsd - ns actions: create_ns, delete_ns, list_nss, show_ns././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add-publicize-policy-d3b44590286c7fdd.yaml0000644000175000017500000000046600000000000027063 0ustar00coreycorey00000000000000--- features: - | Mistral now supports a `publicize` policy on actions and workflows which controls whether the users are allowed to create or update them. The default policy does not change which means that everyone can publish action or workflow unless specified differently in the policy. 
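A minimal workflow sketch (names hypothetical) showing the ``json_dump`` expression function described in the note above; it serialises a published data structure to JSON before echoing it:

    ---
    version: '2.0'

    json_dump_example_wf:
      tasks:
        build_data:
          action: std.noop
          publish:
            raw: <% {'a' => 1, 'b' => 2} %>
          on-success: dump_data

        dump_data:
          # json_dump takes one argument: the object to serialise to JSON.
          action: std.echo output=<% json_dump($.raw) %>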
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add-py-mini-racer-javascript-evaluator-9d8f9e0e36504d72.yaml0000644000175000017500000000034600000000000032343 0ustar00coreycorey00000000000000--- features: - | Added the new JavaScript evaluator py_mini_racer. The py_mini_racer package provides a JavaScript evaluator that doesn't require compilation. This makes it much lighter and easier to get started with.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add-task_execution_id-indexes-16edc58085e47663.yaml0000644000175000017500000000021100000000000030571 0ustar00coreycorey00000000000000--- fixes: - | Added new indexes on the task_execution_id column of the action_executions_v2 and workflow_executions_v2 tables.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add_action_definition_caching-78d4446d61c6d739.yaml0000644000175000017500000000070000000000000030663 0ustar00coreycorey00000000000000--- features: - | Enable caching of action definitions in local memory. Now, instead of downloading the definitions from the database every time, the Mistral engine stores them in a local cache. This should reduce the number of database requests and improve the overall performance of the system. The cache TTL can be configured with the ``action_definition_cache_time`` option in the [engine] group. The default value is 60 seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add_config_option_for_oslo_rpc_executor-44afe1f728afdcb2.yaml0000644000175000017500000000172700000000000033413 0ustar00coreycorey00000000000000--- features: - | Added the config option "oslo_rpc_executor" that sets the executor type used by the Oslo Messaging framework. It defines how the Oslo Messaging based RPC subsystem processes incoming calls. Allowed values: "eventlet", "threading" and "blocking". However, "blocking" is deprecated by the Oslo Messaging team and may be removed in a future version. The reason for adding this option was the issues occurring when using the MySQLdb database driver with the "eventlet" RPC executor. Once in a while, the system would hang on a deadlock caused by the fact that the DB driver wasn't eventlet-friendly and dispatching of green threads didn't work properly. That's why "blocking" was used. Now it's been proven that a combination of the "eventlet" executor and the PyMySQL driver works well. The configuration option for the RPC executor, though, allows rolling back to "blocking" in case a regression is found, or experimenting with "threading". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add_more_logging_for_sending_actions-c2ddd97027803ecd.yaml0000644000175000017500000000042700000000000032476 0ustar00coreycorey00000000000000--- fixes: - | Mistral didn't log enough info about sending actions to the executor and receiving them on the executor side. This made it hard to debug situations when an action got stuck in the RUNNING state. It has now been fixed by adding additional log statements.
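A hedged mistral.conf sketch combining the two options described in the notes above. The [engine] group for the cache TTL is stated in the note; the group for "oslo_rpc_executor" is an assumption here, and the values are illustrative:

    [engine]
    # Cache action definitions for two minutes instead of the default 60 seconds.
    action_definition_cache_time = 120

    [DEFAULT]
    # Group assumed. "eventlet" pairs well with the PyMySQL driver;
    # "blocking" remains available as a deprecated fallback.
    oslo_rpc_executor = eventlet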
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add_public_event_triggers-ab6249ca85fd5497.yaml0000644000175000017500000000043700000000000030252 0ustar00coreycorey00000000000000--- features: - | Added the ability to create public event triggers. Public event triggers are applied to all projects, i.e. workflows are triggered by events in any project. Currently, public event triggers may be created only by an admin, but this can be changed in policy.json. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add_root_execution_id_to_jinja-90b67c69a50370b5.yaml0000644000175000017500000000031700000000000031106 0ustar00coreycorey00000000000000--- features: - | This makes the root_execution_id available to the Jinja execution object. Before this, it was only possible to get it by filtering and querying the executions search API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add_skip_validation-9e8b906c45bdb89f.yaml0000644000175000017500000000207400000000000027132 0ustar00coreycorey00000000000000--- features: - | The new configuration option "validation_mode" was added. It can take one of the values: "enabled", "mandatory", "disabled". If it is set to "enabled" then Mistral validates the workflow language syntax for all API operations that create or update workflows (either via /v2/workflows or /v2/workbooks) unless validation is explicitly disabled with the API parameter "skip_validation", which has now been added to the corresponding API endpoints. The "skip_validation" parameter doesn't have to have any value since it's a boolean flag. If the configuration option "validation_mode" is set to "mandatory" then Mistral will always validate the syntax of all workflows for the mentioned operations. If set to "disabled" then validation will always be skipped. Note that if validation is disabled (one way or another) then there's a risk of breaking a workflow unexpectedly while it's running, or of getting an unexpected error when uploading it, possibly without a user-friendly description of the error. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=mistral-10.0.0.0b3/releasenotes/notes/add_yaql_conver_output_data_config_option-4a0fa926a736de7e.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add_yaql_conver_output_data_config_option-4a0fa926a736de7e.yam0000644000175000017500000000116600000000000033430 0ustar00coreycorey00000000000000--- fixes: - | Added the configuration option "convert_output_data" in the "yaql" group. This option, if set to False, allows disabling YAQL expression result conversion. This fixes performance issues for lots of use cases where the result of an expression is a big data structure. In fact, YAQL created a copy of this data structure every time before giving it to Mistral. This option can't be set to False when the corresponding "convert_input_data" is True; otherwise, it doesn't work correctly. By default, the value of "convert_output_data" is True, which keeps backwards compatibility.
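A sketch of the [yaql] tuning described above. Per the note, output conversion may only be disabled when input conversion is disabled as well:

    [yaql]
    # Both must be False together; disabling them avoids copying large
    # expression results and data contexts on every evaluation.
    convert_input_data = False
    convert_output_data = False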
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=mistral-10.0.0.0b3/releasenotes/notes/add_yaql_convert_input_data_config_property-09822dee1f46eb8e.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add_yaql_convert_input_data_config_property-09822dee1f46eb8e.y0000644000175000017500000000102400000000000033447 0ustar00coreycorey00000000000000--- fixes: - | Added the "convert_input_data" config property under the "yaql" group. By default it's set to True, which preserves the current behavior so there's no risk with compatibility. If set to False, it disables the additional data conversion that was initially added to support some tricky cases like working with sets of dicts (although dict is not a hashable type and can't be put into a set). Disabling it gives a significant performance boost in cases when data contexts are very large. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/add_yaql_engine_options-200fdcfda04683ca.yaml0000644000175000017500000000067500000000000030046 0ustar00coreycorey00000000000000--- features: - | Added several config options that allow tweaking some aspects of the YAQL engine behavior. fixes: - | Fixed how Mistral initializes a child YAQL context before evaluating YAQL expressions. The given data context needs to go through a special filter that prepares the data properly, does conversion into internal types etc. Also, without this change YAQL engine options are not applied properly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/allow_none_for_workflow_execution_params-f25b752e207d51d7.yaml0000644000175000017500000000032100000000000033334 0ustar00coreycorey00000000000000--- fixes: - | Fixed a backward compatibility issue: there was a change made in Rocky that disallowed the 'params' property of a workflow execution to be None when one wants to start a workflow. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/alternative-rpc-layer-21ca7f6171c8f628.yaml0000644000175000017500000000037500000000000027200 0ustar00coreycorey00000000000000--- features: - Mistral now supports the usage of an alternative RPC layer that calls RabbitMQ directly instead of using Oslo. - Tasks support the new flag 'safe-rerun'. If it is set to 'true', a task will be re-run if the executor dies during execution. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/changing-context-in-delayed-calls-78d8e9a622fe3fe9.yaml0000644000175000017500000000040200000000000031504 0ustar00coreycorey00000000000000--- security: - > [`bug 1521802 `_] Fixed the problem that sometimes sub-workflow executions were run/saved under the wrong tenant in the cron trigger periodic task in a multi-tenancy deployment.
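The 'safe-rerun' task flag mentioned above is set directly on a task; a minimal sketch:

    ---
    version: '2.0'

    resilient_wf:
      tasks:
        call_service:
          action: std.http url="http://example.com/api"
          # Re-run this task if the executor dies while executing it.
          safe-rerun: true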
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=mistral-10.0.0.0b3/releasenotes/notes/changing-isolation-level-to-read-committed-7080833ad284b901.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/changing-isolation-level-to-read-committed-7080833ad284b901.ya0000644000175000017500000000624700000000000032467 0ustar00coreycorey00000000000000--- fixes: - | [`bug 1518012 `_] [`bug 1513456 `_] Fix concurrency issues by using READ_COMMITTED This release note describes bugs: * #1513456 - task stuck in RUNNING state when all action executions are finished regarding the problem and the fix. * #1518012- WF execution stays in RUNNING although task and action executions are in SUCCESS. This fix does not require any action from Mistral users and does not have any implications other than the bug fix. The state of a workflow execution was not updated even when all task executions were completed if some tasks finished at the same time as other tasks. Because we were using our connections with transaction isolation level = REPEATABLE_READ - Each process was using a snapshot of the DB created at the first read statement in that transaction. When a task finished and evaluated the state of all the other tasks it did not see the up-to-date state of those tasks - and so, because not all tasks were completed - the task did not change the workflow execution state. Similar behavior happened with multiple action executions under same task. On completion, each action execution checked the status of the other action executions and did not see the up-to-date state of these action execution - causing task execution to stay in RUNNING state. The solution is to change DB transaction isolation level from REPEATABLE_READ to READ_COMMITTED so process A can see changes committed in other transactions even if process A is in the middle of a transaction. A short explanation regarding the different isolation levels: - | REPEATABLE_READ - while in transaction, the first read operation to the DB creates a snapshot of the entire DB so you are guarantee that all the data in the DB will remain the same until the end of the transaction. REPEATABLE_READ example: * ConnectionA selects from tableA in a transaction. * ConnectionB deletes all rows from tableB in a transaction. * ConnectionB commits. * ConnectionA loops over the rows of tableA and fetches from tableB using the tableA_tableB_FK - ConnectionA will get rows from tableB. - | READ_COMMITTED - while in a transaction, every query to the DB will get the committed data. READ_COMMITTED example: * ConnectionA starts a transaction. * ConnectionB starts a transaction. * ConnectionA insert row to tableA and commits. * ConnectionB insert row to tableA. * ConnectionB selects tableA and gets two rows. * ConnectionB commits / rollback. Two good articles about isolation levels are: * `Differences between READ-COMMITTED and REPEATABLE-READ transaction isolation levels `_. * `MySQL performance implications of InnoDB isolation modes `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/cleanup-rpc-cleints-transport-eaa90fef070b81fd.yaml0000644000175000017500000000052600000000000031157 0ustar00coreycorey00000000000000--- fixes: - | Cleanup transports along RPC clients. Fixed a bad weird condition in the API server related to cron-triggers and SIGHUP. The parent API server creates a RPC connection when creating workflows from cron triggers. 
If a SIGHUP signal happens afterwards, the child inherits the connection, but it's non-functional. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/clone_cached_action_definitions-e8b6005b467f35f2.yaml0000644000175000017500000000025500000000000031301 0ustar00coreycorey00000000000000--- fixes: - | Sometimes Mistral was raising DetachedInstanceError for action definitions coming from the cache. It's now fixed by cloning objects before caching them. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/close-stuck-running-action-executions-b67deda65d117cee.yaml0000644000175000017500000000034100000000000032626 0ustar00coreycorey00000000000000--- features: - > [`blueprint action-execution-reporting `_] Introduced a mechanism to close action executions that are stuck in the RUNNING state. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=mistral-10.0.0.0b3/releasenotes/notes/create-and-run-workflows-within-namespaces-e4fba869a889f55f.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/create-and-run-workflows-within-namespaces-e4fba869a889f55f.ya0000644000175000017500000000153400000000000033075 0ustar00coreycorey00000000000000--- features: - | Creating and running workflows within a namespace. Workflows with the same name can be added to the same project as long as they are within a different namespace. This feature is backwards compatible. All existing workflows are assumed to be in the default namespace, represented by an empty string. Also, if a workflow is created without a namespace specified, it is assumed to be in the default namespace. When a workflow is being executed, the namespace is saved under params and passed to all its sub-workflow executions. When looking for the next sub-workflow to run, the correct workflow will be found by name and namespace, where the namespace can be the workflow namespace or the default namespace. Workflows in the same namespace as the top workflow will be given a higher priority. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/drop-ceilometerclient-b33330a28906759e.yaml0000644000175000017500000000023400000000000027107 0ustar00coreycorey00000000000000--- fixes: - | Remove the ceilometerclient requirement. This library is not maintained and the Ceilometer API is dead, so let's drop this integration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/drop-py-2-7-d6ce46d3dc571c01.yaml0000644000175000017500000000031400000000000025003 0ustar00coreycorey00000000000000--- upgrade: - | Python 2.7 support has been dropped. The last release of Mistral to support Python 2.7 is OpenStack Train. The minimum version of Python now supported by Mistral is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/evaluate_env_parameter-14baa54c860da11c.yaml0000644000175000017500000000143400000000000027612 0ustar00coreycorey00000000000000--- fixes: - When we pass a workflow environment to workflow parameters using 'env', Mistral first evaluates it, assuming that it can contain expressions (YAQL/Jinja). For example, one environment variable can be expressed through the other.
In some cases this causes problems, for example, if the environment is too big and has many expressions, especially something like <% $ %> or <% env() %>. Also, in some cases we don't want any evaluation to happen, e.g. when we want to keep some informative text containing expressions in the environment. In order to address that, the 'evaluate_env' workflow parameter was added, defaulting to True for backwards compatibility. If it's set to False then evaluation of expressions in the environment is disabled. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=mistral-10.0.0.0b3/releasenotes/notes/external_openstack_action_mapping_support-5cec5d9d5192feb7.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/external_openstack_action_mapping_support-5cec5d9d5192feb7.yam0000644000175000017500000000051100000000000033566 0ustar00coreycorey00000000000000--- features: - An external OpenStack action mapping file can be specified for the sync_db.sh or mistral-db-manage script. For more details see 'sync_db.sh --help' or 'mistral-db-manage --help'. - It is now optional to list OpenStack modules in the mapping file that you would not include in the supported action set. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix-auth-context-with-big-catalog-7647a07d616e653f.yaml0000644000175000017500000000064500000000000031243 0ustar00coreycorey00000000000000--- upgrade: - | Run ``mistral-db-manage --config-file upgrade head`` to ensure the database schema is up-to-date. fixes: - | [`bug 1785654 `_] Fixed a bug that prevented any action from running if the OpenStack catalog returned by Keystone is larger than 64kB and the backend is MySQL/MariaDB. The limit is now increased to 16MB. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix-event-engines-ha-cc78f341095cdabf.yaml0000644000175000017500000000023500000000000027117 0ustar00coreycorey00000000000000--- fixes: - | [`bug 1715848 `_] Fixed a bug that prevented event-engines from working correctly in HA. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix-jinja-expression-handling-135451645d7a4e6f.yaml0000644000175000017500000000022600000000000030536 0ustar00coreycorey00000000000000--- fixes: - Fixed Jinja expression error handling, where an invalid expression could prevent the action or task status from being correctly updated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix-next-url-formatting-2cc0d8a27625c73a.yaml0000644000175000017500000000036700000000000027546 0ustar00coreycorey00000000000000--- fixes: - | Fixed an issue where the next link in some list APIs, when invoked with pagination and filter(s), contained a JSON string, which made the next link an invalid URL. This issue impacted all REST APIs where filters can be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix-regression-when-logging-58faa35f02cefb34.yaml0000644000175000017500000000026300000000000030521 0ustar00coreycorey00000000000000--- fixes: - | A regression was introduced that caused an error when logging a specific message. The string formatting was broken, which caused the logging to fail.
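A sketch of workflow parameters using the 'evaluate_env' flag described earlier to keep informative, expression-looking text in the environment untouched; the exact shape of the params payload here is an assumption:

    params:
      evaluate_env: false
      env:
        # Kept verbatim because evaluation is disabled.
        hint: "Use <% env() %> to read environment variables."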
././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=mistral-10.0.0.0b3/releasenotes/notes/fix_error_validate_token_when_run_cron_trigger-7beffc06b75294fb.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix_error_validate_token_when_run_cron_trigger-7beffc06b75294f0000644000175000017500000000023300000000000033621 0ustar00coreycorey00000000000000--- fixes: - | Fixed an error validating the token when running a cron trigger. The problem was that a trust client couldn't validate the token when a cron trigger ran. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix_has_next_tasks_field_calculation-5717f93d7adcd9b0.yaml0000644000175000017500000000035500000000000032535 0ustar00coreycorey00000000000000--- fixes: - | There was a typo in the list generator expression made in https://review.opendev.org/#/c/652575 that led to calculating a field value in the wrong way. It has been fixed; the added test was previously failing. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=mistral-10.0.0.0b3/releasenotes/notes/fix_join_when_last_finished_indirect_error-b0e5adf99cde9a58.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix_join_when_last_finished_indirect_error-b0e5adf99cde9a58.ya0000644000175000017500000000024100000000000033551 0ustar00coreycorey00000000000000--- fixes: - | Fixed the issue where a "join" task remained in the WAITING state forever if the last inbound task failed and was not a direct predecessor. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix_pause_command-58294f613488511c.yaml0000644000175000017500000000100400000000000026223 0ustar00coreycorey00000000000000--- fixes: - Fixed the logic of the 'pause' command. Before the fix, Mistral wouldn't run any commands specified in the 'on-success', 'on-error' and 'on-complete' clauses following the 'pause' command when a workflow was resumed after it. Now it works as expected: if Mistral encounters 'pause' in the list of commands, it saves all commands following it to the special backlog storage, and when/if the workflow is later resumed it checks that storage and runs the commands from it first. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix_task_function-04b83ada20a71f12.yaml0000644000175000017500000000033600000000000026527 0ustar00coreycorey00000000000000--- fixes: - | "__task_execution" wasn't always included in the expression data context, so the function task() didn't work properly. Fixes [`bug 1823875 `_] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix_task_state_info_assignment-e25481ce8c3193ba.yaml0000644000175000017500000000070700000000000031313 0ustar00coreycorey00000000000000--- fixes: - | If an action execution fails but returns a result as a list (error=[]), the result of this action is assigned to the task execution 'state_info' field, which is a string according to the DB model. On Python 3 this list magically converts to a string. On Python 2.7 it doesn't. The reason is probably in how SQLAlchemy works on different versions of Python. This has now been fixed with an explicit type coercion.
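The task() function referenced in the fix above can be called without arguments to refer to the current task execution; a minimal sketch:

    ---
    version: '2.0'

    task_function_wf:
      tasks:
        step1:
          action: std.echo output="hello"
          publish:
            # task() with no arguments refers to this very task.
            step1_result: <% task().result %>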
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/fix_workflow_output-cee5df431679de6b.yaml0000644000175000017500000000115400000000000027351 0ustar00coreycorey00000000000000--- fixes: - | Workflow output sometimes was not calculated correctly due to the race condition between different transactions: the one that checks workflow completion (i.e. calls "check_and_complete") and the one that processes action execution completion (i.e. calls "on_action_complete"). Calculating output sometimes was based on stale data cached by the SQLAlchemy session. To fix this, we just need to expire all objects in the session so that they are refreshed automatically if we read their state in order to make required calculations. The corresponding change was made. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/force-stop-executions-00cd67dbbc9b5483.yaml0000644000175000017500000000065600000000000027367 0ustar00coreycorey00000000000000--- features: - Use of the parameter force to forcefully delete executions. Note using this parameter on unfinished executions might cause a cascade of errors. issues: - Deleting unfinished executions might cause a cascade of errors, so the standard behaviour has been changed to delete only safe to delete executions and a new parameter force was added to forceful delete ignoring the state the execution is in. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=mistral-10.0.0.0b3/releasenotes/notes/function-called-tasks-available-in-an-expression-17ca83d797ffb3ab.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/function-called-tasks-available-in-an-expression-17ca83d797ffb0000644000175000017500000000045500000000000033165 0ustar00coreycorey00000000000000--- features: - | New function, called tasks, available from within an expression (Yaql, Jinja2). This function allows to filter all tasks of a user by workflow execution id and/or state. In addition it is possible to get tasks recursively and flatten the tasks list. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/http-proxy-to-wsgi-oslo-middleware-f66f1b9533ea1e8a.yaml0000644000175000017500000000101400000000000031727 0ustar00coreycorey00000000000000--- features: - | Added HTTPProxyToWSGI middleware in front of the Mistral API. The purpose of this middleware is to set up the request URL correctly in the case there is a proxy (for instance, a loadbalancer such as HAProxy) in front of the Mistral API. The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. Fixes [`bug 1590608 `_] Fixes [`bug 1816364 `_] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/improve_std_html_action-eca10df5bf934be8.yaml0000644000175000017500000000012400000000000030166 0ustar00coreycorey00000000000000--- features: - | Improves std.email action with cc, bcc and html formatting. 
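The tasks() expression function described above filters task executions; the note doesn't spell out the signature, so the argument order below (execution id, recursive flag, state filter, flatten flag) is an assumption:

    publish:
      # All ERROR tasks of this execution, collected recursively and flattened.
      failed_tasks: <% tasks(execution().id, true, ERROR, true) %>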
././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=mistral-10.0.0.0b3/releasenotes/notes/include-output-paramter-in-action-execution-list-c946f1b38dc5a052.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/include-output-paramter-in-action-execution-list-c946f1b38dc5a0000644000175000017500000000075500000000000033275 0ustar00coreycorey00000000000000--- features: - | A new parameter called 'include_output' was added to the action execution API. By default, the output field is not returned when calling the list action executions API. critical: - | By default, the output field will not be returned when calling list action executions. In the previous version it was, so if a user relied on this and/or wants to get the output field when calling the list action executions API, it is possible only by using the new include_output parameter. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/include_root_cause_of_action_error_first-4a730a7cbc36f375.yaml0000644000175000017500000000105400000000000033341 0ustar00coreycorey00000000000000--- fixes: - | Some users rely on the presence of the root error related to running an action, and it's not convenient that it is at the end of the string, e.g. if we look at the corresponding task execution "state_info" field. Now the cause error message is included at the beginning of the resulting error string returned by the action executor so that it's clearly visible. This message can also be truncated in some cases (depending on the config option), so we need to make sure we keep the cause error message. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/ironic-api-newton-9397da8135bb97b4.yaml0000644000175000017500000000073000000000000026333 0ustar00coreycorey00000000000000--- features: - It is now possible to use the Bare metal (Ironic) API features introduced in API versions 1.10 to 1.22. upgrade: - The required Ironic API version was bumped to '1.22' (corresponding to Ironic 6.2.0 - Newton final release). - Due to the default Ironic API version change to '1.22', new bare metal nodes created with the 'node_create' action appear in the "enroll" provision state instead of "available". Please update your workflows accordingly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/keycloak-auth-support-74131b49e2071762.yaml0000644000175000017500000000015600000000000027007 0ustar00coreycorey00000000000000--- features: - Mistral now supports authentication with a KeyCloak server using the OpenID Connect protocol. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/load-keystoneauth-option-d9657d3052e82125.yaml0000644000175000017500000000072300000000000027571 0ustar00coreycorey00000000000000--- fixes: - | A new config option section `[keystone]` was added. The options in the section come from keystoneauth by default. Please use them to talk to Keystone via a session. If an option value is not set, to keep backward compatibility, Mistral will read the value from the same option in `[keystone_authtoken]`. The override behavior will be removed in Stein. Please move the options into `[keystone]` if you still want to use them.
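A sketch of the 'include_output' API parameter described above, assuming the standard v2 endpoint path:

    GET /v2/action_executions?include_output=true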
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/magnum-actions-support-b131fa942b937fa5.yaml0000644000175000017500000000033600000000000027471 0ustar00coreycorey00000000000000--- features: - Magnum actions are now supported. upgrade: - During an upgrade to Newton, operators or administrators need to run ``python tools/sync_db.py`` to populate the database with Magnum action definitions. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/make_integrity_checker_work_with_batches-56c1cd94200d4c38.yaml0000644000175000017500000000131200000000000033232 0ustar00coreycorey00000000000000--- fixes: - | The workflow execution integrity checker mechanism was too aggressive in the case of big workflows that have many task executions in the RUNNING state at the same time. The mechanism was selecting them all in one query and calling "on_action_complete" for each of them within a single DB transaction. That could lead to situations when this mechanism would totally block normal workflow processing, whereas it should only be a "last chance" aid in case of real infrastructure failures (e.g. an MQ outage). This issue has been fixed by adding a configurable batch size, so that the checker can't select more than this number of task executions in the RUNNING state at once. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-aodh-actions-e4c2b7598d2e39ef.yaml0000644000175000017500000000006200000000000027154 0ustar00coreycorey00000000000000--- features: - Aodh actions are now supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-api-server-https-716a6d741893dd23.yaml0000644000175000017500000000012100000000000027563 0ustar00coreycorey00000000000000--- features: - The Mistral API server can be configured to handle HTTPS requests. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-customize-authorization-d6b9a965f3056f09.yaml0000644000175000017500000000006600000000000031365 0ustar00coreycorey00000000000000--- features: - Role-based access control was added. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-docker-image-9d6e04ac928289dd.yaml0000644000175000017500000000014500000000000027053 0ustar00coreycorey00000000000000--- prelude: > A pre-installed Mistral docker image is now available to get a quick idea of Mistral. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-engine-scale-in-bd348f9237f32481.yaml0000644000175000017500000000154200000000000027316 0ustar00coreycorey00000000000000features: - | The Mistral engine now supports graceful scale-in. That is, if the number of engines in a cluster needs to be reduced manually, it is now possible to do so w/o breaking currently running workflows. In order to shut down a Mistral engine, a SIGTERM signal needs to be sent to the corresponding process. On Unix operating systems it's a matter of running the command "kill " in any shell.
When this signal is caught by the process, it has a certain amount of time, configured by the 'graceful_shutdown_timeout' property, to complete currently running database transactions and process all buffered RPC messages that have already been polled from the queue. After this time elapses, the process will be forced to exit. By default, the value of the 'graceful_shutdown_timeout' property is 60 (seconds). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-gnocchi-actions-f26fd76b8a4df40e.yaml0000644000175000017500000000006500000000000027726 0ustar00coreycorey00000000000000--- features: - Gnocchi actions are now supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-murano-actions-2250f745aaf8536a.yaml0000644000175000017500000000006400000000000027363 0ustar00coreycorey00000000000000--- features: - Murano actions are now supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-senlin-actions-f3fe359c4e91de01.yaml0000644000175000017500000000006400000000000027521 0ustar00coreycorey00000000000000--- features: - Senlin actions are now supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-tempest-plugin-2f6dcbceb4d27eb0.yaml0000644000175000017500000000021500000000000027752 0ustar00coreycorey00000000000000--- prelude: > The Tempest plugin has been implemented. Now Mistral tests can be run from the Mistral repo as well as from the Tempest repo. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/mistral-vitrage-actions-a205b8ea82b43cab.yaml0000644000175000017500000000011500000000000027723 0ustar00coreycorey00000000000000--- features: - Add Mistral actions for OpenStack Vitrage, the RCA service. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=mistral-10.0.0.0b3/releasenotes/notes/move_openstack_actions_from_mistral_to_mistral_extra-b3f7bc71ffd72c6e.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/move_openstack_actions_from_mistral_to_mistral_extra-b3f7bc71f0000644000175000017500000000011500000000000034211 0ustar00coreycorey00000000000000--- features: - Move Mistral actions for OpenStack to the mistral-extra library.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/namespace_for_adhoc_actions.yaml0000644000175000017500000000104000000000000026275 0ustar00coreycorey00000000000000--- features: - | Add support for creating ad-hoc actions in a namespace. Creating actions with the same name is now possible inside the same project. This feature is backward compatible. All existing actions are assumed to be in the default namespace, represented by an empty string. Also, if an action is created without a namespace specified, it is assumed to be in the default namespace. If an ad-hoc action is created inside a workbook, then the namespace of the workbook would also be its namespace.
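A hedged configuration sketch for the graceful scale-in described above; the note names only the 'graceful_shutdown_timeout' property, so the group is an assumption:

    [DEFAULT]
    # Group assumed. Seconds allowed for finishing DB transactions and
    # buffered RPC messages after SIGTERM before the process is forced to exit.
    graceful_shutdown_timeout = 120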
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/namespace_for_workbooks.yaml0000644000175000017500000000122500000000000025522 0ustar00coreycorey00000000000000--- features: - | Add support for creating workbooks in a namespace. Creating workbooks with the same name is now possible inside the same project. This feature is backward compatible. All existing workbooks are assumed to be in the default namespace, represented by an empty string. Also, if a workbook is created without a namespace specified, it is assumed to be in the default namespace. When a workbook is created, its namespace is inherited by the workflows contained within it. All operations on a particular workbook require a combination of name and namespace to uniquely identify a workbook inside a project. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/new-service-actions-support-47279bd649732632.yaml0000644000175000017500000000107300000000000030152 0ustar00coreycorey00000000000000--- prelude: > Actions of several OpenStack services are supported out of the box in Mitaka, including Barbican, Cinder(V2), Swift, Trove, Zaqar and Mistral. upgrade: - During an upgrade to Mitaka, operators or administrators need to run the ``python tools/get_action_list.py `` command to generate service action names and values for updating ``mistral/actions/openstack/mapping.json``, then run ``python tools/sync_db.py`` to populate the database. Please note that some services like Neutron, Swift and Zaqar don't support the command yet. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/optimize_adhoc_actions_scheduling-e324f66f962ae409.yaml0000644000175000017500000000055200000000000031714 0ustar00coreycorey00000000000000--- fixes: - | For an ad-hoc action, preparing input for its base action was done more than once. It happened during the validation phase and the scheduling phase. However, input preparation may be expensive in case of heavy expressions and data contexts. This has now been fixed by caching a prepared input within an AdHocAction instance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/policy-and-doc-in-code-9f1737c474998991.yaml0000644000175000017500000000117300000000000026715 0ustar00coreycorey00000000000000--- features: - | Mistral now supports policy in code, which means that if users haven't modified any policy rules, they can leave the policy file (in `json` or `yaml` format) empty or just remove it altogether, because Mistral now keeps all default policies under the `mistral/policies` module. Users can still modify/generate a `policy.yaml` file, which will override the in-code policy rules for those rules that appear in the `policy.yaml` file. other: - | The default `policy.json` file is now removed as Mistral now generates the default policies in code. Please be aware of that when using such a file in your environment.
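With the defaults now in code, a policy.yaml only needs the overrides; a sketch (the rule name follows the `publicize` policy mentioned earlier in these notes and is an assumption):

    # policy.yaml — override only what differs from the in-code defaults.
    "workflows:publicize": "rule:admin_only"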
././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=mistral-10.0.0.0b3/releasenotes/notes/refactor_action_heartbeats_without_scheduler-9c3500d6a2b25a4d.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/refactor_action_heartbeats_without_scheduler-9c3500d6a2b25a4d.0000644000175000017500000000124000000000000033317 0ustar00coreycorey00000000000000--- fixes: - | Action heartbeat checker was using scheduler to process expired action executions periodically. The side effect was that upon system reboot there may have been duplicating delayed calls in the database. So over time, the number of such calls could be significant and those jobs could even affect performance. This has now been fixed with regular threads without using scheduler at all. Additionally, the new configuration property "batch_size" has been added under the group "action_heartbeat" to control the maximum number of action executions processed during one iteration of the action execution heartbeat checker. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/region-name-support-9e4b4ccd963ace88.yaml0000644000175000017500000000042100000000000027122 0ustar00coreycorey00000000000000--- fixes: - | [`bug 1633345 `_] User now could define the target region for the openstack actions. It could be done via API in X-Region-Name and X-Target-Region-Name in case of multi-vim feature is used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/remove_polling_from_join-3a7921c4af741822.yaml0000644000175000017500000000060100000000000027752 0ustar00coreycorey00000000000000--- fixes: - | Removed DB polling from the logic that checks readiness of a "join" task which leads to situations when CPU was mostly occupied by scheduler that runs corresponding periodic jobs and that doesn't let the workflow move forward with a proper speed. That happens in case if a workflow has lots of "join" tasks with many dependencies. It's fixed now. ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=mistral-10.0.0.0b3/releasenotes/notes/remove_redundant_persistent_data_from_task_context-c5281a5f5ae688f1.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/remove_redundant_persistent_data_from_task_context-c5281a5f5ae0000644000175000017500000000072700000000000034037 0ustar00coreycorey00000000000000--- fixes: - Mistral was storing some internal information in task execution inbound context ('task_executions_v2.in_contex' DB field) to DB. This information was needed only to correctly implement the YAQL function task() without arguments. A fix was made to not store this information in the persistent storage and rather include it into a context view right before evaluating expressions where needed. So it slightly optimizes spaces in DB. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=mistral-10.0.0.0b3/releasenotes/notes/remove_unnecessary_workflow_execution_update-bdc9526bd39539c4.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/remove_unnecessary_workflow_execution_update-bdc9526bd39539c4.0000644000175000017500000000073200000000000033460 0ustar00coreycorey00000000000000--- fixes: - | Eliminated an unnecessary update of the workflow execution object when processing "on_action_complete" operation. 
Without this fix, all such transactions would have to compete for the workflow executions table, which caused lots of DB deadlocks (on MySQL) and transaction retries. In some cases the number of retries even exceeded the limit (currently hardcoded at 50) and such tasks could only be fixed by the integrity checker over time. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/role-based-resource-access-control-3579714be15d9b0b.yaml0000644000175000017500000000022500000000000031535 0ustar00coreycorey00000000000000--- features: - By default, an admin user can get/list/update/delete other projects' resources. In Pike, only workflows/executions are supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/safe-rerun-in-task-defaults-87a4cbe12558bc6d.yaml0000644000175000017500000000011300000000000030332 0ustar00coreycorey00000000000000--- features: - | Added the 'safe-rerun' policy to the task-defaults section ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=mistral-10.0.0.0b3/releasenotes/notes/set_security_context_for_action_execution_checker-eee7fb697fb213d1.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/set_security_context_for_action_execution_checker-eee7fb697fb20000644000175000017500000000071000000000000034110 0ustar00coreycorey00000000000000--- fixes: - | The action execution checker didn't set a security context before failing expired action executions. This caused an ApplicationContextNotFoundException in case the corresponding workflow specification was not in the cache and Mistral had to load a DB object. The DB operation, in turn, was trying to access a security context which wasn't set. It's now fixed by setting an admin context in the action execution checker thread. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=mistral-10.0.0.0b3/releasenotes/notes/simplify_workflow_and_join_completion_check-77a47c5d8953096d.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/simplify_workflow_and_join_completion_check-77a47c5d8953096d.y0000644000175000017500000000072400000000000033245 0ustar00coreycorey00000000000000--- fixes: - | The workflow and join completion check logic is now simplified by using a post-transactional queue of operations, which is a more generic version of the action_queue module that previously served for scheduling action runs outside of the main DB transaction. A workflow completion check is now registered only once when a task completes, which reduces clutter, and it's registered only if the task may potentially lead to workflow completion. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/std-ssh-add-pkey-2c665a81ff9fbdfd.yaml0000644000175000017500000000032100000000000026345 0ustar00coreycorey00000000000000--- features: - | Adds the parameter private_key to the standard ssh actions. This allows a user to specify the key to use instead of relying on the keys available in the filesystem of the executors.
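Since the standard ssh actions are built on paramiko (listed in requirements.txt later in this tree), the private_key parameter described above boils down to loading key material from memory instead of from a file on the executor. A minimal, hypothetical sketch of that mechanism follows; the host, user, command and key material are placeholders, and this is not the action's actual implementation:

.. code-block:: python

    # Sketch: connect with an in-memory private key instead of a key
    # file from the executor's filesystem. All values are placeholders.
    import io

    import paramiko

    private_key_text = """-----BEGIN RSA PRIVATE KEY-----
    ... key material elided ...
    -----END RSA PRIVATE KEY-----"""

    # paramiko accepts any file-like object, so a StringIO wrapper lets
    # the key come from an API parameter rather than the filesystem.
    pkey = paramiko.RSAKey.from_private_key(io.StringIO(private_key_text))

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('203.0.113.10', username='mistral', pkey=pkey)
    _, stdout, _ = client.exec_command('uptime')
    print(stdout.read().decode())
    client.close()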
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/std.email-reply-to-c283770c798db7d0.yaml0000644000175000017500000000012000000000000026411 0ustar00coreycorey00000000000000--- features: - It's now possible to add a reply-to address when sending email. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/sub_execution_api.yaml0000644000175000017500000000013300000000000024322 0ustar00coreycorey00000000000000--- features: - | Added a new API to fetch the sub-executions of an execution or a task. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=mistral-10.0.0.0b3/releasenotes/notes/support-created-at-yaql-function-execution-6ece8eaf34664c38.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/support-created-at-yaql-function-execution-6ece8eaf34664c38.ya0000644000175000017500000000021100000000000033105 0ustar00coreycorey00000000000000--- features: - A Mistral action developer can get the start time of a workflow execution by using ``<% execution().created_at %>``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/support-env-in-adhoc-actions-20c98598893aa19f.yaml0000644000175000017500000000042700000000000030342 0ustar00coreycorey00000000000000--- fixes: - Added support for referencing task and workflow context data, including environment variables via env(), when using YAQL/Jinja2 expressions inside ad-hoc actions. YAQL/Jinja2 expressions can reference env() and other context data in the base-input section. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/support-manage-cron-trigger-by-id-ab544e8068b84967.yaml0000644000175000017500000000010300000000000031252 0ustar00coreycorey00000000000000--- features: - Added support for managing a cron-trigger instance by id. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/support-manila-action-8af256d5fadd1ac5.yaml0000644000175000017500000000014400000000000027500 0ustar00coreycorey00000000000000--- features: - | Add Mistral actions for OpenStack Manila, the file share management service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/support-qinling-action-99cd323d4df36d48.yaml0000644000175000017500000000014400000000000027476 0ustar00coreycorey00000000000000--- features: - | Add Mistral actions for OpenStack Qinling, the function management service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/support-zun-action-3263350334d1d34f.yaml0000644000175000017500000000012600000000000026403 0ustar00coreycorey00000000000000--- features: - | Add Mistral actions for OpenStack Zun, the container service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/tacket-actions-support-2b4cee2644313cb3.yaml0000644000175000017500000000006400000000000027444 0ustar00coreycorey00000000000000--- features: - Tacker actions are now supported.
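Several of the notes above (env() in ad-hoc actions, ``<% execution().created_at %>``) revolve around YAQL expressions evaluated against a data context. As a self-contained illustration using the yaql library that Mistral depends on (see requirements.txt later in this tree) — the context keys below are fabricated, and inside Mistral functions such as env() and execution() are registered on top of this machinery rather than being plain data:

.. code-block:: python

    # Standalone YAQL evaluation against a dict context. Inside Mistral,
    # functions like env() and execution() are extra functions exposed to
    # the evaluation context; here we only show plain data access.
    import yaql

    engine = yaql.factory.YaqlFactory().create()

    context = {
        'instance_name': 'vm-1',
        'env': {'region': 'regionOne'},  # fabricated example data
    }

    expression = engine('$.env.region')
    print(expression.evaluate(data=context))  # -> regionOne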
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/transition-message-8dc4dd99240bd0f7.yaml0000644000175000017500000000024200000000000026735 0ustar00coreycorey00000000000000--- features: - | Users can now provide a custom message for a fail/pause/success transition, e.g. - fail(msg='error in task'): <% condition if any %> ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/update-mistral-docker-image-0c6294fc021545e0.yaml0000644000175000017500000000051500000000000030145 0ustar00coreycorey00000000000000--- features: - The Mistral Docker image and tooling have been updated to significantly ease starting a Mistral cluster. The setup now supports all-in-one and multi-container deployments. Also, the scripts were cleaned up and aligned with Docker best practices. fixes: - Fixed JavaScript support in the Docker image. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/update-retry-policy-fb5e73ce717ed066.yaml0000644000175000017500000000057300000000000027054 0ustar00coreycorey00000000000000--- critical: - Due to bug https://bugs.launchpad.net/mistral/+bug/1631140, Mistral was not honoring a retry count of 1. With this bug fixed, Mistral now accepts 1 as a valid retry count. - Mistral does not count the initial task run as a retry; the retry count only applies after the failure of the initial task execution. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/use-workflow-uuid-30d5e51c6ac57f1d.yaml0000644000175000017500000000037600000000000026526 0ustar00coreycorey00000000000000--- deprecations: - Usage of the workflow name in the system (e.g. creating executions/cron-triggers, workflow CRUD operations, etc.) is deprecated; please use the workflow UUID instead. The workflow sharing feature can only be used with the workflow UUID. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/use_mapped_entity_for_root_execution-1af6af12ee437282.yaml0000644000175000017500000000117200000000000032544 0ustar00coreycorey00000000000000--- fixes: - | The WorkflowExecution database model had only "root_execution_id" to reference the root workflow execution, i.e. the topmost parent workflow execution in the execution tree. So if we needed the entity itself we'd always make a direct query to the database, in fact without using the entity cache in the SQLAlchemy session. This has now been fixed by adding a normal mapped entity for the root workflow execution. In other words, the WorkflowExecution class now has the property "root_execution". This slightly improves performance in case the property is accessed more than once per database session. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/using_passive_deletes_in_sqlalchemy-4b3006b3aba55155.yaml0000644000175000017500000000066400000000000032234 0ustar00coreycorey00000000000000--- fixes: - | Used "passive_deletes=True" in the configuration of relationships in SQLAlchemy models.
This improves the deletion of graphs of related objects stored in the DB because dependent objects don't get loaded prior to deletion, which also reduces the memory requirements on the system. More about using this flag can be found at: http://docs.sqlalchemy.org/en/latest/orm/collections.html#using-passive-deletes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/validate-ad-hoc-action-api-added-6d7eaaedbe8129a7.yaml0000644000175000017500000000010200000000000031255 0ustar00coreycorey00000000000000--- features: - A new API for validating ad-hoc actions was added.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/wf_final_context_evaluation_with_batches-6292ab64c131dfcc.yaml0000644000175000017500000000111500000000000033407 0ustar00coreycorey00000000000000--- fixes: - Evaluation of the final workflow context was very heavy in cases when the workflow had a lot of parallel tasks with large inbound contexts. Merging those contexts in order to evaluate the workflow output consumed a lot of memory. This algorithm has now been rewritten with a batched DB query and Python generators so that the GC has a chance to destroy objects that have already been processed. Previously, all task executions had to stay in memory until the end of the processing. The result is that it now consumes 3 times less memory in heavy cases.././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=mistral-10.0.0.0b3/releasenotes/notes/workflow-create-instance-YaqlEvaluationException-e22afff26a193c4f.yaml 22 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/workflow-create-instance-YaqlEvaluationException-e22afff26a1930000644000175000017500000000012000000000000033367 0ustar00coreycorey00000000000000--- fixes: - Fix for YaqlEvaluationException in the std.create_instance workflow. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/workflow-sharing-746255cda20c48d2.yaml0000644000175000017500000000044400000000000026256 0ustar00coreycorey00000000000000--- features: - | Add support for the `workflow sharing`_ feature. Users of one project can share workflows with other projects using this feature. .. _workflow sharing: https://specs.openstack.org/openstack/mistral-specs/specs/mitaka/approved/mistral-workflow-resource-sharing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/workflow_environment_optimizations-deb8868df3f0dc36.yaml0000644000175000017500000000140400000000000032477 0ustar00coreycorey00000000000000--- fixes: - Mistral was storing, in fact, two copies of a workflow environment: one in the workflow parameters (the 'params' field) and another one in the context (the 'context' field). Now it's stored only in the workflow parameters. This saves space in the DB and increases performance in the case of big workflow environments. - Mistral was copying a workflow environment into all of its sub-workflows. In the case of a big workflow environment and a big number of sub-workflows this caused serious problems: it used additional space in the DB and a lot of RAM (e.g. when the 'on-success' clause has a lot of tasks, each of which is a sub-workflow). This is now fixed by evaluating the workflow environment through the root execution reference.
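To make the passive_deletes note above concrete, here is a minimal, illustrative pair of models (not Mistral's real schema) showing the pattern: the foreign key carries ON DELETE CASCADE, and passive_deletes=True tells the ORM not to load the children before deleting the parent:

.. code-block:: python

    # Illustrative models demonstrating passive_deletes=True. The
    # table/class names are made up and do not mirror Mistral's schema.
    import sqlalchemy as sa
    from sqlalchemy import orm
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Execution(Base):
        __tablename__ = 'executions'

        id = sa.Column(sa.Integer, primary_key=True)
        # With passive_deletes=True the ORM leaves child-row cleanup to
        # the database's ON DELETE CASCADE instead of loading each Task
        # into memory first.
        tasks = orm.relationship(
            'Task',
            cascade='all, delete-orphan',
            passive_deletes=True,
        )

    class Task(Base):
        __tablename__ = 'tasks'

        id = sa.Column(sa.Integer, primary_key=True)
        execution_id = sa.Column(
            sa.Integer,
            sa.ForeignKey('executions.id', ondelete='CASCADE'),
        )

Deleting an Execution in a session then issues a single DELETE for the parent row and lets the database cascade to its tasks, which is exactly the memory saving the note describes.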
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/x-target-insecure-values-4b2bdbfd42526abc.yaml0000644000175000017500000000055400000000000030107 0ustar00coreycorey00000000000000--- fixes: - | The header X-Target-Insecure previously accepted any string and used it for comparisons. This meant that unless it was empty (or not provided) it would always evaluate as True. This change makes the validation stricter, only accepting "True" and "False" and converting these to boolean values. Any other value will return an error. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/notes/yaml-json-parse-53217627a647dc1d.yaml0000644000175000017500000000024300000000000025714 0ustar00coreycorey00000000000000--- features: - | Add the yaml_parse and json_parse expression functions. Each accepts a string, parses it as either YAML or JSON, and returns an object. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1775687 mistral-10.0.0.0b3/releasenotes/source/0000755000175000017500000000000000000000000020104 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1775687 mistral-10.0.0.0b3/releasenotes/source/_static/0000755000175000017500000000000000000000000021532 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/_static/.placeholder0000644000175000017500000000000000000000000024003 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1775687 mistral-10.0.0.0b3/releasenotes/source/_templates/0000755000175000017500000000000000000000000022241 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/_templates/.placeholder0000644000175000017500000000000000000000000024512 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/conf.py0000644000175000017500000002152400000000000021407 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Mistral Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Mistral Release Notes' copyright = u'2015, Mistral Developers' # Release notes are version independent release = '' version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # Must set this variable to include year, month, day, hours, and minutes. html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'MistralReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'MistralReleaseNotes.tex', u'Mistral Release Notes Documentation', u'Mistral Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'mistralreleasenotes', u'Mistral Release Notes Documentation', [u'Mistral Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'MistralReleaseNotes', u'Mistral Release Notes Documentation', u'Mistral Developers', 'MistralReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] # -- Options for openstackdocstheme ------------------------------------------- repository_name = 'openstack/mistral' bug_project = 'mistral' bug_tag = '' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/index.rst0000644000175000017500000000141500000000000021746 0ustar00coreycorey00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================== Mistral Release Notes ====================== .. toctree:: :maxdepth: 1 unreleased train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/liberty.rst0000644000175000017500000000022200000000000022304 0ustar00coreycorey00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/mitaka.rst0000644000175000017500000000023200000000000022101 0ustar00coreycorey00000000000000=================================== Mitaka Series Release Notes =================================== .. release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/newton.rst0000644000175000017500000000026600000000000022154 0ustar00coreycorey00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton :earliest-version: 3.0.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/ocata.rst0000644000175000017500000000023000000000000021720 0ustar00coreycorey00000000000000=================================== Ocata Series Release Notes =================================== .. 
release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/pike.rst0000644000175000017500000000021700000000000021566 0ustar00coreycorey00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/queens.rst0000644000175000017500000000022300000000000022133 0ustar00coreycorey00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/rocky.rst0000644000175000017500000000022100000000000021760 0ustar00coreycorey00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/stein.rst0000644000175000017500000000022100000000000021753 0ustar00coreycorey00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/train.rst0000644000175000017500000000017600000000000021757 0ustar00coreycorey00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/releasenotes/source/unreleased.rst0000644000175000017500000000016000000000000022762 0ustar00coreycorey00000000000000============================== Current Series Release Notes ============================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/requirements.txt0000644000175000017500000000265600000000000017410 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
alembic>=0.9.6 # MIT Babel!=2.4.0,>=2.3.4 # BSD croniter>=0.3.4 # MIT License cachetools>=2.0.0 # MIT License dogpile.cache>=0.6.2 # BSD eventlet!=0.20.1,!=0.21.0,!=0.23.0,!=0.25.0,>=0.20.0 # MIT Jinja2>=2.10 # BSD License (3 clause) jsonschema>=2.6.0 # MIT keystonemiddleware>=4.18.0 # Apache-2.0 kombu!=4.0.2,>=4.6.1 # BSD mistral-lib>=1.4.0 # Apache-2.0 networkx<2.3,>=1.10;python_version<'3.0' # BSD networkx>=2.3;python_version>='3.4' # BSD oslo.concurrency>=3.26.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.context>=2.20.0 # Apache-2.0 oslo.db>=4.40.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 oslo.utils>=3.37.0 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.serialization>=2.21.1 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 paramiko>=2.4.1 # LGPLv2.1+ pbr!=2.1.0,>=2.0.0 # Apache-2.0 pecan>=1.2.1 # BSD PyJWT>=1.5 # MIT PyYAML>=5.1 # MIT requests>=2.14.2 # Apache-2.0 tenacity>=5.0.1 # Apache-2.0 six>=1.10.0 # MIT SQLAlchemy>=1.2.5 # MIT stevedore>=1.20.0 # Apache-2.0 WSME>=0.8.0 # MIT yaql>=1.1.3 # Apache 2.0 License tooz>=1.58.0 # Apache-2.0 zake>=0.1.6 # Apache-2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/run_tests.sh0000755000175000017500000002133300000000000016502 0ustar00coreycorey00000000000000#!/bin/bash set -eu function usage { echo "Usage: $0 [OPTION]..." echo "Run Mistral's test suite(s)" echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." echo " -n, --no-recreate-db Don't recreate the test database." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." echo " -u, --update Update the virtual environment with any newer package versions" echo " -p, --pep8 Just run PEP8 and HACKING compliance check" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." echo " -h, --help Print this usage message" echo " --virtual-env-path Location of the virtualenv directory" echo " Default: \$(pwd)" echo " --virtual-env-name Name of the virtualenv directory" echo " Default: .venv" echo " --tools-path

Location of the tools directory" echo " Default: \$(pwd)" echo " --db-type Database type" echo " Default: sqlite" echo " --parallel Determines whether the tests are run in one thread or not" echo " Default: false" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." exit } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; --virtual-env-path) (( i++ )) venv_path=${!i} ;; --virtual-env-name) (( i++ )) venv_dir=${!i} ;; --tools-path) (( i++ )) tools_path=${!i} ;; --db-type) (( i++ )) db_type=${!i} ;; --parallel) (( i++ )) parallel=${!i} ;; -*) stestropts="$stestropts ${!i}";; *) stestrargs="$stestrargs ${!i}" esac (( i++ )) done } db_type=${db_type:-sqlite} parallel=${parallel:-false} tools_path=${tools_path:-$(pwd)} venv_path=${venv_path:-$(pwd)} venv_dir=${venv_dir:-.venv} with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= stestrargs= stestropts= wrapper="" just_pep8=0 no_pep8=0 coverage=0 debug=0 recreate_db=1 update=0 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C CI_PROJECT=${CI_PROJECT:-""} process_options "$@" # Make our paths available to other scripts we call export venv_path export venv_dir export tools_path export venv=${venv_path}/${venv_dir} if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function setup_db { case ${db_type} in sqlite ) rm -f tests.sqlite ;; "postgresql" | "mysql" ) dbname="openstack_citest" username="openstack_citest" password="openstack_citest" ;; esac } function setup_db_pylib { case ${db_type} in postgresql ) echo "Installing python library for PostgreSQL." ${wrapper} pip install psycopg2==2.8.3 ;; mysql ) echo "Installing python library for MySQL." ${wrapper} pip install PyMySQL ;; esac } function setup_db_cfg { case ${db_type} in sqlite ) rm -f .mistral.conf ;; "postgresql" ) oslo-config-generator --config-file \ ./tools/config/config-generator.mistral.conf \ --output-file .mistral.conf sed -i "s/#connection = /connection = $db_type:\/\/$username:$password@localhost\/$dbname/g" .mistral.conf ;; "mysql" ) oslo-config-generator --config-file \ ./tools/config/config-generator.mistral.conf \ --output-file .mistral.conf sed -i "s/#connection = /connection = mysql+pymysql:\/\/$username:$password@localhost\/$dbname/g" .mistral.conf ;; esac } function upgrade_db { case ${db_type} in "postgresql" | "mysql" ) mistral-db-manage --config-file .mistral.conf upgrade head ;; *) echo "Skip a database upgrade" ;; esac } function cleanup { rm -f .mistral.conf } function run_tests { # Cleanup *pyc ${wrapper} find . -type f -name "*.pyc" -delete if [ $debug -eq 1 ]; then if [ "$stestropts" = "" ] && [ "$stestrargs" = "" ]; then # Default to running all tests if specific test is not # provided.
stestrargs="discover ./mistral/tests/unit" fi ${wrapper} python -m testtools.run $stestropts $stestrargs # Short circuit because all of the stestr and coverage stuff # below does not make sense when running testtools.run for # debugging purposes. return $? fi if [ $coverage -eq 1 ]; then STESTRTESTS="$STESTRTESTS --coverage" else STESTRTESTS="$STESTRTESTS --slowest" fi # Just run the test suites in current environment set +e stestrargs=$(echo "$stestrargs" | sed -e's/^\s*\(.*\)\s*$/\1/') if [ $parallel = true ] then runoptions="--subunit" else runoptions="--concurrency 1 --subunit" fi STESTRTESTS="$STESTRTESTS $runoptions $stestropts $stestrargs" OS_TEST_PATH=$(echo $stestrargs|grep -o 'mistral\.tests[^[:space:]:]*\+'|tr . /) if [ -d "$OS_TEST_PATH" ]; then wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper" elif [ -d "$(dirname $OS_TEST_PATH)" ]; then wrapper="OS_TEST_PATH=$(dirname $OS_TEST_PATH) $wrapper" fi echo "Running ${wrapper} $STESTRTESTS" bash -c "${wrapper} $STESTRTESTS | ${wrapper} subunit2pyunit" RESULT=$? set -e copy_subunit_log cleanup if [ $coverage -eq 1 ]; then echo "Generating coverage report in covhtml/" # Don't compute coverage for common code, which is tested elsewhere ${wrapper} coverage combine ${wrapper} coverage html --include='mistral/*' -d covhtml -i fi return $RESULT } function copy_subunit_log { LOGNAME=".stestr/$(($(cat .stestr/next-stream) - 1))" cp $LOGNAME subunit.log } function run_pep8 { echo "Running flake8 ..." ${wrapper} flake8 } STESTRTESTS="stestr run" if [ $never_venv -eq 0 ] then # Remove the virtual environment if --force used if [ $force -eq 1 ]; then echo "Cleaning virtualenv..." rm -rf ${venv} fi if [ $update -eq 1 ]; then echo "Updating virtualenv..." python tools/install_venv.py $installvenvopts fi if [ -e ${venv} ]; then wrapper="${with_venv}" else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv python tools/install_venv.py $installvenvopts wrapper="${with_venv}" else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py $installvenvopts wrapper=${with_venv} fi fi fi fi # Delete old coverage data from previous runs if [ $coverage -eq 1 ]; then ${wrapper} coverage erase fi if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $recreate_db -eq 1 ]; then setup_db fi setup_db_pylib setup_db_cfg upgrade_db run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to # distinguish between options (stestropts), which begin with a '-', and # arguments (stestrargs). 
if [ -z "$stestrargs" ]; then if [ $no_pep8 -eq 0 ]; then run_pep8 fi fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1815686 mistral-10.0.0.0b3/setup.cfg0000644000175000017500000001010500000000000015731 0ustar00coreycorey00000000000000[metadata] name = mistral summary = Mistral Project description-file = README.rst license = Apache License, Version 2.0 home-page = https://docs.openstack.org/mistral/latest/ classifiers = Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux author = OpenStack author-email = openstack-discuss@lists.openstack.org [files] packages = mistral [entry_points] console_scripts = mistral-server = mistral.cmd.launch:main mistral-db-manage = mistral.db.sqlalchemy.migration.cli:main wsgi_scripts = mistral-wsgi-api = mistral.api.app:init_wsgi mistral.rpc.backends = oslo_client = mistral.rpc.oslo.oslo_client:OsloRPCClient oslo_server = mistral.rpc.oslo.oslo_server:OsloRPCServer kombu_client = mistral.rpc.kombu.kombu_client:KombuRPCClient kombu_server = mistral.rpc.kombu.kombu_server:KombuRPCServer oslo.config.opts = mistral.config = mistral.config:list_opts oslo.config.opts.defaults = mistral.config = mistral.config:set_cors_middleware_defaults oslo.policy.policies = mistral = mistral.policies:list_rules oslo.policy.enforcer = mistral = mistral.api.access_control:get_enforcer mistral.actions = std.async_noop = mistral.actions.std_actions:AsyncNoOpAction std.noop = mistral.actions.std_actions:NoOpAction std.fail = mistral.actions.std_actions:FailAction std.echo = mistral.actions.std_actions:EchoAction std.http = mistral.actions.std_actions:HTTPAction std.mistral_http = mistral.actions.std_actions:MistralHTTPAction std.ssh = mistral.actions.std_actions:SSHAction std.ssh_proxied = mistral.actions.std_actions:SSHProxiedAction std.email = mistral.actions.std_actions:SendEmailAction std.javascript = mistral.actions.std_actions:JavaScriptAction std.js = mistral.actions.std_actions:JavaScriptAction std.sleep = mistral.actions.std_actions:SleepAction std.test_dict = mistral.actions.std_actions:TestDictAction mistral.executors = local = mistral.executors.default_executor:DefaultExecutor remote = mistral.executors.remote_executor:RemoteExecutor mistral.notifiers = local = mistral.notifiers.default_notifier:DefaultNotifier remote = mistral.notifiers.remote_notifier:RemoteNotifier mistral.notification.publishers = webhook = mistral.notifiers.publishers.webhook:WebhookPublisher noop = mistral.notifiers.publishers.noop:NoopPublisher mistral.expression.functions = json_pp = mistral.expressions.std_functions:json_pp_ env = mistral.expressions.std_functions:env_ execution = mistral.expressions.std_functions:execution_ executions = mistral.expressions.std_functions:executions_ global = mistral.expressions.std_functions:global_ json_parse = mistral.expressions.std_functions:json_parse_ json_dump = mistral.expressions.std_functions:json_dump_ task = mistral.expressions.std_functions:task_ tasks = mistral.expressions.std_functions:tasks_ uuid = mistral.expressions.std_functions:uuid_ yaml_parse = mistral.expressions.std_functions:yaml_parse_ yaml_dump = mistral.expressions.std_functions:yaml_dump_ mistral.expression.evaluators = yaql = 
mistral.expressions.yaql_expression:InlineYAQLEvaluator jinja = mistral.expressions.jinja_expression:InlineJinjaEvaluator mistral.auth = keystone = mistral.auth.keystone:KeystoneAuthHandler keycloak-oidc = mistral.auth.keycloak:KeycloakAuthHandler kombu_driver.executors = blocking = futurist:SynchronousExecutor threading = futurist:ThreadPoolExecutor eventlet = futurist:GreenThreadPoolExecutor pygments.lexers = mistral = mistral.ext.pygmentplugin:MistralLexer mistral.js.implementation = pyv8 = mistral.utils.javascript:PyV8Evaluator v8eval = mistral.utils.javascript:V8EvalEvaluator py_mini_racer = mistral.utils.javascript:PyMiniRacerEvaluator mistral.schedulers = legacy = mistral.services.legacy_scheduler:LegacyScheduler default = mistral.scheduler.default_scheduler:DefaultScheduler [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/setup.py0000644000175000017500000000200600000000000015623 0ustar00coreycorey00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/test-requirements.txt0000644000175000017500000000107000000000000020352 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
hacking>=3.0,<3.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 doc8>=0.6.0 # Apache-2.0 Pygments>=2.2.0 # BSD license fixtures>=3.0.0 # Apache-2.0/BSD mock>=2.0.0 # BSD nose>=1.3.7 # LGPL oslotest>=3.2.0 # Apache-2.0 requests-mock>=1.2.0 # Apache-2.0 tempest>=17.1.0 # Apache-2.0 stestr>=2.0.0 # Apache-2.0 testtools>=2.2.0 # MIT unittest2>=1.1.0 # BSD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1775687 mistral-10.0.0.0b3/tools/0000755000175000017500000000000000000000000015253 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1775687 mistral-10.0.0.0b3/tools/config/0000755000175000017500000000000000000000000016520 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/config/check_uptodate.sh0000755000175000017500000000132000000000000022035 0ustar00coreycorey00000000000000#!/usr/bin/env bash PROJECT_NAME=${PROJECT_NAME:-mistral} CFGFILE_NAME=${PROJECT_NAME}.conf.sample if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME} elif [ -e etc/${CFGFILE_NAME} ]; then CFGFILE=etc/${CFGFILE_NAME} else echo "${0##*/}: can not find config file" exit 1 fi TEMPDIR=$(mktemp -d /tmp/${PROJECT_NAME}.XXXXXX) trap "rm -rf $TEMPDIR" EXIT oslo-config-generator --config-file tools/config/config-generator.mistral.conf --output-file ${TEMPDIR}/${CFGFILE_NAME} if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE} then echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date." echo "${0##*/}: Please run tox -egenconfig." exit 1 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/config/config-generator.mistral.conf0000644000175000017500000000047100000000000024274 0ustar00coreycorey00000000000000[DEFAULT] namespace = mistral.config namespace = oslo.db namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.middleware.http_proxy_to_wsgi namespace = keystonemiddleware.auth_token namespace = periodic.config namespace = oslo.log namespace = oslo.policy namespace = oslo.service.sslutils ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/config/policy-generator.mistral.conf0000644000175000017500000000003500000000000024322 0ustar00coreycorey00000000000000[DEFAULT] namespace = mistral././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1775687 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/0000755000175000017500000000000000000000000022734 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/README.rst0000644000175000017500000000162100000000000024423 0ustar00coreycorey00000000000000cookiecutter-mistral-custom =========================== A minimal `cookiecutter <https://github.com/audreyr/cookiecutter>`_ template for Mistral custom actions and expressions Usage ----- This will run cookiecutter, installing it first if needed ..
code-block:: bash $ run_cookiecutter.sh | Install the python project when finished editing ([sudo] pip install [folder]) | Run the script to update the actions in the database | NOTE: default configuration file is /etc/mistral/mistral.conf .. code-block:: bash $ update_actions.sh [/path/to/mistral/conf] Explanation ----------- The generated directory contains a minimal python project for mistral custom actions and expressions. It also has the following: * LICENSE An Apache 2 license; if you choose another license, update the setup.cfg file * README A basic README file * Testing Tox to manage test environments using pytest and flake8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/cookiecutter.json0000644000175000017500000000043000000000000026324 0ustar00coreycorey00000000000000{ "author": "mistral mistraly", "email": "mistral@mistral.com", "project_name": "example-action", "pkg_name": "{{ cookiecutter.project_name.lower().replace(' ', '_').replace('-', '_') }}", "summary": "A mistral custom action example", "version": "0.1.0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/run_cookiecutter.sh0000755000175000017500000000031300000000000026654 0ustar00coreycorey00000000000000#!/bin/bash if [ ! -f "/usr/local/bin/cookiecutter" ] then echo "Installing cookiecutter" if [[ $EUID -ne 0 ]]; then SUDO=sudo fi $SUDO pip install cookiecutter fi cookiecutter . ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/update_actions.sh0000755000175000017500000000016100000000000026273 0ustar00coreycorey00000000000000#!/bin/bash config_file=${1:-'/etc/mistral/mistral.conf'} mistral-db-manage --config-file $config_file populate_actions././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1815686 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/0000755000175000017500000000000000000000000031061 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/LICENSE0000644000175000017500000002613500000000000032075 0ustar00coreycorey00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/README.rst0000644000175000017500000000052500000000000032552 0ustar00coreycorey00000000000000{{ cookiecutter.project_name }} {{ cookiecutter.project_name|count * "=" }} {{ cookiecutter.summary }} Usage ----- Installation ------------ Requirements ^^^^^^^^^^^^ Compatibility ------------- Licence ------- Authors ------- `{{ cookiecutter.project_name }}` was written by `{{ cookiecutter.author }} <{{ cookiecutter.email }}>`_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/requirements.txt0000644000175000017500000000024300000000000034344 0ustar00coreycorey00000000000000# The order of packages is significant, # because pip processes them in the order of appearance. # Changing the order has an impact on the overall integration. 
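The generated requirements.txt above ships empty apart from the ordering note. As a rough sketch only (assuming a generated package keeps its runtime pins in step with the install_requires list in the setup.cfg that follows), a filled-in version might read:

    # The order of packages is significant, because pip processes
    # them in the order of appearance.
    mistral_lib>=1.2.0
    yaql>=1.1.3

Order matters here because pip resolves each requirement as it reaches it, so a loosely pinned package listed first can pull in versions that conflict with stricter pins further down the file.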
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/setup.cfg0000644000175000017500000000221400000000000032701 0ustar00coreycorey00000000000000[metadata] name = {{ cookiecutter.project_name }} license = Apache 2.0 summary = {{ cookiecutter.summary }} description-file = README.rst long_description_content_type = text/x-rst author = {{ cookiecutter.author }} author-email = {{ cookiecutter.email }} version = {{cookiecutter.version}} classifier = Intended Audience :: Developers License :: OSI Approved :: Apache Software License Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Topic :: Utilities [options] packages = {{ cookiecutter.pkg_name }} install_requires = mistral_lib>=1.2.0 yaql>=1.1.3 [options.entry_points] mistral.actions = test.my_action = {{cookiecutter.pkg_name}}.actions:MyAction mistral.expression.functions = test.my_function = {{cookiecutter.pkg_name}}.expression_functions:my_function_ [wheel] universal = 1 [tool:pytest] filterwarnings = # Show any DeprecationWarnings once once::DeprecationWarning ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/setup.py0000644000175000017500000000121300000000000032570 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2019 - Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
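The mistral.actions and mistral.expression.functions entry points registered in the setup.cfg above are what expose a generated plugin to the Mistral workflow language. As a minimal sketch (assuming the template defaults test.my_action and test.my_function are kept, and the package is installed alongside the Mistral engine; the workflow name demo_custom_plugin is hypothetical), a v2 workflow could invoke the custom action like this:

    ---
    version: '2.0'

    demo_custom_plugin:
      tasks:
        run_my_action:
          # test.my_action resolves to MyAction in actions.py below;
          # 'param' is stored by __init__ and run() returns {'status': 0}
          action: test.my_action param="hello"

The expression function registered under test.my_function becomes available to YAQL/Jinja expressions in the same way once the entry point is installed.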
import setuptools setuptools.setup() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/tox.ini0000644000175000017500000000103700000000000032375 0ustar00coreycorey00000000000000[tox] envlist = py27, py35, py36, py37, pep8 [testenv] usedevelop = True sitepackages = False deps = pytest -r{toxinidir}/requirements.txt commands = pytest {posargs} [testenv:pep8] basepython = python3 deps = flake8 flake8-import-order flake8-blind-except flake8-builtins flake8-docstrings flake8-rst-docstrings flake8-logging-format commands = flake8 [flake8] exclude = .tox,.eggs show-source = true ignore = D100,D101,D102,D103,D104,D105,D107,G200,G201,W503,W504 enable-extensions=G././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000119 path=mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/ 28 mtime=1586538868.1815686 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pk0000755000175000017500000000000000000000000034501 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/__init__.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pk0000644000175000017500000000000000000000000034471 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/actions.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pk0000644000175000017500000000151300000000000034503 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from mistral_lib import actions class MyAction(actions.Action): def __init__(self, param): # store the incoming params self.param = param def run(self, context): # return your results here return {'status': 0} ././@PaxHeader0000000000000000000000000000024400000000000011455 xustar0000000000000000142 path=mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/expression_functions.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pk0000644000175000017500000000172100000000000034504 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from time import time from uuid import UUID from uuid import uuid5 def my_function_(context): """Generate a UUID using the execution ID and the clock.""" # fetch the current workflow execution ID found in the context execution_id = context['__execution']['id'] time_str = str(time()) execution_uuid = UUID(execution_id) return uuid5(execution_uuid, time_str) ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000125 path=mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/tests/ 28 mtime=1586538868.1815686 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pk0000755000175000017500000000000000000000000034501 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/tests/__init__.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pk0000644000175000017500000000000000000000000034471 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/tests/test_action.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pk0000644000175000017500000000146700000000000034513 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from {{cookiecutter.pkg_name}}.actions import MyAction class MyTestCase(unittest.TestCase): def test_action(self): action = MyAction(None) self.assertDictEqual(action.run(None), {'status': 0}) ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pkg_name}}/tests/test_expressions.py 22 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cookiecutter-mistral-custom/{{cookiecutter.project_name}}/{{cookiecutter.pk0000644000175000017500000000275100000000000034510 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest import uuid from uuid import UUID from {{cookiecutter.pkg_name}}.expression_functions import my_function_ class MyTestCase(unittest.TestCase): def test_my_function(self): context = {'__execution': {'id': uuid.uuid4().hex}} result = my_function_(context) # should not throw an exception UUID(str(result), version=5) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/cover.sh0000755000175000017500000000456600000000000016731 0ustar00coreycorey00000000000000#!/bin/bash # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ALLOWED_EXTRA_MISSING=4 show_diff () { head -1 $1 diff -U 0 $1 $2 | sed 1,2d } # Stash uncommitted changes, check out the previous commit and save coverage report uncommitted=$(git status --porcelain | grep -v "^??") [[ -n $uncommitted ]] && git stash > /dev/null git checkout HEAD^ baseline_report=$(mktemp -t mistral_coverageXXXXXXX) find . -type f -name "*.pyc" -delete && stestr --coverage "$*" coverage report -m > $baseline_report baseline_missing=$(awk 'END { print $3 }' $baseline_report) previous_sha=$(git rev-parse HEAD); # Checkout back and unstash uncommitted changes (if any) git checkout - [[ -n $uncommitted ]] && git stash pop > /dev/null # Erase previously collected coverage data. coverage erase; # Generate and save coverage report current_report=$(mktemp -t mistral_coverageXXXXXXX) find . -type f -name "*.pyc" -delete && stestr --coverage "$*" coverage report -m > $current_report current_missing=$(awk 'END { print $3 }' $current_report) # Show coverage details allowed_missing=$((baseline_missing+ALLOWED_EXTRA_MISSING)) echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}" echo "Compared against ${previous_sha}"; echo "Missing lines in previous commit : ${baseline_missing}" echo "Missing lines in proposed change : ${current_missing}" if [ $allowed_missing -gt $current_missing ]; then if [ $baseline_missing -lt $current_missing ]; then show_diff $baseline_report $current_report echo "I believe you can cover all your code with 100% coverage!"
else echo "Thank you! You are awesome! Keep writing unit tests! :)" fi exit_code=0 else show_diff $baseline_report $current_report echo "Please write more unit tests, we should keep our test coverage :( " exit_code=1 fi rm $baseline_report $current_report exit $exit_code ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1815686 mistral-10.0.0.0b3/tools/docker/0000755000175000017500000000000000000000000016522 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/docker/DOCKER_README.rst0000644000175000017500000000026000000000000021236 0ustar00coreycorey00000000000000You can find the latest documentation of Mistral integration with Docker `here `_.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/docker/Dockerfile0000644000175000017500000000404700000000000020521 0ustar00coreycorey00000000000000FROM krallin/ubuntu-tini:16.04 LABEL name="Mistral" \ description="Workflow Service for OpenStack" \ maintainers="Andras Kovi \ Vitalii Solodilov " RUN apt-get -qq update && \ apt-get install -y \ libffi-dev \ libpq-dev \ libssl-dev \ libxml2-dev \ libxslt1-dev \ libyaml-dev \ libmysqlclient-dev \ python \ python-dev \ crudini \ curl \ git \ gcc \ libuv1 \ libuv1-dev && \ curl -f -o /tmp/get-pip.py https://bootstrap.pypa.io/3.2/get-pip.py && \ python /tmp/get-pip.py && rm /tmp/get-pip.py && \ pip install --upgrade pip RUN pip install pymysql psycopg2 py_mini_racer ENV MISTRAL_DIR="/opt/stack/mistral" \ TMP_CONSTRAINTS="/tmp/upper-constraints.txt" \ CONFIG_FILE="/etc/mistral/mistral.conf" \ INI_SET="crudini --set /etc/mistral/mistral.conf" \ MESSAGE_BROKER_URL="rabbit://guest:guest@rabbitmq:5672/" \ DATABASE_URL="sqlite:///mistral.db" \ UPGRADE_DB="false" \ RUN_TESTS="false" \ DEBIAN_FRONTEND="noninteractive" \ MISTRAL_SERVER="all" \ LOG_DEBUG="false" \ AUTH_URL="http://keycloak:8080/auth" \ AUTH_ENABLE="false" \ AUTH_TYPE="keycloak-oidc" # We install dependencies separatly for a caching purpose COPY requirements.txt "${MISTRAL_DIR}/" RUN curl -o "${TMP_CONSTRAINTS}" \ http://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt && \ sed -i "/^mistral.*/d" "${TMP_CONSTRAINTS}" && \ pip install -r "${MISTRAL_DIR}/requirements.txt" ARG BUILD_TEST_DEPENDENCIES="false" COPY test-requirements.txt "${MISTRAL_DIR}/" RUN if ${BUILD_TEST_DEPENDENCIES} ; then \ pip install -r "${MISTRAL_DIR}/test-requirements.txt" ; \ fi COPY . 
${MISTRAL_DIR} RUN pip install -e "${MISTRAL_DIR}" && \ mkdir /etc/mistral && \ rm -rf /var/lib/apt/lists/* && \ find ${MISTRAL_DIR} -name "*.sh" -exec chmod +x {} \; WORKDIR "${MISTRAL_DIR}" EXPOSE 8989 CMD "${MISTRAL_DIR}/tools/docker/start.sh" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1815686 mistral-10.0.0.0b3/tools/docker/docker-compose/0000755000175000017500000000000000000000000021434 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/docker/docker-compose/auth.json0000644000175000017500000000046700000000000023277 0ustar00coreycorey00000000000000{ "_type": "openid-connect", "issuer": "http://keycloak:8080/auth/realms/master", "loginUrl": "http://keycloak:8080/auth/realms/master/protocol/openid-connect/auth", "logoutUrl": "http://keycloak:8080/auth/realms/master/protocol/openid-connect/logout", "requireHttps": false, "clientId": "mistral" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/docker/docker-compose/infrastructure.yaml0000644000175000017500000000317000000000000025401 0ustar00coreycorey00000000000000version: '3' services: # postgresql: # image: postgres:10.1-alpine # restart: always # ports: # - "5432:5432" # volumes: # - postgresql:/var/lib/postgresql/data # networks: # - database # environment: # - POSTGRES_PASSWORD=mistral # - POSTGRES_USER=mistral # - POSTGRES_DB=mistral rabbitmq: image: rabbitmq:3.7.2-management-alpine restart: always ports: - "15672:15672" networks: - message-broker hostname: rabbitmq environment: - RABBITMQ_VM_MEMORY_HIGH_WATERMARK=0.81 - RABBITMQ_DEFAULT_USER=mistral - RABBITMQ_DEFAULT_PASS=mistral - RABBITMQ_DEFAULT_VHOST=mistral mysql: image: mysql:8.0.3 restart: always ports: - "3306:3306" volumes: - mysql:/var/lib/mysql networks: - database environment: - MYSQL_ROOT_PASSWORD=mistral - MYSQL_DATABASE=mistral - MYSQL_USER=mistral - MYSQL_PASSWORD=mistral keycloak: image: jboss/keycloak:4.1.0.Final ports: - "8080:8080" networks: - identity-provider environment: DB_VENDOR: h2 KEYCLOAK_USER: mistral KEYCLOAK_PASSWORD: mistral cloud-flow: image: mcdoker18/cloud-flow:0.5.0 restart: always networks: - cloud-flow ports: - "8000:8000" # TODO: make Keycloak environment variables for CloudFlow # volumes: # - "./auth.json:/opt/CloudFlow/dist/assets/auth.json" environment: - CF_MISTRAL_URL=http://mistral:8989 volumes: postgresql: rabbitmq: mysql: networks: database: message-broker: identity-provider: cloud-flow:././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/docker/docker-compose/mistral-multi-node.yaml0000644000175000017500000000412700000000000026052 0ustar00coreycorey00000000000000version: '3' services: mistral-api: build: context: ../../.. dockerfile: tools/docker/Dockerfile args: BUILD_TEST_DEPENDENCIES: "false" restart: always ports: - "8989:8989" networks: database: message-broker: identity-provider: cloud-flow: aliases: - mistral env_file: - mistral.env # volumes: # - "/path/to/mistral.conf:/etc/mistral/mistral.conf" environment: - MISTRAL_SERVER=api - UPGRADE_DB=true mistral-engine: build: context: ../../..
dockerfile: tools/docker/Dockerfile args: BUILD_TEST_DEPENDENCIES: "false" restart: always networks: - database - identity-provider - message-broker env_file: - mistral.env # volumes: # - "/path/to/mistral.conf:/etc/mistral/mistral.conf" environment: - MISTRAL_SERVER=engine mistral-executor: build: context: ../../.. dockerfile: tools/docker/Dockerfile args: BUILD_TEST_DEPENDENCIES: "false" restart: always networks: - message-broker - identity-provider env_file: - mistral.env # volumes: # - "/path/to/mistral.conf:/etc/mistral/mistral.conf" environment: - MISTRAL_SERVER=executor mistral-event-engine: build: context: ../../.. dockerfile: tools/docker/Dockerfile args: BUILD_TEST_DEPENDENCIES: "false" restart: always networks: - database - message-broker - identity-provider env_file: - mistral.env # volumes: # - "/path/to/mistral.conf:/etc/mistral/mistral.conf" environment: - MISTRAL_SERVER=event-engine mistral-notifier: build: context: ../../.. dockerfile: tools/docker/Dockerfile args: BUILD_TEST_DEPENDENCIES: "false" restart: always networks: - database - message-broker - identity-provider env_file: - mistral.env # volumes: # - "/path/to/mistral.conf:/etc/mistral/mistral.conf" environment: - MISTRAL_SERVER=notifier ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/docker/docker-compose/mistral-single-node.yaml0000644000175000017500000000071600000000000026201 0ustar00coreycorey00000000000000version: '3' services: mistral: build: context: ../../.. dockerfile: "tools/docker/Dockerfile" args: BUILD_TEST_DEPENDENCIES: "false" restart: always ports: - "8989:8989" networks: - database - message-broker - identity-provider - cloud-flow env_file: - mistral.env # volumes: # - "/path/to/mistral.conf:/etc/mistral/mistral.conf" environment: - UPGRADE_DB=true././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/docker/docker-compose/mistral.env0000644000175000017500000000033700000000000023624 0ustar00coreycorey00000000000000MESSAGE_BROKER_URL=rabbit://mistral:mistral@rabbitmq:5672/mistral #DATABASE_URL=postgresql+psycopg2://mistral:mistral@postgresql:5432/mistral DATABASE_URL=mysql+pymysql://mistral:mistral@mysql:3306/mistral AUTH_ENABLE=false././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/docker/start.sh0000755000175000017500000000245200000000000020221 0ustar00coreycorey00000000000000#!/bin/bash set -e # If a Mistral config doesn't exist we should create it and fill it in with # parameters if [ ! -f ${CONFIG_FILE} ]; then oslo-config-generator \ --config-file "${MISTRAL_DIR}/tools/config/config-generator.mistral.conf" \ --output-file "${CONFIG_FILE}" ${INI_SET} DEFAULT js_implementation py_mini_racer ${INI_SET} oslo_policy policy_file "${MISTRAL_DIR}/etc/policy.json" ${INI_SET} DEFAULT auth_type ${AUTH_TYPE} ${INI_SET} pecan auth_enable ${AUTH_ENABLE} ${INI_SET} keycloak_oidc auth_url ${AUTH_URL} ${INI_SET} keycloak_oidc insecure true ${INI_SET} DEFAULT transport_url "${MESSAGE_BROKER_URL}" ${INI_SET} database connection "${DATABASE_URL}" ${INI_SET} DEFAULT debug "${LOG_DEBUG}" fi if [ ${DATABASE_URL} == "sqlite:///mistral.db" -a !
-f ./mistral.db ] then python ./tools/sync_db.py --config-file "${CONFIG_FILE}" mistral-db-manage --config-file "${CONFIG_FILE}" populate fi if "${UPGRADE_DB}"; then /usr/local/bin/mistral-db-manage --config-file "${CONFIG_FILE}" upgrade head mistral-db-manage --config-file "${CONFIG_FILE}" populate fi if "${RUN_TESTS}"; then cp "${CONFIG_FILE}" .mistral.conf "${MISTRAL_DIR}/run_tests.sh" -N else mistral-server --config-file "${CONFIG_FILE}" --server ${MISTRAL_SERVER} fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/generate_mistralclient_help.sh0000755000175000017500000000151300000000000023346 0ustar00coreycorey00000000000000if [ -z "$1" ]; then echo echo "Usage: $(basename $0) " echo exit fi cmd_list=$(mistral --help | sed -e '1,/Commands for API/d' | cut -d " " -f 3 | grep -vwE "(help|complete|bash-completion)") file=$1 > $file for cmd in $cmd_list do echo "Processing help for command $cmd..." echo "**$cmd**:" >> $file read -d '' helpstr << EOF $(mistral help $cmd | sed -e '/output formatters/,$d' | grep -vwE "(--help)") EOF usage=$(echo "$helpstr" | sed -e '/^$/,$d' | sed 's/^/ /') helpstr=$(echo "$helpstr" | sed -e '1,/^$/d') echo -e "::\n" >> $file echo "$usage" >> $file echo >> $file echo "$helpstr" >> $file echo >> $file done # Delete empty 'optional arguments:'. sed -i '/optional arguments:/ { N /^optional arguments:\n$/d }' $file # Delete extra empty lines. sed -i '/^$/ { N /^\n$/d }' $file ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/install_venv.py0000644000175000017500000000456300000000000020341 0ustar00coreycorey00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(venv, root): help = """ Mistral development environment setup is complete. Mistral development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Mistral virtualenv for the extent of your current shell session you can run: $ . %s/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ %s/tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help % (venv, root)) def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if os.environ.get('tools_path'): root = os.environ['tools_path'] venv = os.path.join(root, '.venv') if os.environ.get('venv'): venv = os.environ['venv'] pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Mistral' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help(venv, root) if __name__ == '__main__': sys.exit(main(sys.argv)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/install_venv_common.py0000644000175000017500000001350700000000000021707 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. 
""" if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. """ if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/rank_profiled_methods.py0000644000175000017500000000556300000000000022200 0ustar00coreycorey00000000000000# Copyright 2019 - Nokia Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ """ import sys def _print_help(): print("\nUsage: \n") print( 'The script processes a Mistral profiler log file () that\n' 'contains statistics about each profiler trace: \n' '-------------------------------------------------------------\n' ' Total time | Max time | Avg time | Occurrences | Trace name \n' '-------------------------------------------------------------\n' ' ... ... ... ... ...\n' ) def main(): try: in_file_name = str(sys.argv[1]) out_file_name = str(sys.argv[2]) except: _print_help() return "Failed to parse arguments." print('Ranking profiled methods...') in_f = open(in_file_name, 'r') out_f = open(out_file_name, 'w') # {trace_name: [total_time, max_time, occurrences]} d = dict() with in_f: for line in in_f: tokens = line.split() # Skip all "-start" lines that don't contain a duration in seconds. # Processing only "-stop" lines. if len(tokens[1]) > 10: continue trace_name = tokens[5] trace_name = trace_name[0:len(trace_name) - 5] duration = float(tokens[1]) if trace_name not in d: d[trace_name] = [duration, duration, 1] else: l = d[trace_name] l[0] = l[0] + duration l[2] = l[2] + 1 if duration > l[1]: l[1] = duration result = sorted(d.items(), key=lambda x: x[1][0], reverse=True) out_f.write('Total time | Max time | Avg time | Occurrences | Trace name\n') out_f.write('-' * 90) out_f.write('-\n') for item in result: out_f.write( '{0:<12.3f} {1:<10.3f} {2:<10.3f} {3:<13d} {4}\n'.format( item[1][0], item[1][1], item[1][0] / item[1][2], item[1][2], item[0] ) ) out_f.close() print("Ranking file was successfully created: %s" % out_file_name) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/sync_db.py0000644000175000017500000000346500000000000017256 0ustar00coreycorey00000000000000# Copyright 2014 - Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import keystonemiddleware.opts as keystonemw_opts from oslo_config import cfg from oslo_log import log as logging from mistral import config from mistral.db.v2 import api as db_api from mistral.services import action_manager from mistral.services import workflows CONF = cfg.CONF LOG = logging.getLogger(__name__) def main(): # NOTE(jaosorior): This is needed in order for db-sync to also register the # keystonemiddleware options. Those options are used by clients that need a # keystone session in order to be able to register their actions. # This can be removed when mistral moves out of using keystonemiddleware in # favor of keystoneauth1. 
for group, opts in keystonemw_opts.list_auth_token_opts(): CONF.register_opts(opts, group=group) logging.register_options(CONF) config.parse_args() if len(CONF.config_file) == 0: print("Usage: sync_db --config-file ") return exit(1) logging.setup(CONF, 'Mistral') LOG.info("Starting db_sync") LOG.debug("Setting up db") db_api.setup_db() LOG.debug("populating db") action_manager.sync_db() workflows.sync_db() if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/sync_db.sh0000755000175000017500000000006500000000000017234 0ustar00coreycorey00000000000000#!/bin/sh tox -evenv -- python tools/sync_db.py "$@"././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/test-setup.sh0000755000175000017500000000350300000000000017730 0ustar00coreycorey00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should set up their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests; if you change it, # your tests might fail. DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # an anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" # Now create our database. mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/update_env_deps0000755000175000017500000000102500000000000020344 0ustar00coreycorey00000000000000TOX_ENVLIST=`grep envlist tox.ini | cut -d '=' -f 2 | tr ',' ' '` TESTENVS=`grep testenv tox.ini | awk -F ':' '{print $2}' | tr '[]' ' '` UNFILTERED_ENVLIST=`echo "$TOX_ENVLIST $TESTENVS"` ENVLIST=$( awk 'BEGIN{RS=ORS=" "}!a[$0]++' <<<$UNFILTERED_ENVLIST ); for env in $ENVLIST do ENV_PATH=.tox/$env PIP_PATH=$ENV_PATH/bin/pip echo -e "\nUpdate environment ${env}...\n" if [ ! -d $ENV_PATH -o !
-f $PIP_PATH ] then tox --notest -e$env else $PIP_PATH install -r requirements.txt -r test-requirements.txt fi done ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538868.1815686 mistral-10.0.0.0b3/tools/wf_generators/0000755000175000017500000000000000000000000020120 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/wf_generators/generate_parallel_wf.py0000644000175000017500000000242200000000000024634 0ustar00coreycorey00000000000000#!/usr/bin/env python import sys try: wf_name = str(sys.argv[1]) branch_cnt = int(sys.argv[2]) branch_depth = int(sys.argv[3]) add_join = len(sys.argv) > 4 except: raise ValueError( 'Usage: workflow_name' ' number_of_parallel_branches branch_depth add_join' ) f = open('%s.mist' % wf_name, 'w') # Writing a workflow header to the file. f.write('---\n') f.write("version: '2.0'\n\n") f.write("%s:\n" % wf_name) f.write(" tasks:\n") # 1. First starting task. f.write(" task_1:\n") f.write(" action: std.noop\n") f.write(" on-success:\n") for branch_num in range(1, branch_cnt + 1): f.write(" - task_%s_1\n" % branch_num) # 2. Branch tasks. for branch_num in range(1, branch_cnt + 1): for task_num in range(1, branch_depth + 1): f.write(" task_%s_%s:\n" % (branch_num, task_num)) f.write(" action: std.noop\n") if task_num < branch_depth: f.write(" on-success: task_%s_%s\n" % (branch_num, task_num + 1)) elif add_join: f.write(" on-success: task_join\n") # 3. The last "join" task, if needed. if add_join: f.write(" task_join:\n") f.write(" join: all") f.close() print("Workflow '%s' is created." % wf_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tools/with_venv.sh0000755000175000017500000000033200000000000017621 0ustar00coreycorey00000000000000#!/bin/bash tools_path=${tools_path:-$(dirname $0)} venv_path=${venv_path:-${tools_path}} venv_dir=${venv_name:-/../.venv} TOOLS=${tools_path} VENV=${venv:-${venv_path}/${venv_dir}} source ${VENV}/bin/activate && "$@" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538864.0 mistral-10.0.0.0b3/tox.ini0000644000175000017500000000766100000000000015440 0ustar00coreycorey00000000000000[tox] envlist = py37,pep8 minversion = 2.0 skipsdist = True ignore_basepython_conflict = True [testenv] basepython = python3 usedevelop = True install_command = pip install {opts} {packages} setenv = VIRTUAL_ENV={envdir} PYTHONDONTWRITEBYTECODE = 1 PYTHONWARNINGS=default::DeprecationWarning passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt # javascript engine py_mini_racer commands = rm -f .testrepository/times.dbm find . -type f -name "*.pyc" -delete stestr run --slowest {posargs} whitelist_externals = rm find [testenv:unit-postgresql] setenv = VIRTUAL_ENV={envdir} passenv = ZUUL_PROJECT commands = ./run_tests.sh -N --db-type postgresql [testenv:unit-mysql] setenv = VIRTUAL_ENV={envdir} passenv = ZUUL_PROJECT commands = ./run_tests.sh -N --db-type mysql [testenv:pep8] commands = doc8 doc/source flake8 {posargs} . 
{toxinidir}/tools/get_action_list.py {toxinidir}/tools/sync_db.py [testenv:cover] setenv = {[testenv]setenv} PYTHON=coverage run --source mistral --parallel-mode commands = stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml [testenv:genconfig] commands = oslo-config-generator --config-file tools/config/config-generator.mistral.conf \ --output-file etc/mistral.conf.sample [testenv:genpolicy] commands = oslopolicy-sample-generator --config-file tools/config/policy-generator.mistral.conf \ --output-file etc/policy.yaml.sample #set PYTHONHASHSEED=0 to prevent wsmeext.sphinxext from randomly failing. [testenv:venv] setenv = PYTHONHASHSEED=0 commands = {posargs} #set PYTHONHASHSEED=0 to prevent wsmeext.sphinxext from randomly failing. [testenv:docs] deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt setenv = PYTHONHASHSEED=0 commands = rm -rf doc/build sphinx-build -E -W --keep-going -b html doc/source doc/build/html [testenv:pdf-docs] deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt whitelist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:releasenotes] commands = rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html [testenv:api-ref] # This environment is called from CI scripts to test and publish # the API Ref to docs.openstack.org. commands = rm -rf api-ref/build sphinx-build -W --keep-going -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html whitelist_externals = rm #Skip PEP257 violation. [flake8] ignore = D100,D101,D102,D103,D104,D105,D200,D203,D202,D204,D205,D208,D400,D401,E402,W503,E731,W504 show-source = true builtins = _ # [H106] Don't put vim configuration in source files. # [H203] Use assertIs(Not)None to check for None. # [H904] Delay string interpolations at logging calls. enable-extensions = H106,H203,H904 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,scripts [doc8] extensions = .rst, .yaml, .mistral # Maximal line length should be 80. max-line-length = 80 [hacking] import_exceptions = mistral._i18n [flake8:local-plugins] extension = M001 = checks:CheckForLoggingIssues M319 = checks:no_assert_equal_true_false M320 = checks:no_assert_true_false_is_not M327 = checks:check_python3_xrange M328 = checks:check_python3_no_iteritems M329 = checks:check_python3_no_iterkeys M330 = checks:check_python3_no_itervalues O323 = checks:check_oslo_namespace_imports paths = ./mistral/hacking [testenv:lower-constraints] deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt
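The environments above map onto the day-to-day developer commands. As a usage sketch (env names exactly as declared in this tox.ini):

    # default unit tests plus style checks (the [tox] envlist)
    tox -e py37,pep8

    # unit tests against a real database backend
    tox -e unit-mysql
    tox -e unit-postgresql

    # regenerate the sample configuration and policy files
    tox -e genconfig
    tox -e genpolicy

    # docs, release notes and the API reference
    tox -e docs
    tox -e releasenotes
    tox -e api-ref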